1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex wrappers and string-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows: critical sections back the stream mutex.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

// Narrow strings are passed through unchanged (assumed already encoded).
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a wide (UTF-16) string to a UTF-8 encoded std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call computes the required buffer size (including the NUL).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // POSIX platforms: pthread mutexes.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
// Open and configure one direction (OUTPUT or INPUT) of a CoreAudio stream on
// device index 'device': validates the device, maps the requested channel
// range onto one or more CoreAudio streams, negotiates buffer size, sample
// rate, and virtual/physical formats, allocates the CoreHandle and internal
// conversion buffers, and registers the IOProc and xrun listener.
// Returns SUCCESS/FAILURE (error paths set errorText_).
// NOTE(review): this chunk has lines elided (error returns, closing braces,
// #else/#endif) — the embedded left-hand numbers are original file lines.
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

881 unsigned int firstChannel, unsigned int sampleRate,

882 RtAudioFormat format, unsigned int *bufferSize,

883 RtAudio::StreamOptions *options )

886 unsigned int nDevices = getDeviceCount();

887 if ( nDevices == 0 ) {

888 // This should not happen because a check is made before this function is called.

889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

893 if ( device >= nDevices ) {

894 // This should not happen because a check is made before this function is called.

895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

// Translate the public device index into a CoreAudio AudioDeviceID by
// fetching the system device list again.
899 AudioDeviceID deviceList[ nDevices ];

900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;

901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

902 kAudioObjectPropertyScopeGlobal,

903 kAudioObjectPropertyElementMaster };

904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,

905 0, NULL, &dataSize, (void *) &deviceList );

906 if ( result != noErr ) {

907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

911 AudioDeviceID id = deviceList[ device ];

913 // Setup for stream mode.

914 bool isInput = false;

915 if ( mode == INPUT ) {

917 property.mScope = kAudioDevicePropertyScopeInput;

920 property.mScope = kAudioDevicePropertyScopeOutput;

922 // Get the stream "configuration".

923 AudioBufferList *bufferList = nil;

925 property.mSelector = kAudioDevicePropertyStreamConfiguration;

926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );

927 if ( result != noErr || dataSize == 0 ) {

928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";

929 errorText_ = errorStream_.str();

933 // Allocate the AudioBufferList.

934 bufferList = (AudioBufferList *) malloc( dataSize );

935 if ( bufferList == NULL ) {

936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );

941 if (result != noErr || dataSize == 0) {

942 free( bufferList );

943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";

944 errorText_ = errorStream_.str();

948 // Search for one or more streams that contain the desired number of

949 // channels. CoreAudio devices can have an arbitrary number of

950 // streams and each stream can have an arbitrary number of channels.

951 // For each stream, a single buffer of interleaved samples is

952 // provided. RtAudio prefers the use of one stream of interleaved

953 // data or multiple consecutive single-channel streams. However, we

954 // now support multiple consecutive multi-channel streams of

955 // interleaved data as well.

956 UInt32 iStream, offsetCounter = firstChannel;

957 UInt32 nStreams = bufferList->mNumberBuffers;

958 bool monoMode = false;

959 bool foundStream = false;

961 // First check that the device supports the requested number of

963 UInt32 deviceChannels = 0;

964 for ( iStream=0; iStream<nStreams; iStream++ )

965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

967 if ( deviceChannels < ( channels + firstChannel ) ) {

968 free( bufferList );

969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";

970 errorText_ = errorStream_.str();

974 // Look for a single stream meeting our needs.

975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;

976 for ( iStream=0; iStream<nStreams; iStream++ ) {

977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

978 if ( streamChannels >= channels + offsetCounter ) {

979 firstStream = iStream;

980 channelOffset = offsetCounter;

981 foundStream = true;

// Requested channels straddle this stream's boundary, so a single
// stream cannot satisfy them; fall through to the multi-stream search.
984 if ( streamChannels > offsetCounter ) break;

985 offsetCounter -= streamChannels;

988 // If we didn't find a single stream above, then we should be able

989 // to meet the channel specification with multiple streams.

990 if ( foundStream == false ) {

992 offsetCounter = firstChannel;

993 for ( iStream=0; iStream<nStreams; iStream++ ) {

994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

995 if ( streamChannels > offsetCounter ) break;

996 offsetCounter -= streamChannels;

999 firstStream = iStream;

1000 channelOffset = offsetCounter;

1001 Int32 channelCounter = channels + offsetCounter - streamChannels;

1003 if ( streamChannels > 1 ) monoMode = false;

// Consume additional consecutive streams until the channel request is met.
1004 while ( channelCounter > 0 ) {

1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;

1006 if ( streamChannels > 1 ) monoMode = false;

1007 channelCounter -= streamChannels;

1012 free( bufferList );

1014 // Determine the buffer size.

1015 AudioValueRange bufferRange;

1016 dataSize = sizeof( AudioValueRange );

1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;

1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

1020 if ( result != noErr ) {

1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";

1022 errorText_ = errorStream_.str();

// Clamp the caller's requested frame count into the device's legal range.
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;

1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1030 // Set the buffer size. For multiple streams, I'm assuming we only

1031 // need to make this setting for the master channel.

1032 UInt32 theSize = (UInt32) *bufferSize;

1033 dataSize = sizeof( UInt32 );

1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;

1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

1037 if ( result != noErr ) {

1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";

1039 errorText_ = errorStream_.str();

1043 // If attempting to setup a duplex stream, the bufferSize parameter

1044 // MUST be the same in both directions!

1045 *bufferSize = theSize;

1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {

1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";

1048 errorText_ = errorStream_.str();

1052 stream_.bufferSize = *bufferSize;

1053 stream_.nBuffers = 1;

1055 // Try to set "hog" mode ... it's not clear to me this is working.

1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {

1058 dataSize = sizeof( hog_pid );

1059 property.mSelector = kAudioDevicePropertyHogMode;

1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );

1061 if ( result != noErr ) {

1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";

1063 errorText_ = errorStream_.str();

// Only take exclusive access if another process (or none) currently holds it.
1067 if ( hog_pid != getpid() ) {

1068 hog_pid = getpid();

1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );

1070 if ( result != noErr ) {

1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";

1072 errorText_ = errorStream_.str();

1078 // Check and if necessary, change the sample rate for the device.

1079 Float64 nominalRate;

1080 dataSize = sizeof( Float64 );

1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;

1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );

1083 if ( result != noErr ) {

1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";

1085 errorText_ = errorStream_.str();

1089 // Only change the sample rate if off by more than 1 Hz.

1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

1092 // Set a property listener for the sample rate change

1093 Float64 reportedRate = 0.0;

1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1096 if ( result != noErr ) {

1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";

1098 errorText_ = errorStream_.str();

1102 nominalRate = (Float64) sampleRate;

1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );

1104 if ( result != noErr ) {

1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";

1107 errorText_ = errorStream_.str();

1111 // Now wait until the reported nominal rate is what we just set.

1112 UInt32 microCounter = 0;

// Poll (rateListener updates reportedRate) with a 5-second ceiling.
// NOTE(review): the sleep between iterations (original line 1116, likely a
// usleep( 5000 )) is elided in this chunk — confirm against the full file,
// otherwise this is a busy-wait.
1113 while ( reportedRate != nominalRate ) {

1114 microCounter += 5000;

1115 if ( microCounter > 5000000 ) break;

1119 // Remove the property listener.

1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1122 if ( microCounter > 5000000 ) {

1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";

1124 errorText_ = errorStream_.str();

1129 // Now set the stream format for all streams. Also, check the

1130 // physical format of the device and change that if necessary.

1131 AudioStreamBasicDescription description;

1132 dataSize = sizeof( AudioStreamBasicDescription );

1133 property.mSelector = kAudioStreamPropertyVirtualFormat;

1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1135 if ( result != noErr ) {

1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";

1137 errorText_ = errorStream_.str();

1141 // Set the sample rate and data format id. However, only make the

1142 // change if the sample rate is not within 1.0 of the desired

1143 // rate and the format is not linear pcm.

1144 bool updateFormat = false;

1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {

1146 description.mSampleRate = (Float64) sampleRate;

1147 updateFormat = true;

1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {

1151 description.mFormatID = kAudioFormatLinearPCM;

1152 updateFormat = true;

1155 if ( updateFormat ) {

1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );

1157 if ( result != noErr ) {

1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";

1159 errorText_ = errorStream_.str();

1164 // Now check the physical format.

1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;

1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1167 if ( result != noErr ) {

1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";

1169 errorText_ = errorStream_.str();

1173 //std::cout << "Current physical stream format:" << std::endl;

1174 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;

1175 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1176 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;

1177 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {

1180 description.mFormatID = kAudioFormatLinearPCM;

1181 //description.mSampleRate = (Float64) sampleRate;

1182 AudioStreamBasicDescription testDescription = description;

1183 UInt32 formatFlags;

1185 // We'll try higher bit rates first and then work our way down.

// BUG(review): the vector's key type is UInt32 but the pairs below are
// built as std::pair<Float32, UInt32> with keys 24.2 and 24.4 — storing
// them truncates both to 24, so the "aligned low/high in 4 bytes" variants
// become indistinguishable from packed 24-bit. The vector should hold
// std::pair<Float32, UInt32> (fixed this way in later RtAudio releases).
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;

1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;

1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed

1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );

1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low

1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;

1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high

1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );

1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

// Attempt each candidate physical format, best first, until one sticks.
1200 bool setPhysicalFormat = false;

1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {

1202 testDescription = description;

1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;

1204 testDescription.mFormatFlags = physicalFormats[i].second;

// BUG(review): '~' (bitwise NOT) on the flag test is almost always nonzero,
// so this condition effectively reduces to "24-bit". Logical '!' was the
// intent ("24-bit and NOT packed" => 4-byte frames).
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )

1206 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;

1208 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;

1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;

1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );

1211 if ( result == noErr ) {

1212 setPhysicalFormat = true;

1213 //std::cout << "Updated physical stream format:" << std::endl;

1214 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;

1215 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1216 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;

1217 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

1222 if ( !setPhysicalFormat ) {

1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";

1224 errorText_ = errorStream_.str();

1227 } // done setting virtual/physical formats.

1229 // Get the stream / device latency.

1231 dataSize = sizeof( UInt32 );

1232 property.mSelector = kAudioDevicePropertyLatency;

1233 if ( AudioObjectHasProperty( id, &property ) == true ) {

1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );

1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;

1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";

1238 errorText_ = errorStream_.str();

1239 error( RtAudioError::WARNING );

1243 // Byte-swapping: According to AudioHardware.h, the stream data will

1244 // always be presented in native-endian format, so we should never

1245 // need to byte swap.

1246 stream_.doByteSwap[mode] = false;

1248 // From the CoreAudio documentation, PCM data must be supplied as

1250 stream_.userFormat = format;

1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

1253 if ( streamCount == 1 )

1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;

1255 else // multiple streams

1256 stream_.nDeviceChannels[mode] = channels;

1257 stream_.nUserChannels[mode] = channels;

1258 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream

1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

1260 else stream_.userInterleaved = true;

1261 stream_.deviceInterleaved[mode] = true;

1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

1264 // Set flags for buffer conversion.

1265 stream_.doConvertBuffer[mode] = false;

1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )

1267 stream_.doConvertBuffer[mode] = true;

1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

1269 stream_.doConvertBuffer[mode] = true;

1270 if ( streamCount == 1 ) {

1271 if ( stream_.nUserChannels[mode] > 1 &&

1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )

1273 stream_.doConvertBuffer[mode] = true;

1275 else if ( monoMode && stream_.userInterleaved )

1276 stream_.doConvertBuffer[mode] = true;

1278 // Allocate our CoreHandle structure for the stream.

1279 CoreHandle *handle = 0;

1280 if ( stream_.apiHandle == 0 ) {

1282 handle = new CoreHandle;

1284 catch ( std::bad_alloc& ) {

1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {

1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

1293 stream_.apiHandle = (void *) handle;

// An existing handle (duplex second leg) is reused rather than reallocated.
1296 handle = (CoreHandle *) stream_.apiHandle;

1297 handle->iStream[mode] = firstStream;

1298 handle->nStreams[mode] = streamCount;

1299 handle->id[mode] = id;

1301 // Allocate necessary internal buffers.

1302 unsigned long bufferBytes;

1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

1304 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

// BUG(review): memset runs before the NULL check — a failed malloc is
// dereferenced here. The commented-out calloc above (one call, checked
// afterward) is the safer form.
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );

1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );

1307 if ( stream_.userBuffer[mode] == NULL ) {

1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

1312 // If possible, we will make use of the CoreAudio stream buffers as

1313 // "device buffers".  However, we can't do this if using multiple

1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

1317 bool makeBuffer = true;

1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

1319 if ( mode == INPUT ) {

1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

// Reuse the output-side device buffer for input when it is big enough.
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;

1326 if ( makeBuffer ) {

1327 bufferBytes *= *bufferSize;

1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

1330 if ( stream_.deviceBuffer == NULL ) {

1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

1337 stream_.sampleRate = sampleRate;

1338 stream_.device[mode] = device;

1339 stream_.state = STREAM_STOPPED;

1340 stream_.callbackInfo.object = (void *) this;

1342 // Setup the buffer conversion information structure.

1343 if ( stream_.doConvertBuffer[mode] ) {

1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );

1345 else setConvertInfo( mode, channelOffset );

1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )

1349 // Only one callback procedure per device.

1350 stream_.mode = DUPLEX;

1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );

1355 // deprecated in favor of AudioDeviceCreateIOProcID()

1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );

1358 if ( result != noErr ) {

1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";

1360 errorText_ = errorStream_.str();

1363 if ( stream_.mode == OUTPUT && mode == INPUT )

1364 stream_.mode = DUPLEX;

1366 stream_.mode = mode;

1369 // Setup the device property listener for over/underload.

1370 property.mSelector = kAudioDeviceProcessorOverload;

1371 property.mScope = kAudioObjectPropertyScopeGlobal;

1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

// Shared failure-cleanup tail ('error:' label and surrounding lines elided
// in this chunk): tear down the condition variable, release the handle and
// all buffers, and mark the stream closed before returning FAILURE.
1378 pthread_cond_destroy( &handle->condition );

1380 stream_.apiHandle = 0;

1383 for ( int i=0; i<2; i++ ) {

1384 if ( stream_.userBuffer[i] ) {

1385 free( stream_.userBuffer[i] );

1386 stream_.userBuffer[i] = 0;

1390 if ( stream_.deviceBuffer ) {

1391 free( stream_.deviceBuffer );

1392 stream_.deviceBuffer = 0;

1395 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: for each active direction, remove the xrun
// (processor-overload) property listener, stop the device if still running,
// and destroy/remove the IOProc; then free the user and device buffers,
// destroy the condition variable, and reset stream mode/state.
1399 void RtApiCore :: closeStream( void )

1401 if ( stream_.state == STREAM_CLOSED ) {

1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1403 error( RtAudioError::WARNING );

1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// NOTE(review): the condition on the next two lines is duplicated verbatim;
// given this chunk's elided lines, one of them presumably guarded something
// else (e.g. a null-handle check) in the full file — confirm upstream.
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1409 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1411 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1412 kAudioObjectPropertyScopeGlobal,

1413 kAudioObjectPropertyElementMaster };

1415 property.mSelector = kAudioDeviceProcessorOverload;

1416 property.mScope = kAudioObjectPropertyScopeGlobal;

1417 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {

1418 errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1419 error( RtAudioError::WARNING );

1422 if ( stream_.state == STREAM_RUNNING )

1423 AudioDeviceStop( handle->id[0], callbackHandler );

1424 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1425 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1427 // deprecated in favor of AudioDeviceDestroyIOProcID()

1428 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Input side: only torn down separately when it is a distinct device
// (same-device duplex shares the single IOProc removed above).
1432 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1434 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1435 kAudioObjectPropertyScopeGlobal,

1436 kAudioObjectPropertyElementMaster };

1438 property.mSelector = kAudioDeviceProcessorOverload;

1439 property.mScope = kAudioObjectPropertyScopeGlobal;

1440 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {

1441 errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1442 error( RtAudioError::WARNING );

1445 if ( stream_.state == STREAM_RUNNING )

1446 AudioDeviceStop( handle->id[1], callbackHandler );

1447 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1448 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1450 // deprecated in favor of AudioDeviceDestroyIOProcID()

1451 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

1455 for ( int i=0; i<2; i++ ) {

1456 if ( stream_.userBuffer[i] ) {

1457 free( stream_.userBuffer[i] );

1458 stream_.userBuffer[i] = 0;

1462 if ( stream_.deviceBuffer ) {

1463 free( stream_.deviceBuffer );

1464 stream_.deviceBuffer = 0;

1467 // Destroy pthread condition variable.

1468 pthread_cond_destroy( &handle->condition );

// NOTE(review): the handle itself appears to be released on an elided line
// (a 'delete handle;' would normally precede clearing apiHandle) — confirm
// against the full file to rule out a leak.
1470 stream_.apiHandle = 0;

1472 stream_.mode = UNINITIALIZED;

1473 stream_.state = STREAM_CLOSED;
\r
// Start the stream: issue AudioDeviceStart() for the output device and,
// when the input side is a distinct device, for the input device too; then
// reset the drain bookkeeping and mark the stream running. Any CoreAudio
// failure leaves 'result' != noErr and reports a SYSTEM_ERROR at the end.
1476 void RtApiCore :: startStream( void )

1479 if ( stream_.state == STREAM_RUNNING ) {

1480 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1481 error( RtAudioError::WARNING );

1485 OSStatus result = noErr;

1486 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1489 result = AudioDeviceStart( handle->id[0], callbackHandler );

1490 if ( result != noErr ) {

1491 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1492 errorText_ = errorStream_.str();

// Same-device duplex is driven by the single IOProc started above; only a
// separate input device needs its own start.
1497 if ( stream_.mode == INPUT ||

1498 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1500 result = AudioDeviceStart( handle->id[1], callbackHandler );

1501 if ( result != noErr ) {

1502 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1503 errorText_ = errorStream_.str();

1508 handle->drainCounter = 0;

1509 handle->internalDrain = false;

1510 stream_.state = STREAM_RUNNING;

1513 if ( result == noErr ) return;

1514 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream, draining output first: set drainCounter so the callback
// writes silence, wait on the condition variable until the callback signals
// the drain is complete, then AudioDeviceStop() each active device.
1517 void RtApiCore :: stopStream( void )

1520 if ( stream_.state == STREAM_STOPPED ) {

1521 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1522 error( RtAudioError::WARNING );

1526 OSStatus result = noErr;

1527 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1528 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain is in progress yet (an internal drain
// initiated from the callback would have set it already).
1530 if ( handle->drainCounter == 0 ) {

1531 handle->drainCounter = 2;

// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by the
// caller; no lock is visible in this chunk — confirm the mutex is locked
// before this call in the full file.
1532 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1535 result = AudioDeviceStop( handle->id[0], callbackHandler );

1536 if ( result != noErr ) {

1537 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1538 errorText_ = errorStream_.str();

1543 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1545 result = AudioDeviceStop( handle->id[1], callbackHandler );

1546 if ( result != noErr ) {

1547 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1548 errorText_ = errorStream_.str();

1553 stream_.state = STREAM_STOPPED;

1556 if ( result == noErr ) return;

1557 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: unlike stopStream(), output is not drained. Setting
// drainCounter to 2 makes the callback zero-fill its output buffers until
// the stream is actually stopped.
// NOTE(review): the chunk ends before the actual stop action (the full file
// presumably follows with a stopStream() call on an elided line) — confirm.
1560 void RtApiCore :: abortStream( void )

1563 if ( stream_.state == STREAM_STOPPED ) {

1564 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1565 error( RtAudioError::WARNING );

1569 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1570 handle->drainCounter = 2;
\r
1575 // This function will be called by a spawned thread when the user
\r
1576 // callback function signals that the stream should be stopped or
\r
1577 // aborted. It is better to handle it this way because the
\r
1578 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1579 // function is called.
\r
// Thread entry point spawned from callbackEvent(): recovers the RtApiCore
// instance from the CallbackInfo and calls stopStream() on it, so the
// device stop does not happen from inside the CoreAudio render callback.
1580 static void *coreStopStream( void *ptr )

1582 CallbackInfo *info = (CallbackInfo *) ptr;

1583 RtApiCore *object = (RtApiCore *) info->object;

1585 object->stopStream();

// Terminate this helper thread; stopStream() has already returned.
1586 pthread_exit( NULL );
\r
1589 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1590 const AudioBufferList *inBufferList,
\r
1591 const AudioBufferList *outBufferList )
\r
1593 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1594 if ( stream_.state == STREAM_CLOSED ) {
\r
1595 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1596 error( RtAudioError::WARNING );
\r
1600 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1601 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1603 // Check if we were draining the stream and signal is finished.
\r
1604 if ( handle->drainCounter > 3 ) {
\r
1605 ThreadHandle threadId;
\r
1607 stream_.state = STREAM_STOPPING;
\r
1608 if ( handle->internalDrain == true )
\r
1609 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1610 else // external call to stopStream()
\r
1611 pthread_cond_signal( &handle->condition );
\r
1615 AudioDeviceID outputDevice = handle->id[0];
\r
1617 // Invoke user callback to get fresh output data UNLESS we are
\r
1618 // draining stream or duplex mode AND the input/output devices are
\r
1619 // different AND this function is called for the input device.
\r
1620 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1621 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1622 double streamTime = getStreamTime();
\r
1623 RtAudioStreamStatus status = 0;
\r
1624 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1625 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1626 handle->xrun[0] = false;
\r
1628 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1629 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1630 handle->xrun[1] = false;
\r
1633 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1634 stream_.bufferSize, streamTime, status, info->userData );
\r
1635 if ( cbReturnValue == 2 ) {
\r
1636 stream_.state = STREAM_STOPPING;
\r
1637 handle->drainCounter = 2;
\r
1641 else if ( cbReturnValue == 1 ) {
\r
1642 handle->drainCounter = 1;
\r
1643 handle->internalDrain = true;
\r
1647 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1649 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1651 if ( handle->nStreams[0] == 1 ) {
\r
1652 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1654 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1656 else { // fill multiple streams with zeros
\r
1657 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1658 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1660 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1664 else if ( handle->nStreams[0] == 1 ) {
\r
1665 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1666 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1667 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1669 else { // copy from user buffer
\r
1670 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1671 stream_.userBuffer[0],
\r
1672 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1675 else { // fill multiple streams
\r
1676 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1677 if ( stream_.doConvertBuffer[0] ) {
\r
1678 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1679 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1682 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1683 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1684 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1685 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1686 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1689 else { // fill multiple multi-channel streams with interleaved data
\r
1690 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1691 Float32 *out, *in;
\r
1693 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1694 UInt32 inChannels = stream_.nUserChannels[0];
\r
1695 if ( stream_.doConvertBuffer[0] ) {
\r
1696 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1697 inChannels = stream_.nDeviceChannels[0];
\r
1700 if ( inInterleaved ) inOffset = 1;
\r
1701 else inOffset = stream_.bufferSize;
\r
1703 channelsLeft = inChannels;
\r
1704 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1706 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1707 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1710 // Account for possible channel offset in first stream
\r
1711 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1712 streamChannels -= stream_.channelOffset[0];
\r
1713 outJump = stream_.channelOffset[0];
\r
1717 // Account for possible unfilled channels at end of the last stream
\r
1718 if ( streamChannels > channelsLeft ) {
\r
1719 outJump = streamChannels - channelsLeft;
\r
1720 streamChannels = channelsLeft;
\r
1723 // Determine input buffer offsets and skips
\r
1724 if ( inInterleaved ) {
\r
1725 inJump = inChannels;
\r
1726 in += inChannels - channelsLeft;
\r
1730 in += (inChannels - channelsLeft) * inOffset;
\r
1733 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1734 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1735 *out++ = in[j*inOffset];
\r
1740 channelsLeft -= streamChannels;
\r
1746 // Don't bother draining input
\r
1747 if ( handle->drainCounter ) {
\r
1748 handle->drainCounter++;
\r
1752 AudioDeviceID inputDevice;
\r
1753 inputDevice = handle->id[1];
\r
1754 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1756 if ( handle->nStreams[1] == 1 ) {
\r
1757 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1758 convertBuffer( stream_.userBuffer[1],
\r
1759 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1760 stream_.convertInfo[1] );
\r
1762 else { // copy to user buffer
\r
1763 memcpy( stream_.userBuffer[1],
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1765 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1768 else { // read from multiple streams
\r
1769 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1770 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1772 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1773 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1774 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1775 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1776 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1779 else { // read from multiple multi-channel streams
\r
1780 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1781 Float32 *out, *in;
\r
1783 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1784 UInt32 outChannels = stream_.nUserChannels[1];
\r
1785 if ( stream_.doConvertBuffer[1] ) {
\r
1786 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1787 outChannels = stream_.nDeviceChannels[1];
\r
1790 if ( outInterleaved ) outOffset = 1;
\r
1791 else outOffset = stream_.bufferSize;
\r
1793 channelsLeft = outChannels;
\r
1794 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1796 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1797 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1800 // Account for possible channel offset in first stream
\r
1801 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1802 streamChannels -= stream_.channelOffset[1];
\r
1803 inJump = stream_.channelOffset[1];
\r
1807 // Account for possible unread channels at end of the last stream
\r
1808 if ( streamChannels > channelsLeft ) {
\r
1809 inJump = streamChannels - channelsLeft;
\r
1810 streamChannels = channelsLeft;
\r
1813 // Determine output buffer offsets and skips
\r
1814 if ( outInterleaved ) {
\r
1815 outJump = outChannels;
\r
1816 out += outChannels - channelsLeft;
\r
1820 out += (outChannels - channelsLeft) * outOffset;
\r
1823 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1824 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1825 out[j*outOffset] = *in++;
\r
1830 channelsLeft -= streamChannels;
\r
1834 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1835 convertBuffer( stream_.userBuffer[1],
\r
1836 stream_.deviceBuffer,
\r
1837 stream_.convertInfo[1] );
\r
1843 //MUTEX_UNLOCK( &stream_.mutex );
\r
1845 RtApi::tickStreamTime();
\r
1849 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1853 case kAudioHardwareNotRunningError:
\r
1854 return "kAudioHardwareNotRunningError";
\r
1856 case kAudioHardwareUnspecifiedError:
\r
1857 return "kAudioHardwareUnspecifiedError";
\r
1859 case kAudioHardwareUnknownPropertyError:
\r
1860 return "kAudioHardwareUnknownPropertyError";
\r
1862 case kAudioHardwareBadPropertySizeError:
\r
1863 return "kAudioHardwareBadPropertySizeError";
\r
1865 case kAudioHardwareIllegalOperationError:
\r
1866 return "kAudioHardwareIllegalOperationError";
\r
1868 case kAudioHardwareBadObjectError:
\r
1869 return "kAudioHardwareBadObjectError";
\r
1871 case kAudioHardwareBadDeviceError:
\r
1872 return "kAudioHardwareBadDeviceError";
\r
1874 case kAudioHardwareBadStreamError:
\r
1875 return "kAudioHardwareBadStreamError";
\r
1877 case kAudioHardwareUnsupportedOperationError:
\r
1878 return "kAudioHardwareUnsupportedOperationError";
\r
1880 case kAudioDeviceUnsupportedFormatError:
\r
1881 return "kAudioDeviceUnsupportedFormatError";
\r
1883 case kAudioDevicePermissionsError:
\r
1884 return "kAudioDevicePermissionsError";
\r
1887 return "CoreAudio unknown error";
\r
1891 //******************** End of __MACOSX_CORE__ *********************//
\r
1894 #if defined(__UNIX_JACK__)
\r
1896 // JACK is a low-latency audio server, originally written for the
\r
1897 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1898 // connect a number of different applications to an audio device, as
\r
1899 // well as allowing them to share audio between themselves.
\r
1901 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1902 // have ports connected to the server. The JACK server is typically
\r
1903 // started in a terminal as follows:
\r
1905 // .jackd -d alsa -d hw:0
\r
1907 // or through an interface program such as qjackctl. Many of the
\r
1908 // parameters normally set for a stream are fixed by the JACK server
\r
1909 // and can be specified when the JACK server is started. In
\r
1912 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1914 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1915 // frames, and number of buffers = 4. Once the server is running, it
\r
1916 // is not possible to override these values. If the values are not
\r
1917 // specified in the command-line, the JACK server uses default values.
\r
1919 // The JACK server does not have to be running when an instance of
\r
1920 // RtApiJack is created, though the function getDeviceCount() will
\r
1921 // report 0 devices found until JACK has been started. When no
\r
1922 // devices are available (i.e., the JACK server is not running), a
\r
1923 // stream cannot be opened.
\r
1925 #include <jack/jack.h>
\r
1926 #include <unistd.h>
\r
1929 // A structure to hold various information related to the Jack API
\r
1930 // implementation.
\r
1931 struct JackHandle {
\r
1932 jack_client_t *client;
\r
1933 jack_port_t **ports[2];
\r
1934 std::string deviceName[2];
\r
1936 pthread_cond_t condition;
\r
1937 int drainCounter; // Tracks callback counts when draining
\r
1938 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1941 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error-callback sink installed via jack_set_error_function() in the
// RtApiJack constructor (non-debug builds): silently discards JACK's
// internal error messages.
static void jackSilentError( const char * /*message*/ ) {}
\r
1946 RtApiJack :: RtApiJack()
\r
1948 // Nothing to do here.
\r
1949 #if !defined(__RTAUDIO_DEBUG__)
\r
1950 // Turn off Jack's internal error reporting.
\r
1951 jack_set_error_function( &jackSilentError );
\r
1955 RtApiJack :: ~RtApiJack()
\r
1957 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1960 unsigned int RtApiJack :: getDeviceCount( void )
\r
1962 // See if we can become a jack client.
\r
1963 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1964 jack_status_t *status = NULL;
\r
1965 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1966 if ( client == 0 ) return 0;
\r
1968 const char **ports;
\r
1969 std::string port, previousPort;
\r
1970 unsigned int nChannels = 0, nDevices = 0;
\r
1971 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1973 // Parse the port names up to the first colon (:).
\r
1974 size_t iColon = 0;
\r
1976 port = (char *) ports[ nChannels ];
\r
1977 iColon = port.find(":");
\r
1978 if ( iColon != std::string::npos ) {
\r
1979 port = port.substr( 0, iColon + 1 );
\r
1980 if ( port != previousPort ) {
\r
1982 previousPort = port;
\r
1985 } while ( ports[++nChannels] );
\r
1989 jack_client_close( client );
\r
1993 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1995 RtAudio::DeviceInfo info;
\r
1996 info.probed = false;
\r
1998 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1999 jack_status_t *status = NULL;
\r
2000 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2001 if ( client == 0 ) {
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 const char **ports;
\r
2008 std::string port, previousPort;
\r
2009 unsigned int nPorts = 0, nDevices = 0;
\r
2010 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2012 // Parse the port names up to the first colon (:).
\r
2013 size_t iColon = 0;
\r
2015 port = (char *) ports[ nPorts ];
\r
2016 iColon = port.find(":");
\r
2017 if ( iColon != std::string::npos ) {
\r
2018 port = port.substr( 0, iColon );
\r
2019 if ( port != previousPort ) {
\r
2020 if ( nDevices == device ) info.name = port;
\r
2022 previousPort = port;
\r
2025 } while ( ports[++nPorts] );
\r
2029 if ( device >= nDevices ) {
\r
2030 jack_client_close( client );
\r
2031 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2032 error( RtAudioError::INVALID_USE );
\r
2036 // Get the current jack server sample rate.
\r
2037 info.sampleRates.clear();
\r
2039 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2040 info.sampleRates.push_back( info.preferredSampleRate );
\r
2042 // Count the available ports containing the client name as device
\r
2043 // channels. Jack "input ports" equal RtAudio output channels.
\r
2044 unsigned int nChannels = 0;
\r
2045 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2047 while ( ports[ nChannels ] ) nChannels++;
\r
2049 info.outputChannels = nChannels;
\r
2052 // Jack "output ports" equal RtAudio input channels.
\r
2054 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2056 while ( ports[ nChannels ] ) nChannels++;
\r
2058 info.inputChannels = nChannels;
\r
2061 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2062 jack_client_close(client);
\r
2063 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2064 error( RtAudioError::WARNING );
\r
2068 // If device opens for both playback and capture, we determine the channels.
\r
2069 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2070 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2072 // Jack always uses 32-bit floats.
\r
2073 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2075 // Jack doesn't provide default devices so we'll use the first available one.
\r
2076 if ( device == 0 && info.outputChannels > 0 )
\r
2077 info.isDefaultOutput = true;
\r
2078 if ( device == 0 && info.inputChannels > 0 )
\r
2079 info.isDefaultInput = true;
\r
2081 jack_client_close(client);
\r
2082 info.probed = true;
\r
2086 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2088 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2090 RtApiJack *object = (RtApiJack *) info->object;
\r
2091 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2096 // This function will be called by a spawned thread when the Jack
\r
2097 // server signals that it is shutting down. It is necessary to handle
\r
2098 // it this way because the jackShutdown() function must return before
\r
2099 // the jack_deactivate() function (in closeStream()) will return.
\r
2100 static void *jackCloseStream( void *ptr )
\r
2102 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2103 RtApiJack *object = (RtApiJack *) info->object;
\r
2105 object->closeStream();
\r
2107 pthread_exit( NULL );
\r
2109 static void jackShutdown( void *infoPointer )
\r
2111 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2112 RtApiJack *object = (RtApiJack *) info->object;
\r
2114 // Check current stream state. If stopped, then we'll assume this
\r
2115 // was called as a result of a call to RtApiJack::stopStream (the
\r
2116 // deactivation of a client handle causes this function to be called).
\r
2117 // If not, we'll assume the Jack server is shutting down or some
\r
2118 // other problem occurred and we should close the stream.
\r
2119 if ( object->isStreamRunning() == false ) return;
\r
2121 ThreadHandle threadId;
\r
2122 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2123 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2126 static int jackXrun( void *infoPointer )
\r
2128 JackHandle *handle = (JackHandle *) infoPointer;
\r
2130 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2131 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2136 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2137 unsigned int firstChannel, unsigned int sampleRate,
\r
2138 RtAudioFormat format, unsigned int *bufferSize,
\r
2139 RtAudio::StreamOptions *options )
\r
2141 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2143 // Look for jack server and try to become a client (only do once per stream).
\r
2144 jack_client_t *client = 0;
\r
2145 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2146 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2147 jack_status_t *status = NULL;
\r
2148 if ( options && !options->streamName.empty() )
\r
2149 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2151 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2152 if ( client == 0 ) {
\r
2153 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2154 error( RtAudioError::WARNING );
\r
2159 // The handle must have been created on an earlier pass.
\r
2160 client = handle->client;
\r
2163 const char **ports;
\r
2164 std::string port, previousPort, deviceName;
\r
2165 unsigned int nPorts = 0, nDevices = 0;
\r
2166 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2168 // Parse the port names up to the first colon (:).
\r
2169 size_t iColon = 0;
\r
2171 port = (char *) ports[ nPorts ];
\r
2172 iColon = port.find(":");
\r
2173 if ( iColon != std::string::npos ) {
\r
2174 port = port.substr( 0, iColon );
\r
2175 if ( port != previousPort ) {
\r
2176 if ( nDevices == device ) deviceName = port;
\r
2178 previousPort = port;
\r
2181 } while ( ports[++nPorts] );
\r
2185 if ( device >= nDevices ) {
\r
2186 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2190 // Count the available ports containing the client name as device
\r
2191 // channels. Jack "input ports" equal RtAudio output channels.
\r
2192 unsigned int nChannels = 0;
\r
2193 unsigned long flag = JackPortIsInput;
\r
2194 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2195 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2197 while ( ports[ nChannels ] ) nChannels++;
\r
2201 // Compare the jack ports for specified client to the requested number of channels.
\r
2202 if ( nChannels < (channels + firstChannel) ) {
\r
2203 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2204 errorText_ = errorStream_.str();
\r
2208 // Check the jack server sample rate.
\r
2209 unsigned int jackRate = jack_get_sample_rate( client );
\r
2210 if ( sampleRate != jackRate ) {
\r
2211 jack_client_close( client );
\r
2212 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2213 errorText_ = errorStream_.str();
\r
2216 stream_.sampleRate = jackRate;
\r
2218 // Get the latency of the JACK port.
\r
2219 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2220 if ( ports[ firstChannel ] ) {
\r
2221 // Added by Ge Wang
\r
2222 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2223 // the range (usually the min and max are equal)
\r
2224 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2225 // get the latency range
\r
2226 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2227 // be optimistic, use the min!
\r
2228 stream_.latency[mode] = latrange.min;
\r
2229 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2233 // The jack server always uses 32-bit floating-point data.
\r
2234 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2235 stream_.userFormat = format;
\r
2237 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2238 else stream_.userInterleaved = true;
\r
2240 // Jack always uses non-interleaved buffers.
\r
2241 stream_.deviceInterleaved[mode] = false;
\r
2243 // Jack always provides host byte-ordered data.
\r
2244 stream_.doByteSwap[mode] = false;
\r
2246 // Get the buffer size. The buffer size and number of buffers
\r
2247 // (periods) is set when the jack server is started.
\r
2248 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2249 *bufferSize = stream_.bufferSize;
\r
2251 stream_.nDeviceChannels[mode] = channels;
\r
2252 stream_.nUserChannels[mode] = channels;
\r
2254 // Set flags for buffer conversion.
\r
2255 stream_.doConvertBuffer[mode] = false;
\r
2256 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2257 stream_.doConvertBuffer[mode] = true;
\r
2258 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2259 stream_.nUserChannels[mode] > 1 )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2262 // Allocate our JackHandle structure for the stream.
\r
2263 if ( handle == 0 ) {
\r
2265 handle = new JackHandle;
\r
2267 catch ( std::bad_alloc& ) {
\r
2268 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2272 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2273 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2276 stream_.apiHandle = (void *) handle;
\r
2277 handle->client = client;
\r
2279 handle->deviceName[mode] = deviceName;
\r
2281 // Allocate necessary internal buffers.
\r
2282 unsigned long bufferBytes;
\r
2283 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2284 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2285 if ( stream_.userBuffer[mode] == NULL ) {
\r
2286 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2290 if ( stream_.doConvertBuffer[mode] ) {
\r
2292 bool makeBuffer = true;
\r
2293 if ( mode == OUTPUT )
\r
2294 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2295 else { // mode == INPUT
\r
2296 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2297 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2298 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2299 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2303 if ( makeBuffer ) {
\r
2304 bufferBytes *= *bufferSize;
\r
2305 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2306 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2307 if ( stream_.deviceBuffer == NULL ) {
\r
2308 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2314 // Allocate memory for the Jack ports (channels) identifiers.
\r
2315 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2316 if ( handle->ports[mode] == NULL ) {
\r
2317 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2321 stream_.device[mode] = device;
\r
2322 stream_.channelOffset[mode] = firstChannel;
\r
2323 stream_.state = STREAM_STOPPED;
\r
2324 stream_.callbackInfo.object = (void *) this;
\r
2326 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2327 // We had already set up the stream for output.
\r
2328 stream_.mode = DUPLEX;
\r
2330 stream_.mode = mode;
\r
2331 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2332 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2333 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2336 // Register our ports.
\r
2338 if ( mode == OUTPUT ) {
\r
2339 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2340 snprintf( label, 64, "outport %d", i );
\r
2341 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2342 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2346 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2347 snprintf( label, 64, "inport %d", i );
\r
2348 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2353 // Setup the buffer conversion information structure. We don't use
\r
2354 // buffers to do channel offsets, so we override that parameter
\r
2356 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2362 pthread_cond_destroy( &handle->condition );
\r
2363 jack_client_close( handle->client );
\r
2365 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2366 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2369 stream_.apiHandle = 0;
\r
2372 for ( int i=0; i<2; i++ ) {
\r
2373 if ( stream_.userBuffer[i] ) {
\r
2374 free( stream_.userBuffer[i] );
\r
2375 stream_.userBuffer[i] = 0;
\r
2379 if ( stream_.deviceBuffer ) {
\r
2380 free( stream_.deviceBuffer );
\r
2381 stream_.deviceBuffer = 0;
\r
2387 void RtApiJack :: closeStream( void )
\r
2389 if ( stream_.state == STREAM_CLOSED ) {
\r
2390 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2391 error( RtAudioError::WARNING );
\r
2395 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2398 if ( stream_.state == STREAM_RUNNING )
\r
2399 jack_deactivate( handle->client );
\r
2401 jack_client_close( handle->client );
\r
2405 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2406 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2407 pthread_cond_destroy( &handle->condition );
\r
2409 stream_.apiHandle = 0;
\r
2412 for ( int i=0; i<2; i++ ) {
\r
2413 if ( stream_.userBuffer[i] ) {
\r
2414 free( stream_.userBuffer[i] );
\r
2415 stream_.userBuffer[i] = 0;
\r
2419 if ( stream_.deviceBuffer ) {
\r
2420 free( stream_.deviceBuffer );
\r
2421 stream_.deviceBuffer = 0;
\r
2424 stream_.mode = UNINITIALIZED;
\r
2425 stream_.state = STREAM_CLOSED;
\r
2428 void RtApiJack :: startStream( void )
\r
2431 if ( stream_.state == STREAM_RUNNING ) {
\r
2432 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2433 error( RtAudioError::WARNING );
\r
2437 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2438 int result = jack_activate( handle->client );
\r
2440 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2444 const char **ports;
\r
2446 // Get the list of available ports.
\r
2447 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2449 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2450 if ( ports == NULL) {
\r
2451 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2455 // Now make the port connections. Since RtAudio wasn't designed to
\r
2456 // allow the user to select particular channels of a device, we'll
\r
2457 // just open the first "nChannels" ports with offset.
\r
2458 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2460 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2461 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2464 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2471 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2473 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2474 if ( ports == NULL) {
\r
2475 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2479 // Now make the port connections. See note above.
\r
2480 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2482 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2483 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2486 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2493 handle->drainCounter = 0;
\r
2494 handle->internalDrain = false;
\r
2495 stream_.state = STREAM_RUNNING;
\r
2498 if ( result == 0 ) return;
\r
2499 error( RtAudioError::SYSTEM_ERROR );
\r
2502 void RtApiJack :: stopStream( void )
\r
2505 if ( stream_.state == STREAM_STOPPED ) {
\r
2506 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2507 error( RtAudioError::WARNING );
\r
2511 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2514 if ( handle->drainCounter == 0 ) {
\r
2515 handle->drainCounter = 2;
\r
2516 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2520 jack_deactivate( handle->client );
\r
2521 stream_.state = STREAM_STOPPED;
\r
2524 void RtApiJack :: abortStream( void )
\r
2527 if ( stream_.state == STREAM_STOPPED ) {
\r
2528 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2529 error( RtAudioError::WARNING );
\r
2533 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2534 handle->drainCounter = 2;
\r
2539 // This function will be called by a spawned thread when the user
\r
2540 // callback function signals that the stream should be stopped or
\r
2541 // aborted. It is necessary to handle it this way because the
\r
2542 // callbackEvent() function must return before the jack_deactivate()
\r
2543 // function will return.
\r
2544 static void *jackStopStream( void *ptr )
\r
2546 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2547 RtApiJack *object = (RtApiJack *) info->object;
\r
2549 object->stopStream();
\r
2550 pthread_exit( NULL );
\r
2553 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2555 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2556 if ( stream_.state == STREAM_CLOSED ) {
\r
2557 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2558 error( RtAudioError::WARNING );
\r
2561 if ( stream_.bufferSize != nframes ) {
\r
2562 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2563 error( RtAudioError::WARNING );
\r
2567 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2568 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2570 // Check if we were draining the stream and signal is finished.
\r
2571 if ( handle->drainCounter > 3 ) {
\r
2572 ThreadHandle threadId;
\r
2574 stream_.state = STREAM_STOPPING;
\r
2575 if ( handle->internalDrain == true )
\r
2576 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2578 pthread_cond_signal( &handle->condition );
\r
2582 // Invoke user callback first, to get fresh output data.
\r
2583 if ( handle->drainCounter == 0 ) {
\r
2584 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2585 double streamTime = getStreamTime();
\r
2586 RtAudioStreamStatus status = 0;
\r
2587 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2588 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2589 handle->xrun[0] = false;
\r
2591 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2592 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2593 handle->xrun[1] = false;
\r
2595 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2596 stream_.bufferSize, streamTime, status, info->userData );
\r
2597 if ( cbReturnValue == 2 ) {
\r
2598 stream_.state = STREAM_STOPPING;
\r
2599 handle->drainCounter = 2;
\r
2601 pthread_create( &id, NULL, jackStopStream, info );
\r
2604 else if ( cbReturnValue == 1 ) {
\r
2605 handle->drainCounter = 1;
\r
2606 handle->internalDrain = true;
\r
2610 jack_default_audio_sample_t *jackbuffer;
\r
2611 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2612 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2614 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2616 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2617 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2618 memset( jackbuffer, 0, bufferBytes );
\r
2622 else if ( stream_.doConvertBuffer[0] ) {
\r
2624 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2626 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2627 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2628 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2631 else { // no buffer conversion
\r
2632 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2633 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2634 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2639 // Don't bother draining input
\r
2640 if ( handle->drainCounter ) {
\r
2641 handle->drainCounter++;
\r
2645 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2647 if ( stream_.doConvertBuffer[1] ) {
\r
2648 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2649 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2650 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2652 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2654 else { // no buffer conversion
\r
2655 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2657 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2663 RtApi::tickStreamTime();
\r
2666 //******************** End of __UNIX_JACK__ *********************//
\r
2669 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2671 // The ASIO API is designed around a callback scheme, so this
\r
2672 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2673 // Jack. The primary constraint with ASIO is that it only allows
\r
2674 // access to a single driver at a time. Thus, it is not possible to
\r
2675 // have more than one simultaneous RtAudio stream.
\r
2677 // This implementation also requires a number of external ASIO files
\r
2678 // and a few global variables. The ASIO callback scheme does not
\r
2679 // allow for the passing of user data, so we must create a global
\r
2680 // pointer to our callbackInfo structure.
\r
2682 // On unix systems, we make use of a pthread condition variable.
\r
2683 // Since there is no equivalent in Windows, I hacked something based
\r
2684 // on information found in
\r
2685 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2687 #include "asiosys.h"
\r
2689 #include "iasiothiscallresolver.h"
\r
2690 #include "asiodrivers.h"
\r
2693 static AsioDrivers drivers;
\r
2694 static ASIOCallbacks asioCallbacks;
\r
2695 static ASIODriverInfo driverInfo;
\r
2696 static CallbackInfo *asioCallbackInfo;
\r
2697 static bool asioXRun;
\r
2699 struct AsioHandle {
\r
2700 int drainCounter; // Tracks callback counts when draining
\r
2701 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2702 ASIOBufferInfo *bufferInfos;
\r
2706 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2709 // Function declarations (definitions at end of section)
\r
2710 static const char* getAsioErrorString( ASIOError result );
\r
2711 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2712 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2714 RtApiAsio :: RtApiAsio()
\r
2716 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2717 // CoInitialize beforehand, but it must be for appartment threading
\r
2718 // (in which case, CoInitilialize will return S_FALSE here).
\r
2719 coInitialized_ = false;
\r
2720 HRESULT hr = CoInitialize( NULL );
\r
2721 if ( FAILED(hr) ) {
\r
2722 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2723 error( RtAudioError::WARNING );
\r
2725 coInitialized_ = true;
\r
2727 drivers.removeCurrentDriver();
\r
2728 driverInfo.asioVersion = 2;
\r
2730 // See note in DirectSound implementation about GetDesktopWindow().
\r
2731 driverInfo.sysRef = GetForegroundWindow();
\r
2734 RtApiAsio :: ~RtApiAsio()
\r
2736 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2737 if ( coInitialized_ ) CoUninitialize();
\r
2740 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2742 return (unsigned int) drivers.asioGetNumDev();
\r
2745 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2747 RtAudio::DeviceInfo info;
\r
2748 info.probed = false;
\r
2751 unsigned int nDevices = getDeviceCount();
\r
2752 if ( nDevices == 0 ) {
\r
2753 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2754 error( RtAudioError::INVALID_USE );
\r
2758 if ( device >= nDevices ) {
\r
2759 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2760 error( RtAudioError::INVALID_USE );
\r
2764 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2765 if ( stream_.state != STREAM_CLOSED ) {
\r
2766 if ( device >= devices_.size() ) {
\r
2767 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2768 error( RtAudioError::WARNING );
\r
2771 return devices_[ device ];
\r
2774 char driverName[32];
\r
2775 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2776 if ( result != ASE_OK ) {
\r
2777 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2779 error( RtAudioError::WARNING );
\r
2783 info.name = driverName;
\r
2785 if ( !drivers.loadDriver( driverName ) ) {
\r
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2787 errorText_ = errorStream_.str();
\r
2788 error( RtAudioError::WARNING );
\r
2792 result = ASIOInit( &driverInfo );
\r
2793 if ( result != ASE_OK ) {
\r
2794 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2795 errorText_ = errorStream_.str();
\r
2796 error( RtAudioError::WARNING );
\r
2800 // Determine the device channel information.
\r
2801 long inputChannels, outputChannels;
\r
2802 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2803 if ( result != ASE_OK ) {
\r
2804 drivers.removeCurrentDriver();
\r
2805 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2806 errorText_ = errorStream_.str();
\r
2807 error( RtAudioError::WARNING );
\r
2811 info.outputChannels = outputChannels;
\r
2812 info.inputChannels = inputChannels;
\r
2813 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2814 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2816 // Determine the supported sample rates.
\r
2817 info.sampleRates.clear();
\r
2818 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2819 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2820 if ( result == ASE_OK ) {
\r
2821 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2823 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2824 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2828 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2829 ASIOChannelInfo channelInfo;
\r
2830 channelInfo.channel = 0;
\r
2831 channelInfo.isInput = true;
\r
2832 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2833 result = ASIOGetChannelInfo( &channelInfo );
\r
2834 if ( result != ASE_OK ) {
\r
2835 drivers.removeCurrentDriver();
\r
2836 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2837 errorText_ = errorStream_.str();
\r
2838 error( RtAudioError::WARNING );
\r
2842 info.nativeFormats = 0;
\r
2843 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2844 info.nativeFormats |= RTAUDIO_SINT16;
\r
2845 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2846 info.nativeFormats |= RTAUDIO_SINT32;
\r
2847 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2848 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2849 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2850 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2851 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2852 info.nativeFormats |= RTAUDIO_SINT24;
\r
2854 if ( info.outputChannels > 0 )
\r
2855 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2856 if ( info.inputChannels > 0 )
\r
2857 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2859 info.probed = true;
\r
2860 drivers.removeCurrentDriver();
\r
2864 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2866 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2867 object->callbackEvent( index );
\r
2870 void RtApiAsio :: saveDeviceInfo( void )
\r
2874 unsigned int nDevices = getDeviceCount();
\r
2875 devices_.resize( nDevices );
\r
2876 for ( unsigned int i=0; i<nDevices; i++ )
\r
2877 devices_[i] = getDeviceInfo( i );
\r
2880 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2881 unsigned int firstChannel, unsigned int sampleRate,
\r
2882 RtAudioFormat format, unsigned int *bufferSize,
\r
2883 RtAudio::StreamOptions *options )
\r
2884 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2886 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2888 // For ASIO, a duplex stream MUST use the same driver.
\r
2889 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2890 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2894 char driverName[32];
\r
2895 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2896 if ( result != ASE_OK ) {
\r
2897 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2898 errorText_ = errorStream_.str();
\r
2902 // Only load the driver once for duplex stream.
\r
2903 if ( !isDuplexInput ) {
\r
2904 // The getDeviceInfo() function will not work when a stream is open
\r
2905 // because ASIO does not allow multiple devices to run at the same
\r
2906 // time. Thus, we'll probe the system before opening a stream and
\r
2907 // save the results for use by getDeviceInfo().
\r
2908 this->saveDeviceInfo();
\r
2910 if ( !drivers.loadDriver( driverName ) ) {
\r
2911 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2912 errorText_ = errorStream_.str();
\r
2916 result = ASIOInit( &driverInfo );
\r
2917 if ( result != ASE_OK ) {
\r
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2919 errorText_ = errorStream_.str();
\r
2924 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2925 bool buffersAllocated = false;
\r
2926 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2927 unsigned int nChannels;
\r
2930 // Check the device channel count.
\r
2931 long inputChannels, outputChannels;
\r
2932 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2933 if ( result != ASE_OK ) {
\r
2934 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2935 errorText_ = errorStream_.str();
\r
2939 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2940 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2942 errorText_ = errorStream_.str();
\r
2945 stream_.nDeviceChannels[mode] = channels;
\r
2946 stream_.nUserChannels[mode] = channels;
\r
2947 stream_.channelOffset[mode] = firstChannel;
\r
2949 // Verify the sample rate is supported.
\r
2950 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2951 if ( result != ASE_OK ) {
\r
2952 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2953 errorText_ = errorStream_.str();
\r
2957 // Get the current sample rate
\r
2958 ASIOSampleRate currentRate;
\r
2959 result = ASIOGetSampleRate( ¤tRate );
\r
2960 if ( result != ASE_OK ) {
\r
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2962 errorText_ = errorStream_.str();
\r
2966 // Set the sample rate only if necessary
\r
2967 if ( currentRate != sampleRate ) {
\r
2968 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2969 if ( result != ASE_OK ) {
\r
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2971 errorText_ = errorStream_.str();
\r
2976 // Determine the driver data type.
\r
2977 ASIOChannelInfo channelInfo;
\r
2978 channelInfo.channel = 0;
\r
2979 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2980 else channelInfo.isInput = true;
\r
2981 result = ASIOGetChannelInfo( &channelInfo );
\r
2982 if ( result != ASE_OK ) {
\r
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2984 errorText_ = errorStream_.str();
\r
2988 // Assuming WINDOWS host is always little-endian.
\r
2989 stream_.doByteSwap[mode] = false;
\r
2990 stream_.userFormat = format;
\r
2991 stream_.deviceFormat[mode] = 0;
\r
2992 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2993 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2994 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2996 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2997 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2998 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3000 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3001 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3002 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3004 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3005 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3006 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3008 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3010 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3013 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3015 errorText_ = errorStream_.str();
\r
3019 // Set the buffer size. For a duplex stream, this will end up
\r
3020 // setting the buffer size based on the input constraints, which
\r
3022 long minSize, maxSize, preferSize, granularity;
\r
3023 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3024 if ( result != ASE_OK ) {
\r
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3026 errorText_ = errorStream_.str();
\r
3030 if ( isDuplexInput ) {
\r
3031 // When this is the duplex input (output was opened before), then we have to use the same
\r
3032 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3033 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3034 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3035 // to the "bufferSize" param as usual to set up processing buffers.
\r
3037 *bufferSize = stream_.bufferSize;
\r
3040 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3041 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3042 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3043 else if ( granularity == -1 ) {
\r
3044 // Make sure bufferSize is a power of two.
\r
3045 int log2_of_min_size = 0;
\r
3046 int log2_of_max_size = 0;
\r
3048 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3049 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3050 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3053 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3054 int min_delta_num = log2_of_min_size;
\r
3056 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3057 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3058 if (current_delta < min_delta) {
\r
3059 min_delta = current_delta;
\r
3060 min_delta_num = i;
\r
3064 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3065 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3066 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3068 else if ( granularity != 0 ) {
\r
3069 // Set to an even multiple of granularity, rounding up.
\r
3070 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3075 // we don't use it anymore, see above!
\r
3076 // Just left it here for the case...
\r
3077 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3078 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3083 stream_.bufferSize = *bufferSize;
\r
3084 stream_.nBuffers = 2;
\r
3086 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3087 else stream_.userInterleaved = true;
\r
3089 // ASIO always uses non-interleaved buffers.
\r
3090 stream_.deviceInterleaved[mode] = false;
\r
3092 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3093 if ( handle == 0 ) {
\r
3095 handle = new AsioHandle;
\r
3097 catch ( std::bad_alloc& ) {
\r
3098 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3101 handle->bufferInfos = 0;
\r
3103 // Create a manual-reset event.
\r
3104 handle->condition = CreateEvent( NULL, // no security
\r
3105 TRUE, // manual-reset
\r
3106 FALSE, // non-signaled initially
\r
3107 NULL ); // unnamed
\r
3108 stream_.apiHandle = (void *) handle;
\r
3111 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3112 // and output separately, we'll have to dispose of previously
\r
3113 // created output buffers for a duplex stream.
\r
3114 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3115 ASIODisposeBuffers();
\r
3116 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3119 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3121 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3122 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3123 if ( handle->bufferInfos == NULL ) {
\r
3124 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3125 errorText_ = errorStream_.str();
\r
3129 ASIOBufferInfo *infos;
\r
3130 infos = handle->bufferInfos;
\r
3131 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3132 infos->isInput = ASIOFalse;
\r
3133 infos->channelNum = i + stream_.channelOffset[0];
\r
3134 infos->buffers[0] = infos->buffers[1] = 0;
\r
3136 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3137 infos->isInput = ASIOTrue;
\r
3138 infos->channelNum = i + stream_.channelOffset[1];
\r
3139 infos->buffers[0] = infos->buffers[1] = 0;
\r
3142 // prepare for callbacks
\r
3143 stream_.sampleRate = sampleRate;
\r
3144 stream_.device[mode] = device;
\r
3145 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3147 // store this class instance before registering callbacks, that are going to use it
\r
3148 asioCallbackInfo = &stream_.callbackInfo;
\r
3149 stream_.callbackInfo.object = (void *) this;
\r
3151 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3152 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3153 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3154 asioCallbacks.asioMessage = &asioMessages;
\r
3155 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3156 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3157 if ( result != ASE_OK ) {
\r
3158 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3159 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3160 // in that case, let's be naïve and try that instead
\r
3161 *bufferSize = preferSize;
\r
3162 stream_.bufferSize = *bufferSize;
\r
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3166 if ( result != ASE_OK ) {
\r
3167 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3168 errorText_ = errorStream_.str();
\r
3171 buffersAllocated = true;
\r
3172 stream_.state = STREAM_STOPPED;
\r
3174 // Set flags for buffer conversion.
\r
3175 stream_.doConvertBuffer[mode] = false;
\r
3176 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3177 stream_.doConvertBuffer[mode] = true;
\r
3178 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3179 stream_.nUserChannels[mode] > 1 )
\r
3180 stream_.doConvertBuffer[mode] = true;
\r
3182 // Allocate necessary internal buffers
\r
3183 unsigned long bufferBytes;
\r
3184 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3185 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3186 if ( stream_.userBuffer[mode] == NULL ) {
\r
3187 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3191 if ( stream_.doConvertBuffer[mode] ) {
\r
3193 bool makeBuffer = true;
\r
3194 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3195 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3196 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3197 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3200 if ( makeBuffer ) {
\r
3201 bufferBytes *= *bufferSize;
\r
3202 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3203 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3204 if ( stream_.deviceBuffer == NULL ) {
\r
3205 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3211 // Determine device latencies
\r
3212 long inputLatency, outputLatency;
\r
3213 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3214 if ( result != ASE_OK ) {
\r
3215 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3216 errorText_ = errorStream_.str();
\r
3217 error( RtAudioError::WARNING); // warn but don't fail
\r
3220 stream_.latency[0] = outputLatency;
\r
3221 stream_.latency[1] = inputLatency;
\r
3224 // Setup the buffer conversion information structure. We don't use
\r
3225 // buffers to do channel offsets, so we override that parameter
\r
3227 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3232 if ( !isDuplexInput ) {
\r
3233 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3234 // So we clean up for single channel only
\r
3236 if ( buffersAllocated )
\r
3237 ASIODisposeBuffers();
\r
3239 drivers.removeCurrentDriver();
\r
3242 CloseHandle( handle->condition );
\r
3243 if ( handle->bufferInfos )
\r
3244 free( handle->bufferInfos );
\r
3247 stream_.apiHandle = 0;
\r
3251 if ( stream_.userBuffer[mode] ) {
\r
3252 free( stream_.userBuffer[mode] );
\r
3253 stream_.userBuffer[mode] = 0;
\r
3256 if ( stream_.deviceBuffer ) {
\r
3257 free( stream_.deviceBuffer );
\r
3258 stream_.deviceBuffer = 0;
\r
3263 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3265 void RtApiAsio :: closeStream()
\r
3267 if ( stream_.state == STREAM_CLOSED ) {
\r
3268 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3269 error( RtAudioError::WARNING );
\r
3273 if ( stream_.state == STREAM_RUNNING ) {
\r
3274 stream_.state = STREAM_STOPPED;
\r
3277 ASIODisposeBuffers();
\r
3278 drivers.removeCurrentDriver();
\r
3280 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3282 CloseHandle( handle->condition );
\r
3283 if ( handle->bufferInfos )
\r
3284 free( handle->bufferInfos );
\r
3286 stream_.apiHandle = 0;
\r
3289 for ( int i=0; i<2; i++ ) {
\r
3290 if ( stream_.userBuffer[i] ) {
\r
3291 free( stream_.userBuffer[i] );
\r
3292 stream_.userBuffer[i] = 0;
\r
3296 if ( stream_.deviceBuffer ) {
\r
3297 free( stream_.deviceBuffer );
\r
3298 stream_.deviceBuffer = 0;
\r
3301 stream_.mode = UNINITIALIZED;
\r
3302 stream_.state = STREAM_CLOSED;
\r
3305 bool stopThreadCalled = false;
\r
3307 void RtApiAsio :: startStream()
\r
3310 if ( stream_.state == STREAM_RUNNING ) {
\r
3311 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3317 ASIOError result = ASIOStart();
\r
3318 if ( result != ASE_OK ) {
\r
3319 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3320 errorText_ = errorStream_.str();
\r
3324 handle->drainCounter = 0;
\r
3325 handle->internalDrain = false;
\r
3326 ResetEvent( handle->condition );
\r
3327 stream_.state = STREAM_RUNNING;
\r
3331 stopThreadCalled = false;
\r
3333 if ( result == ASE_OK ) return;
\r
3334 error( RtAudioError::SYSTEM_ERROR );
\r
3337 void RtApiAsio :: stopStream()
\r
3340 if ( stream_.state == STREAM_STOPPED ) {
\r
3341 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3342 error( RtAudioError::WARNING );
\r
3346 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3347 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3348 if ( handle->drainCounter == 0 ) {
\r
3349 handle->drainCounter = 2;
\r
3350 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3354 stream_.state = STREAM_STOPPED;
\r
3356 ASIOError result = ASIOStop();
\r
3357 if ( result != ASE_OK ) {
\r
3358 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3359 errorText_ = errorStream_.str();
\r
3362 if ( result == ASE_OK ) return;
\r
3363 error( RtAudioError::SYSTEM_ERROR );
\r
3366 void RtApiAsio :: abortStream()
\r
3369 if ( stream_.state == STREAM_STOPPED ) {
\r
3370 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3371 error( RtAudioError::WARNING );
\r
3375 // The following lines were commented-out because some behavior was
\r
3376 // noted where the device buffers need to be zeroed to avoid
\r
3377 // continuing sound, even when the device buffers are completely
\r
3378 // disposed. So now, calling abort is the same as calling stop.
\r
3379 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3380 // handle->drainCounter = 2;
\r
3384 // This function will be called by a spawned thread when the user
\r
3385 // callback function signals that the stream should be stopped or
\r
3386 // aborted. It is necessary to handle it this way because the
\r
3387 // callbackEvent() function must return before the ASIOStop()
\r
3388 // function will return.
\r
3389 static unsigned __stdcall asioStopStream( void *ptr )
\r
3391 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3392 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3394 object->stopStream();
\r
3395 _endthreadex( 0 );
\r
3399 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3401 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3402 if ( stream_.state == STREAM_CLOSED ) {
\r
3403 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3404 error( RtAudioError::WARNING );
\r
3408 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3409 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3411 // Check if we were draining the stream and signal if finished.
\r
3412 if ( handle->drainCounter > 3 ) {
\r
3414 stream_.state = STREAM_STOPPING;
\r
3415 if ( handle->internalDrain == false )
\r
3416 SetEvent( handle->condition );
\r
3417 else { // spawn a thread to stop the stream
\r
3418 unsigned threadId;
\r
3419 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3420 &stream_.callbackInfo, 0, &threadId );
\r
3425 // Invoke user callback to get fresh output data UNLESS we are
\r
3426 // draining stream.
\r
3427 if ( handle->drainCounter == 0 ) {
\r
3428 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3429 double streamTime = getStreamTime();
\r
3430 RtAudioStreamStatus status = 0;
\r
3431 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3432 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3435 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3436 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3439 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3440 stream_.bufferSize, streamTime, status, info->userData );
\r
3441 if ( cbReturnValue == 2 ) {
\r
3442 stream_.state = STREAM_STOPPING;
\r
3443 handle->drainCounter = 2;
\r
3444 unsigned threadId;
\r
3445 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3446 &stream_.callbackInfo, 0, &threadId );
\r
3449 else if ( cbReturnValue == 1 ) {
\r
3450 handle->drainCounter = 1;
\r
3451 handle->internalDrain = true;
\r
3455 unsigned int nChannels, bufferBytes, i, j;
\r
3456 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3457 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3459 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3461 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3463 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3464 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3465 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3469 else if ( stream_.doConvertBuffer[0] ) {
\r
3471 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3472 if ( stream_.doByteSwap[0] )
\r
3473 byteSwapBuffer( stream_.deviceBuffer,
\r
3474 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3475 stream_.deviceFormat[0] );
\r
3477 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3478 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3479 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3480 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3486 if ( stream_.doByteSwap[0] )
\r
3487 byteSwapBuffer( stream_.userBuffer[0],
\r
3488 stream_.bufferSize * stream_.nUserChannels[0],
\r
3489 stream_.userFormat );
\r
3491 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3492 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3493 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3494 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3500 // Don't bother draining input
\r
3501 if ( handle->drainCounter ) {
\r
3502 handle->drainCounter++;
\r
3506 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3508 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3510 if (stream_.doConvertBuffer[1]) {
\r
3512 // Always interleave ASIO input data.
\r
3513 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3514 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3515 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3516 handle->bufferInfos[i].buffers[bufferIndex],
\r
3520 if ( stream_.doByteSwap[1] )
\r
3521 byteSwapBuffer( stream_.deviceBuffer,
\r
3522 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3523 stream_.deviceFormat[1] );
\r
3524 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3528 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3529 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3530 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3531 handle->bufferInfos[i].buffers[bufferIndex],
\r
3536 if ( stream_.doByteSwap[1] )
\r
3537 byteSwapBuffer( stream_.userBuffer[1],
\r
3538 stream_.bufferSize * stream_.nUserChannels[1],
\r
3539 stream_.userFormat );
\r
3544 // The following call was suggested by Malte Clasen. While the API
\r
3545 // documentation indicates it should not be required, some device
\r
3546 // drivers apparently do not function correctly without it.
\r
3547 ASIOOutputReady();
\r
3549 RtApi::tickStreamTime();
\r
3553 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3555 // The ASIO documentation says that this usually only happens during
\r
3556 // external sync. Audio processing is not stopped by the driver,
\r
3557 // actual sample rate might not have even changed, maybe only the
\r
3558 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3561 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3563 object->stopStream();
\r
3565 catch ( RtAudioError &exception ) {
\r
3566 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3570 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3573 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3577 switch( selector ) {
\r
3578 case kAsioSelectorSupported:
\r
3579 if ( value == kAsioResetRequest
\r
3580 || value == kAsioEngineVersion
\r
3581 || value == kAsioResyncRequest
\r
3582 || value == kAsioLatenciesChanged
\r
3583 // The following three were added for ASIO 2.0, you don't
\r
3584 // necessarily have to support them.
\r
3585 || value == kAsioSupportsTimeInfo
\r
3586 || value == kAsioSupportsTimeCode
\r
3587 || value == kAsioSupportsInputMonitor)
\r
3590 case kAsioResetRequest:
\r
3591 // Defer the task and perform the reset of the driver during the
\r
3592 // next "safe" situation. You cannot reset the driver right now,
\r
3593 // as this code is called from the driver. Reset the driver is
\r
3594 // done by completely destruct is. I.e. ASIOStop(),
\r
3595 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3597 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3600 case kAsioResyncRequest:
\r
3601 // This informs the application that the driver encountered some
\r
3602 // non-fatal data loss. It is used for synchronization purposes
\r
3603 // of different media. Added mainly to work around the Win16Mutex
\r
3604 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3605 // which could lose data because the Mutex was held too long by
\r
3606 // another thread. However a driver can issue it in other
\r
3607 // situations, too.
\r
3608 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3612 case kAsioLatenciesChanged:
\r
3613 // This will inform the host application that the drivers were
\r
3614 // latencies changed. Beware, it this does not mean that the
\r
3615 // buffer sizes have changed! You might need to update internal
\r
3617 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3620 case kAsioEngineVersion:
\r
3621 // Return the supported ASIO version of the host application. If
\r
3622 // a host application does not implement this selector, ASIO 1.0
\r
3623 // is assumed by the driver.
\r
3626 case kAsioSupportsTimeInfo:
\r
3627 // Informs the driver whether the
\r
3628 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3629 // For compatibility with ASIO 1.0 drivers the host application
\r
3630 // should always support the "old" bufferSwitch method, too.
\r
3633 case kAsioSupportsTimeCode:
\r
3634 // Informs the driver whether application is interested in time
\r
3635 // code info. If an application does not need to know about time
\r
3636 // code, the driver has less work to do.
\r
3643 static const char* getAsioErrorString( ASIOError result )
\r
3648 const char*message;
\r
3651 static const Messages m[] =
\r
3653 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3654 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3655 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3656 { ASE_InvalidMode, "Invalid mode." },
\r
3657 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3658 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3659 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3662 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3663 if ( m[i].value == result ) return m[i].message;
\r
3665 return "Unknown error.";
\r
3668 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3672 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3674 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3675 // - Introduces support for the Windows WASAPI API
\r
3676 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3677 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3678 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3683 #include <audioclient.h>
\r
3685 #include <mmdeviceapi.h>
\r
3686 #include <functiondiscoverykeys_devpkey.h>
\r
3688 //=============================================================================
\r
3690 #define SAFE_RELEASE( objectPtr )\
\r
3693 objectPtr->Release();\
\r
3694 objectPtr = NULL;\
\r
3697 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3699 //-----------------------------------------------------------------------------
\r
3701 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3702 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3703 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3704 // provide intermediate storage for read / write synchronization.
\r
3705 class WasapiBuffer
\r
3709 : buffer_( NULL ),
\r
3718 // sets the length of the internal ring buffer
\r
3719 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3722 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3724 bufferSize_ = bufferSize;
\r
3729 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3730 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3732 if ( !buffer || // incoming buffer is NULL
\r
3733 bufferSize == 0 || // incoming buffer has no data
\r
3734 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3739 unsigned int relOutIndex = outIndex_;
\r
3740 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3741 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3742 relOutIndex += bufferSize_;
\r
3745 // "in" index can end on the "out" index but cannot begin at it
\r
3746 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3747 return false; // not enough space between "in" index and "out" index
\r
3750 // copy buffer from external to internal
\r
3751 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3752 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3753 int fromInSize = bufferSize - fromZeroSize;
\r
3757 case RTAUDIO_SINT8:
\r
3758 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3759 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3761 case RTAUDIO_SINT16:
\r
3762 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3763 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3765 case RTAUDIO_SINT24:
\r
3766 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3767 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3769 case RTAUDIO_SINT32:
\r
3770 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3771 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3773 case RTAUDIO_FLOAT32:
\r
3774 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3775 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3777 case RTAUDIO_FLOAT64:
\r
3778 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3779 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3783 // update "in" index
\r
3784 inIndex_ += bufferSize;
\r
3785 inIndex_ %= bufferSize_;
\r
3790 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3791 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3793 if ( !buffer || // incoming buffer is NULL
\r
3794 bufferSize == 0 || // incoming buffer has no data
\r
3795 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3800 unsigned int relInIndex = inIndex_;
\r
3801 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3802 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3803 relInIndex += bufferSize_;
\r
3806 // "out" index can begin at and end on the "in" index
\r
3807 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3808 return false; // not enough space between "out" index and "in" index
\r
3811 // copy buffer from internal to external
\r
3812 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3813 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3814 int fromOutSize = bufferSize - fromZeroSize;
\r
3818 case RTAUDIO_SINT8:
\r
3819 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3820 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3822 case RTAUDIO_SINT16:
\r
3823 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3824 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3826 case RTAUDIO_SINT24:
\r
3827 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3828 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3830 case RTAUDIO_SINT32:
\r
3831 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3832 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3834 case RTAUDIO_FLOAT32:
\r
3835 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3836 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3838 case RTAUDIO_FLOAT64:
\r
3839 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3840 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3844 // update "out" index
\r
3845 outIndex_ += bufferSize;
\r
3846 outIndex_ %= bufferSize_;
\r
3853 unsigned int bufferSize_;
\r
3854 unsigned int inIndex_;
\r
3855 unsigned int outIndex_;
\r
3858 //-----------------------------------------------------------------------------
\r
3860 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3861 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3862 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3863 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3864 // one rate and its multiple.
\r
3865 void convertBufferWasapi( char* outBuffer,
\r
3866 const char* inBuffer,
\r
3867 const unsigned int& channelCount,
\r
3868 const unsigned int& inSampleRate,
\r
3869 const unsigned int& outSampleRate,
\r
3870 const unsigned int& inSampleCount,
\r
3871 unsigned int& outSampleCount,
\r
3872 const RtAudioFormat& format )
\r
3874 // calculate the new outSampleCount and relative sampleStep
\r
3875 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3876 float sampleStep = 1.0f / sampleRatio;
\r
3877 float inSampleFraction = 0.0f;
\r
3879 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3881 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3882 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3884 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3888 case RTAUDIO_SINT8:
\r
3889 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3891 case RTAUDIO_SINT16:
\r
3892 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3894 case RTAUDIO_SINT24:
\r
3895 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3897 case RTAUDIO_SINT32:
\r
3898 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3900 case RTAUDIO_FLOAT32:
\r
3901 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3903 case RTAUDIO_FLOAT64:
\r
3904 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3908 // jump to next in sample
\r
3909 inSampleFraction += sampleStep;
\r
3913 //-----------------------------------------------------------------------------
\r
3915 // A structure to hold various information related to the WASAPI implementation.
\r
3916 struct WasapiHandle
\r
3918 IAudioClient* captureAudioClient;
\r
3919 IAudioClient* renderAudioClient;
\r
3920 IAudioCaptureClient* captureClient;
\r
3921 IAudioRenderClient* renderClient;
\r
3922 HANDLE captureEvent;
\r
3923 HANDLE renderEvent;
\r
3926 : captureAudioClient( NULL ),
\r
3927 renderAudioClient( NULL ),
\r
3928 captureClient( NULL ),
\r
3929 renderClient( NULL ),
\r
3930 captureEvent( NULL ),
\r
3931 renderEvent( NULL ) {}
\r
3934 //=============================================================================
\r
3936 RtApiWasapi::RtApiWasapi()
\r
3937 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3939 // WASAPI can run either apartment or multi-threaded
\r
3940 HRESULT hr = CoInitialize( NULL );
\r
3941 if ( !FAILED( hr ) )
\r
3942 coInitialized_ = true;
\r
3944 // Instantiate device enumerator
\r
3945 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3946 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3947 ( void** ) &deviceEnumerator_ );
\r
3949 if ( FAILED( hr ) ) {
\r
3950 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3951 error( RtAudioError::DRIVER_ERROR );
\r
3955 //-----------------------------------------------------------------------------
\r
3957 RtApiWasapi::~RtApiWasapi()
\r
3959 if ( stream_.state != STREAM_CLOSED )
\r
3962 SAFE_RELEASE( deviceEnumerator_ );
\r
3964 // If this object previously called CoInitialize()
\r
3965 if ( coInitialized_ )
\r
3969 //=============================================================================
\r
3971 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3973 unsigned int captureDeviceCount = 0;
\r
3974 unsigned int renderDeviceCount = 0;
\r
3976 IMMDeviceCollection* captureDevices = NULL;
\r
3977 IMMDeviceCollection* renderDevices = NULL;
\r
3979 // Count capture devices
\r
3980 errorText_.clear();
\r
3981 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3987 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3988 if ( FAILED( hr ) ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3993 // Count render devices
\r
3994 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3995 if ( FAILED( hr ) ) {
\r
3996 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4000 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4001 if ( FAILED( hr ) ) {
\r
4002 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4007 // release all references
\r
4008 SAFE_RELEASE( captureDevices );
\r
4009 SAFE_RELEASE( renderDevices );
\r
4011 if ( errorText_.empty() )
\r
4012 return captureDeviceCount + renderDeviceCount;
\r
4014 error( RtAudioError::DRIVER_ERROR );
\r
4018 //-----------------------------------------------------------------------------
\r
4020 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4022 RtAudio::DeviceInfo info;
\r
4023 unsigned int captureDeviceCount = 0;
\r
4024 unsigned int renderDeviceCount = 0;
\r
4025 std::string defaultDeviceName;
\r
4026 bool isCaptureDevice = false;
\r
4028 PROPVARIANT deviceNameProp;
\r
4029 PROPVARIANT defaultDeviceNameProp;
\r
4031 IMMDeviceCollection* captureDevices = NULL;
\r
4032 IMMDeviceCollection* renderDevices = NULL;
\r
4033 IMMDevice* devicePtr = NULL;
\r
4034 IMMDevice* defaultDevicePtr = NULL;
\r
4035 IAudioClient* audioClient = NULL;
\r
4036 IPropertyStore* devicePropStore = NULL;
\r
4037 IPropertyStore* defaultDevicePropStore = NULL;
\r
4039 WAVEFORMATEX* deviceFormat = NULL;
\r
4040 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4043 info.probed = false;
\r
4045 // Count capture devices
\r
4046 errorText_.clear();
\r
4047 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4048 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4049 if ( FAILED( hr ) ) {
\r
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4054 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4055 if ( FAILED( hr ) ) {
\r
4056 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4060 // Count render devices
\r
4061 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4062 if ( FAILED( hr ) ) {
\r
4063 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4067 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4068 if ( FAILED( hr ) ) {
\r
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4073 // validate device index
\r
4074 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4076 errorType = RtAudioError::INVALID_USE;
\r
4080 // determine whether index falls within capture or render devices
\r
4081 if ( device >= renderDeviceCount ) {
\r
4082 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4087 isCaptureDevice = true;
\r
4090 hr = renderDevices->Item( device, &devicePtr );
\r
4091 if ( FAILED( hr ) ) {
\r
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4095 isCaptureDevice = false;
\r
4098 // get default device name
\r
4099 if ( isCaptureDevice ) {
\r
4100 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4101 if ( FAILED( hr ) ) {
\r
4102 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4107 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4108 if ( FAILED( hr ) ) {
\r
4109 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4114 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4115 if ( FAILED( hr ) ) {
\r
4116 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4119 PropVariantInit( &defaultDeviceNameProp );
\r
4121 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4122 if ( FAILED( hr ) ) {
\r
4123 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4127 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4130 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4131 if ( FAILED( hr ) ) {
\r
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4136 PropVariantInit( &deviceNameProp );
\r
4138 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4139 if ( FAILED( hr ) ) {
\r
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4144 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4147 if ( isCaptureDevice ) {
\r
4148 info.isDefaultInput = info.name == defaultDeviceName;
\r
4149 info.isDefaultOutput = false;
\r
4152 info.isDefaultInput = false;
\r
4153 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4157 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4158 if ( FAILED( hr ) ) {
\r
4159 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4163 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4164 if ( FAILED( hr ) ) {
\r
4165 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4169 if ( isCaptureDevice ) {
\r
4170 info.inputChannels = deviceFormat->nChannels;
\r
4171 info.outputChannels = 0;
\r
4172 info.duplexChannels = 0;
\r
4175 info.inputChannels = 0;
\r
4176 info.outputChannels = deviceFormat->nChannels;
\r
4177 info.duplexChannels = 0;
\r
4181 info.sampleRates.clear();
\r
4183 // allow support for all sample rates as we have a built-in sample rate converter
\r
4184 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4185 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4187 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4190 info.nativeFormats = 0;
\r
4192 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4193 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4194 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4196 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4197 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4199 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4200 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4203 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4204 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4205 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4207 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4208 info.nativeFormats |= RTAUDIO_SINT8;
\r
4210 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4211 info.nativeFormats |= RTAUDIO_SINT16;
\r
4213 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4214 info.nativeFormats |= RTAUDIO_SINT24;
\r
4216 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4217 info.nativeFormats |= RTAUDIO_SINT32;
\r
4222 info.probed = true;
\r
4225 // release all references
\r
4226 PropVariantClear( &deviceNameProp );
\r
4227 PropVariantClear( &defaultDeviceNameProp );
\r
4229 SAFE_RELEASE( captureDevices );
\r
4230 SAFE_RELEASE( renderDevices );
\r
4231 SAFE_RELEASE( devicePtr );
\r
4232 SAFE_RELEASE( defaultDevicePtr );
\r
4233 SAFE_RELEASE( audioClient );
\r
4234 SAFE_RELEASE( devicePropStore );
\r
4235 SAFE_RELEASE( defaultDevicePropStore );
\r
4237 CoTaskMemFree( deviceFormat );
\r
4238 CoTaskMemFree( closestMatchFormat );
\r
4240 if ( !errorText_.empty() )
\r
4241 error( errorType );
\r
4245 //-----------------------------------------------------------------------------
\r
4247 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4249 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4250 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4258 //-----------------------------------------------------------------------------
\r
4260 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4262 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4263 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4271 //-----------------------------------------------------------------------------
\r
4273 void RtApiWasapi::closeStream( void )
\r
4275 if ( stream_.state == STREAM_CLOSED ) {
\r
4276 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4277 error( RtAudioError::WARNING );
\r
4281 if ( stream_.state != STREAM_STOPPED )
\r
4284 // clean up stream memory
\r
4285 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4286 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4288 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4289 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4291 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4292 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4294 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4295 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4297 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4298 stream_.apiHandle = NULL;
\r
4300 for ( int i = 0; i < 2; i++ ) {
\r
4301 if ( stream_.userBuffer[i] ) {
\r
4302 free( stream_.userBuffer[i] );
\r
4303 stream_.userBuffer[i] = 0;
\r
4307 if ( stream_.deviceBuffer ) {
\r
4308 free( stream_.deviceBuffer );
\r
4309 stream_.deviceBuffer = 0;
\r
4312 // update stream state
\r
4313 stream_.state = STREAM_CLOSED;
\r
4316 //-----------------------------------------------------------------------------
\r
4318 void RtApiWasapi::startStream( void )
\r
4322 if ( stream_.state == STREAM_RUNNING ) {
\r
4323 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4324 error( RtAudioError::WARNING );
\r
4328 // update stream state
\r
4329 stream_.state = STREAM_RUNNING;
\r
4331 // create WASAPI stream thread
\r
4332 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4334 if ( !stream_.callbackInfo.thread ) {
\r
4335 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4336 error( RtAudioError::THREAD_ERROR );
\r
4339 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4340 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4344 //-----------------------------------------------------------------------------
\r
4346 void RtApiWasapi::stopStream( void )
\r
4350 if ( stream_.state == STREAM_STOPPED ) {
\r
4351 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4352 error( RtAudioError::WARNING );
\r
4356 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4357 stream_.state = STREAM_STOPPING;
\r
4359 // wait until stream thread is stopped
\r
4360 while( stream_.state != STREAM_STOPPED ) {
\r
4364 // Wait for the last buffer to play before stopping.
\r
4365 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4367 // stop capture client if applicable
\r
4368 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4369 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4370 if ( FAILED( hr ) ) {
\r
4371 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4372 error( RtAudioError::DRIVER_ERROR );
\r
4377 // stop render client if applicable
\r
4378 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4379 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4380 if ( FAILED( hr ) ) {
\r
4381 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4382 error( RtAudioError::DRIVER_ERROR );
\r
4387 // close thread handle
\r
4388 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4389 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4390 error( RtAudioError::THREAD_ERROR );
\r
4394 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4397 //-----------------------------------------------------------------------------
\r
4399 void RtApiWasapi::abortStream( void )
\r
4403 if ( stream_.state == STREAM_STOPPED ) {
\r
4404 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4405 error( RtAudioError::WARNING );
\r
4409 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4410 stream_.state = STREAM_STOPPING;
\r
4412 // wait until stream thread is stopped
\r
4413 while ( stream_.state != STREAM_STOPPED ) {
\r
4417 // stop capture client if applicable
\r
4418 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4419 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4420 if ( FAILED( hr ) ) {
\r
4421 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4422 error( RtAudioError::DRIVER_ERROR );
\r
4427 // stop render client if applicable
\r
4428 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4429 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4430 if ( FAILED( hr ) ) {
\r
4431 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4432 error( RtAudioError::DRIVER_ERROR );
\r
4437 // close thread handle
\r
4438 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4439 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4440 error( RtAudioError::THREAD_ERROR );
\r
4444 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4447 //-----------------------------------------------------------------------------
\r
4449 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4450 unsigned int firstChannel, unsigned int sampleRate,
\r
4451 RtAudioFormat format, unsigned int* bufferSize,
\r
4452 RtAudio::StreamOptions* options )
\r
4454 bool methodResult = FAILURE;
\r
4455 unsigned int captureDeviceCount = 0;
\r
4456 unsigned int renderDeviceCount = 0;
\r
4458 IMMDeviceCollection* captureDevices = NULL;
\r
4459 IMMDeviceCollection* renderDevices = NULL;
\r
4460 IMMDevice* devicePtr = NULL;
\r
4461 WAVEFORMATEX* deviceFormat = NULL;
\r
4462 unsigned int bufferBytes;
\r
4463 stream_.state = STREAM_STOPPED;
\r
4465 // create API Handle if not already created
\r
4466 if ( !stream_.apiHandle )
\r
4467 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4469 // Count capture devices
\r
4470 errorText_.clear();
\r
4471 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4472 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4478 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4479 if ( FAILED( hr ) ) {
\r
4480 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4484 // Count render devices
\r
4485 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4486 if ( FAILED( hr ) ) {
\r
4487 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4491 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4492 if ( FAILED( hr ) ) {
\r
4493 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4497 // validate device index
\r
4498 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4499 errorType = RtAudioError::INVALID_USE;
\r
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4504 // determine whether index falls within capture or render devices
\r
4505 if ( device >= renderDeviceCount ) {
\r
4506 if ( mode != INPUT ) {
\r
4507 errorType = RtAudioError::INVALID_USE;
\r
4508 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4512 // retrieve captureAudioClient from devicePtr
\r
4513 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4515 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4516 if ( FAILED( hr ) ) {
\r
4517 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4521 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4522 NULL, ( void** ) &captureAudioClient );
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4528 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4529 if ( FAILED( hr ) ) {
\r
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4534 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4535 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4538 if ( mode != OUTPUT ) {
\r
4539 errorType = RtAudioError::INVALID_USE;
\r
4540 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4544 // retrieve renderAudioClient from devicePtr
\r
4545 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4547 hr = renderDevices->Item( device, &devicePtr );
\r
4548 if ( FAILED( hr ) ) {
\r
4549 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4553 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4554 NULL, ( void** ) &renderAudioClient );
\r
4555 if ( FAILED( hr ) ) {
\r
4556 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4560 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4561 if ( FAILED( hr ) ) {
\r
4562 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4566 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4567 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4570 // fill stream data
\r
4571 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4572 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4573 stream_.mode = DUPLEX;
\r
4576 stream_.mode = mode;
\r
4579 stream_.device[mode] = device;
\r
4580 stream_.doByteSwap[mode] = false;
\r
4581 stream_.sampleRate = sampleRate;
\r
4582 stream_.bufferSize = *bufferSize;
\r
4583 stream_.nBuffers = 1;
\r
4584 stream_.nUserChannels[mode] = channels;
\r
4585 stream_.channelOffset[mode] = firstChannel;
\r
4586 stream_.userFormat = format;
\r
4587 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4589 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4590 stream_.userInterleaved = false;
\r
4592 stream_.userInterleaved = true;
\r
4593 stream_.deviceInterleaved[mode] = true;
\r
4595 // Set flags for buffer conversion.
\r
4596 stream_.doConvertBuffer[mode] = false;
\r
4597 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4598 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4599 stream_.doConvertBuffer[mode] = true;
\r
4600 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4601 stream_.nUserChannels[mode] > 1 )
\r
4602 stream_.doConvertBuffer[mode] = true;
\r
4604 if ( stream_.doConvertBuffer[mode] )
\r
4605 setConvertInfo( mode, 0 );
\r
4607 // Allocate necessary internal buffers
\r
4608 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4610 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4611 if ( !stream_.userBuffer[mode] ) {
\r
4612 errorType = RtAudioError::MEMORY_ERROR;
\r
4613 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4617 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4618 stream_.callbackInfo.priority = 15;
\r
4620 stream_.callbackInfo.priority = 0;
\r
4622 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4623 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4625 methodResult = SUCCESS;
\r
4629 SAFE_RELEASE( captureDevices );
\r
4630 SAFE_RELEASE( renderDevices );
\r
4631 SAFE_RELEASE( devicePtr );
\r
4632 CoTaskMemFree( deviceFormat );
\r
4634 // if method failed, close the stream
\r
4635 if ( methodResult == FAILURE )
\r
4638 if ( !errorText_.empty() )
\r
4639 error( errorType );
\r
4640 return methodResult;
\r
4643 //=============================================================================
\r
4645 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4648 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4653 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4656 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4661 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4664 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4669 //-----------------------------------------------------------------------------
\r
// NOTE(review): this listing is extraction-damaged.  The numeric prefixes are
// remnants of original line numbers, and many lines are missing: the
// goto/Exit error-handling jumps after each errorText_ assignment, closing
// braces, several else keywords, and the HRESULT hr declaration.  The code is
// kept byte-identical below; comments describe the visible intent.  Confirm
// every detail against the canonical RtAudio.cpp before acting on it.
4671 void RtApiWasapi::wasapiThread()

// COM must be initialized per-thread; this is the stream's worker thread.
4673 // as this is a new thread, we must CoInitialize it

4674 CoInitialize( NULL );

// Pull the per-stream WASAPI objects cached on the API handle.
4678 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4679 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4680 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4681 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4682 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4683 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4685 WAVEFORMATEX* captureFormat = NULL;

4686 WAVEFORMATEX* renderFormat = NULL;

// Device-rate / user-rate ratios used to scale buffer sizes for resampling.
4687 float captureSrRatio = 0.0f;

4688 float renderSrRatio = 0.0f;

4689 WasapiBuffer captureBuffer;

4690 WasapiBuffer renderBuffer;

4692 // declare local stream variables

4693 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4694 BYTE* streamBuffer = NULL;

4695 unsigned long captureFlags = 0;

4696 unsigned int bufferFrameCount = 0;

4697 unsigned int numFramesPadding = 0;

4698 unsigned int convBufferSize = 0;

4699 bool callbackPushed = false;

4700 bool callbackPulled = false;

4701 bool callbackStopped = false;

4702 int callbackResult = 0;

4704 // convBuffer is used to store converted buffers between WASAPI and the user

4705 char* convBuffer = NULL;

4706 unsigned int convBuffSize = 0;

4707 unsigned int deviceBuffSize = 0;

4709 errorText_.clear();

4710 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4712 // Attempt to assign "Pro Audio" characteristic to thread

// NOTE(review): in this listing the LoadLibrary / GetProcAddress results are
// used without null checks -- verify against the canonical source.
4713 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4715 DWORD taskIndex = 0;

4716 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4717 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4718 FreeLibrary( AvrtDll );

// One-time capture-side setup: mix format, shared-mode event-driven client
// init, capture service + event wiring, ring-buffer sizing, then Start().
4721 // start capture stream if applicable

4722 if ( captureAudioClient ) {

4723 hr = captureAudioClient->GetMixFormat( &captureFormat );

4724 if ( FAILED( hr ) ) {

4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4729 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4731 // initialize capture stream according to desire buffer size

4732 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// Convert frames to 100-nanosecond REFERENCE_TIME units expected by
// IAudioClient::Initialize.
4733 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

4735 if ( !captureClient ) {

4736 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4737 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4738 desiredBufferPeriod,

4739 desiredBufferPeriod,

4742 if ( FAILED( hr ) ) {

4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4747 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4748 ( void** ) &captureClient );

4749 if ( FAILED( hr ) ) {

4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4754 // configure captureEvent to trigger on every available capture buffer

4755 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4756 if ( !captureEvent ) {

4757 errorType = RtAudioError::SYSTEM_ERROR;

4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4762 hr = captureAudioClient->SetEventHandle( captureEvent );

4763 if ( FAILED( hr ) ) {

4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Cache the newly created client/event back on the API handle so stop/close
// can reuse them.
4768 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4769 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4772 unsigned int inBufferSize = 0;

4773 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4774 if ( FAILED( hr ) ) {

4775 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4779 // scale outBufferSize according to stream->user sample rate ratio

4780 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4781 inBufferSize *= stream_.nDeviceChannels[INPUT];

4783 // set captureBuffer size

4784 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4786 // reset the capture stream

4787 hr = captureAudioClient->Reset();

4788 if ( FAILED( hr ) ) {

4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4793 // start the capture stream

4794 hr = captureAudioClient->Start();

4795 if ( FAILED( hr ) ) {

4796 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

// Render-side setup mirrors the capture-side setup above.
4801 // start render stream if applicable

4802 if ( renderAudioClient ) {

4803 hr = renderAudioClient->GetMixFormat( &renderFormat );

4804 if ( FAILED( hr ) ) {

4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4809 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4811 // initialize render stream according to desire buffer size

4812 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4813 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4815 if ( !renderClient ) {

4816 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4817 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4818 desiredBufferPeriod,

4819 desiredBufferPeriod,

4822 if ( FAILED( hr ) ) {

4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4827 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4828 ( void** ) &renderClient );

4829 if ( FAILED( hr ) ) {

4830 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4834 // configure renderEvent to trigger on every available render buffer

4835 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4836 if ( !renderEvent ) {

4837 errorType = RtAudioError::SYSTEM_ERROR;

4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4842 hr = renderAudioClient->SetEventHandle( renderEvent );

4843 if ( FAILED( hr ) ) {

4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4848 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4849 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4852 unsigned int outBufferSize = 0;

4853 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4854 if ( FAILED( hr ) ) {

4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4859 // scale inBufferSize according to user->stream sample rate ratio

4860 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4861 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4863 // set renderBuffer size

4864 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4866 // reset the render stream

4867 hr = renderAudioClient->Reset();

4868 if ( FAILED( hr ) ) {

4869 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4873 // start the render stream

4874 hr = renderAudioClient->Start();

4875 if ( FAILED( hr ) ) {

4876 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the sample-rate-conversion buffer and the device staging buffer for
// the worst case of the active direction(s).
4881 if ( stream_.mode == INPUT ) {

4882 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4883 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4885 else if ( stream_.mode == OUTPUT ) {

4886 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4887 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4889 else if ( stream_.mode == DUPLEX ) {

4890 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4891 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4892 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4893 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4896 convBuffer = ( char* ) malloc( convBuffSize );

4897 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4898 if ( !convBuffer || !stream_.deviceBuffer ) {

4899 errorType = RtAudioError::MEMORY_ERROR;

4900 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

// Each iteration: pull input from the capture ring buffer, run the user
// callback, push output to the render ring buffer, then service the WASAPI
// capture/render endpoints.
4904 // stream process loop

4905 while ( stream_.state != STREAM_STOPPING ) {

4906 if ( !callbackPulled ) {

4909 // 1. Pull callback buffer from inputBuffer

4910 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4911 // Convert callback buffer to user format

4913 if ( captureAudioClient ) {

4914 // Pull callback buffer from inputBuffer

4915 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4916 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4917 stream_.deviceFormat[INPUT] );

4919 if ( callbackPulled ) {

4920 // Convert callback buffer to user sample rate

4921 convertBufferWasapi( stream_.deviceBuffer,

4923 stream_.nDeviceChannels[INPUT],

4924 captureFormat->nSamplesPerSec,

4925 stream_.sampleRate,

4926 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4928 stream_.deviceFormat[INPUT] );

4930 if ( stream_.doConvertBuffer[INPUT] ) {

4931 // Convert callback buffer to user format

4932 convertBuffer( stream_.userBuffer[INPUT],

4933 stream_.deviceBuffer,

4934 stream_.convertInfo[INPUT] );

4937 // no further conversion, simple copy deviceBuffer to userBuffer

4938 memcpy( stream_.userBuffer[INPUT],

4939 stream_.deviceBuffer,

4940 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4945 // if there is no capture stream, set callbackPulled flag

4946 callbackPulled = true;

4949 // Execute Callback

4950 // ================

4951 // 1. Execute user callback method

4952 // 2. Handle return value from callback

4954 // if callback has not requested the stream to stop

4955 if ( callbackPulled && !callbackStopped ) {

4956 // Execute user callback method

4957 callbackResult = callback( stream_.userBuffer[OUTPUT],

4958 stream_.userBuffer[INPUT],

4959 stream_.bufferSize,

4961 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4962 stream_.callbackInfo.userData );

// A return of 1 requests a drain-and-stop; 2 requests an immediate abort.
// Either way the stop/abort must run on a separate thread, not here.
4964 // Handle return value from callback

4965 if ( callbackResult == 1 ) {

4966 // instantiate a thread to stop this thread

4967 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4968 if ( !threadHandle ) {

4969 errorType = RtAudioError::THREAD_ERROR;

4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4973 else if ( !CloseHandle( threadHandle ) ) {

4974 errorType = RtAudioError::THREAD_ERROR;

4975 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4979 callbackStopped = true;

4981 else if ( callbackResult == 2 ) {

4982 // instantiate a thread to stop this thread

4983 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4984 if ( !threadHandle ) {

4985 errorType = RtAudioError::THREAD_ERROR;

4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4989 else if ( !CloseHandle( threadHandle ) ) {

4990 errorType = RtAudioError::THREAD_ERROR;

4991 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4995 callbackStopped = true;

5000 // Callback Output

5001 // ===============

5002 // 1. Convert callback buffer to stream format

5003 // 2. Convert callback buffer to stream sample rate and channel count

5004 // 3. Push callback buffer into outputBuffer

5006 if ( renderAudioClient && callbackPulled ) {

5007 if ( stream_.doConvertBuffer[OUTPUT] ) {

5008 // Convert callback buffer to stream format

5009 convertBuffer( stream_.deviceBuffer,

5010 stream_.userBuffer[OUTPUT],

5011 stream_.convertInfo[OUTPUT] );

5015 // Convert callback buffer to stream sample rate

5016 convertBufferWasapi( convBuffer,

5017 stream_.deviceBuffer,

5018 stream_.nDeviceChannels[OUTPUT],

5019 stream_.sampleRate,

5020 renderFormat->nSamplesPerSec,

5021 stream_.bufferSize,

5023 stream_.deviceFormat[OUTPUT] );

5025 // Push callback buffer into outputBuffer

5026 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5027 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5028 stream_.deviceFormat[OUTPUT] );

5031 // if there is no render stream, set callbackPushed flag

5032 callbackPushed = true;

// Service the WASAPI capture endpoint: block on the capture event only when
// no callback data was available, then move the device buffer into the
// captureBuffer ring buffer.
5037 // 1. Get capture buffer from stream

5038 // 2. Push capture buffer into inputBuffer

5039 // 3. If 2. was successful: Release capture buffer

5041 if ( captureAudioClient ) {

5042 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5043 if ( !callbackPulled ) {

5044 WaitForSingleObject( captureEvent, INFINITE );

5047 // Get capture buffer from stream

5048 hr = captureClient->GetBuffer( &streamBuffer,

5049 &bufferFrameCount,

5050 &captureFlags, NULL, NULL );

5051 if ( FAILED( hr ) ) {

5052 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5056 if ( bufferFrameCount != 0 ) {

5057 // Push capture buffer into inputBuffer

5058 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5059 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5060 stream_.deviceFormat[INPUT] ) )

5062 // Release capture buffer

5063 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5064 if ( FAILED( hr ) ) {

5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5071 // Inform WASAPI that capture was unsuccessful

5072 hr = captureClient->ReleaseBuffer( 0 );

5073 if ( FAILED( hr ) ) {

5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5081 // Inform WASAPI that capture was unsuccessful

5082 hr = captureClient->ReleaseBuffer( 0 );

5083 if ( FAILED( hr ) ) {

5084 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

// Service the WASAPI render endpoint: block on the render event when the
// callback output could not be pushed, then fill however many frames the
// device can currently accept (buffer size minus padding).
5092 // 1. Get render buffer from stream

5093 // 2. Pull next buffer from outputBuffer

5094 // 3. If 2. was successful: Fill render buffer with next buffer

5095 // Release render buffer

5097 if ( renderAudioClient ) {

5098 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5099 if ( callbackPulled && !callbackPushed ) {

5100 WaitForSingleObject( renderEvent, INFINITE );

5103 // Get render buffer from stream

5104 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5105 if ( FAILED( hr ) ) {

5106 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5110 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5111 if ( FAILED( hr ) ) {

5112 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

5116 bufferFrameCount -= numFramesPadding;

5118 if ( bufferFrameCount != 0 ) {

5119 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5120 if ( FAILED( hr ) ) {

5121 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5125 // Pull next buffer from outputBuffer

5126 // Fill render buffer with next buffer

5127 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5128 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5129 stream_.deviceFormat[OUTPUT] ) )

5131 // Release render buffer

5132 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5133 if ( FAILED( hr ) ) {

5134 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5140 // Inform WASAPI that render was unsuccessful

5141 hr = renderClient->ReleaseBuffer( 0, 0 );

5142 if ( FAILED( hr ) ) {

5143 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5150 // Inform WASAPI that render was unsuccessful

5151 hr = renderClient->ReleaseBuffer( 0, 0 );

5152 if ( FAILED( hr ) ) {

5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5159 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5160 if ( callbackPushed ) {

5161 callbackPulled = false;

5164 // tick stream time

5165 RtApi::tickStreamTime();

// Cleanup: release COM-allocated mix formats and the conversion buffer, mark
// the stream stopped, and report any pending error.  (The Exit label these
// error paths jump to is among the lines lost from this listing.)
5170 CoTaskMemFree( captureFormat );

5171 CoTaskMemFree( renderFormat );

5173 free ( convBuffer );

5177 // update stream state

5178 stream_.state = STREAM_STOPPED;

5180 if ( errorText_.empty() )

5183 error( errorType );
\r
5186 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5190 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5192 // Modified by Robin Davies, October 2005
\r
5193 // - Improvements to DirectX pointer chasing.
\r
5194 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5195 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5196 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5197 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5199 #include <dsound.h>
\r
5200 #include <assert.h>
\r
5201 #include <algorithm>
\r
5203 #if defined(__MINGW32__)
\r
5204 // missing from latest mingw winapi
\r
5205 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5206 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5207 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5208 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5211 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5213 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5214 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5217 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5219 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5220 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5221 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5222 return pointer >= earlierPointer && pointer < laterPointer;
\r
// NOTE(review): this region lost its "struct DsHandle {" header line and
// several members (the id[2]/buffer[2]/xrun[2] arrays that the constructor
// initializer below zeroes) during extraction -- confirm against the
// canonical RtAudio.cpp.
5225 // A structure to hold various information related to the DirectSound

5226 // API implementation.

5228 unsigned int drainCounter; // Tracks callback counts when draining

5229 bool internalDrain; // Indicates if stop is initiated from callback or not.

// Per-direction state, indexed [0] = output / [1] = input.
5233 UINT bufferPointer[2];

5234 DWORD dsBufferSize[2];

5235 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default constructor: zeroes the drain counter and flag plus the id,
// buffer, xrun and bufferPointer pairs.
5239 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5242 // Declarations for utility functions, callbacks, and structures

5243 // specific to the DirectSound implementation.

// Enumeration callback handed to DirectSound(Capture)Enumerate; one
// parameter line (the module name) is missing from this listing.
5244 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

5245 LPCTSTR description,

5247 LPVOID lpContext );

// Maps a DirectSound HRESULT to a human-readable message.
5249 static const char* getErrorString( int code );

// _beginthreadex entry point for the DirectSound callback thread.
5251 static unsigned __stdcall callbackHandler( void *ptr );

// NOTE(review): the DsDevice struct declaration this constructor initializer
// belongs to is missing here -- it marks a device as not-yet-found with no
// valid output/input GUIDs.
5260 : found(false) { validId[0] = false; validId[1] = false; }

// Bundles the enumeration direction with the device list passed through
// deviceQueryCallback's context pointer (the "bool isInput;" member line is
// missing from this listing).
5263 struct DsProbeData {

5265 std::vector<struct DsDevice>* dsDevices;
\r
5268 RtApiDs :: RtApiDs()
\r
5270 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5271 // accept whatever the mainline chose for a threading model.
\r
5272 coInitialized_ = false;
\r
5273 HRESULT hr = CoInitialize( NULL );
\r
5274 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5277 RtApiDs :: ~RtApiDs()
\r
5279 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5280 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5283 // The DirectSound default output is always the first device.
\r
5284 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5289 // The DirectSound default input is always the first input device,
\r
5290 // which is the first capture device enumerated.
\r
5291 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5296 unsigned int RtApiDs :: getDeviceCount( void )
\r
5298 // Set query flag for previously found devices to false, so that we
\r
5299 // can check for any devices that have disappeared.
\r
5300 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5301 dsDevices[i].found = false;
\r
5303 // Query DirectSound devices.
\r
5304 struct DsProbeData probeInfo;
\r
5305 probeInfo.isInput = false;
\r
5306 probeInfo.dsDevices = &dsDevices;
\r
5307 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5308 if ( FAILED( result ) ) {
\r
5309 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5310 errorText_ = errorStream_.str();
\r
5311 error( RtAudioError::WARNING );
\r
5314 // Query DirectSoundCapture devices.
\r
5315 probeInfo.isInput = true;
\r
5316 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5317 if ( FAILED( result ) ) {
\r
5318 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5319 errorText_ = errorStream_.str();
\r
5320 error( RtAudioError::WARNING );
\r
5323 // Clean out any devices that may have disappeared.
\r
5324 std::vector< int > indices;
\r
5325 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5326 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5327 //unsigned int nErased = 0;
\r
5328 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5329 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5330 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5332 return static_cast<unsigned int>(dsDevices.size());
\r
5335 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5337 RtAudio::DeviceInfo info;
\r
5338 info.probed = false;
\r
5340 if ( dsDevices.size() == 0 ) {
\r
5341 // Force a query of all devices
\r
5343 if ( dsDevices.size() == 0 ) {
\r
5344 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5345 error( RtAudioError::INVALID_USE );
\r
5350 if ( device >= dsDevices.size() ) {
\r
5351 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5352 error( RtAudioError::INVALID_USE );
\r
5357 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5359 LPDIRECTSOUND output;
\r
5361 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5362 if ( FAILED( result ) ) {
\r
5363 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5364 errorText_ = errorStream_.str();
\r
5365 error( RtAudioError::WARNING );
\r
5369 outCaps.dwSize = sizeof( outCaps );
\r
5370 result = output->GetCaps( &outCaps );
\r
5371 if ( FAILED( result ) ) {
\r
5372 output->Release();
\r
5373 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5374 errorText_ = errorStream_.str();
\r
5375 error( RtAudioError::WARNING );
\r
5379 // Get output channel information.
\r
5380 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5382 // Get sample rate information.
\r
5383 info.sampleRates.clear();
\r
5384 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5385 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5386 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5387 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5389 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5390 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5394 // Get format information.
\r
5395 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5396 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5398 output->Release();
\r
5400 if ( getDefaultOutputDevice() == device )
\r
5401 info.isDefaultOutput = true;
\r
5403 if ( dsDevices[ device ].validId[1] == false ) {
\r
5404 info.name = dsDevices[ device ].name;
\r
5405 info.probed = true;
\r
5411 LPDIRECTSOUNDCAPTURE input;
\r
5412 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5413 if ( FAILED( result ) ) {
\r
5414 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5415 errorText_ = errorStream_.str();
\r
5416 error( RtAudioError::WARNING );
\r
5421 inCaps.dwSize = sizeof( inCaps );
\r
5422 result = input->GetCaps( &inCaps );
\r
5423 if ( FAILED( result ) ) {
\r
5425 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5426 errorText_ = errorStream_.str();
\r
5427 error( RtAudioError::WARNING );
\r
5431 // Get input channel information.
\r
5432 info.inputChannels = inCaps.dwChannels;
\r
5434 // Get sample rate and format information.
\r
5435 std::vector<unsigned int> rates;
\r
5436 if ( inCaps.dwChannels >= 2 ) {
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5442 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5443 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5446 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5448 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5449 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5452 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5454 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5455 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5456 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5459 else if ( inCaps.dwChannels == 1 ) {
\r
5460 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5461 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5462 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5463 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5464 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5465 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5466 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5467 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5469 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5470 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5471 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5472 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5473 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5475 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5476 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5477 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5478 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5479 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5482 else info.inputChannels = 0; // technically, this would be an error
\r
5486 if ( info.inputChannels == 0 ) return info;
\r
5488 // Copy the supported rates to the info structure but avoid duplication.
\r
5490 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5492 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5493 if ( rates[i] == info.sampleRates[j] ) {
\r
5498 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5500 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5502 // If device opens for both playback and capture, we determine the channels.
\r
5503 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5504 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5506 if ( device == 0 ) info.isDefaultInput = true;
\r
5508 // Copy name and return.
\r
5509 info.name = dsDevices[ device ].name;
\r
5510 info.probed = true;
\r
5514 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5515 unsigned int firstChannel, unsigned int sampleRate,
\r
5516 RtAudioFormat format, unsigned int *bufferSize,
\r
5517 RtAudio::StreamOptions *options )
\r
5519 if ( channels + firstChannel > 2 ) {
\r
5520 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5524 size_t nDevices = dsDevices.size();
\r
5525 if ( nDevices == 0 ) {
\r
5526 // This should not happen because a check is made before this function is called.
\r
5527 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5531 if ( device >= nDevices ) {
\r
5532 // This should not happen because a check is made before this function is called.
\r
5533 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5537 if ( mode == OUTPUT ) {
\r
5538 if ( dsDevices[ device ].validId[0] == false ) {
\r
5539 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5540 errorText_ = errorStream_.str();
\r
5544 else { // mode == INPUT
\r
5545 if ( dsDevices[ device ].validId[1] == false ) {
\r
5546 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5547 errorText_ = errorStream_.str();
\r
5552 // According to a note in PortAudio, using GetDesktopWindow()
\r
5553 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5554 // that occur when the application's window is not the foreground
\r
5555 // window. Also, if the application window closes before the
\r
5556 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5557 // problems when using GetDesktopWindow() but it seems fine now
\r
5558 // (January 2010). I'll leave it commented here.
\r
5559 // HWND hWnd = GetForegroundWindow();
\r
5560 HWND hWnd = GetDesktopWindow();
\r
5562 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5563 // two. This is a judgement call and a value of two is probably too
\r
5564 // low for capture, but it should work for playback.
\r
5566 if ( options ) nBuffers = options->numberOfBuffers;
\r
5567 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5568 if ( nBuffers < 2 ) nBuffers = 3;
\r
5570 // Check the lower range of the user-specified buffer size and set
\r
5571 // (arbitrarily) to a lower bound of 32.
\r
5572 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5574 // Create the wave format structure. The data format setting will
\r
5575 // be determined later.
\r
5576 WAVEFORMATEX waveFormat;
\r
5577 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5578 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5579 waveFormat.nChannels = channels + firstChannel;
\r
5580 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5582 // Determine the device buffer size. By default, we'll use the value
\r
5583 // defined above (32K), but we will grow it to make allowances for
\r
5584 // very large software buffer sizes.
\r
5585 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5586 DWORD dsPointerLeadTime = 0;
\r
5588 void *ohandle = 0, *bhandle = 0;
\r
5590 if ( mode == OUTPUT ) {
\r
5592 LPDIRECTSOUND output;
\r
5593 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5594 if ( FAILED( result ) ) {
\r
5595 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5596 errorText_ = errorStream_.str();
\r
5601 outCaps.dwSize = sizeof( outCaps );
\r
5602 result = output->GetCaps( &outCaps );
\r
5603 if ( FAILED( result ) ) {
\r
5604 output->Release();
\r
5605 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5606 errorText_ = errorStream_.str();
\r
5610 // Check channel information.
\r
5611 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5612 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5613 errorText_ = errorStream_.str();
\r
5617 // Check format information. Use 16-bit format unless not
\r
5618 // supported or user requests 8-bit.
\r
5619 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5620 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5621 waveFormat.wBitsPerSample = 16;
\r
5622 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5625 waveFormat.wBitsPerSample = 8;
\r
5626 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5628 stream_.userFormat = format;
\r
5630 // Update wave format structure and buffer information.
\r
5631 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5632 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5633 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5635 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5636 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5637 dsBufferSize *= 2;
\r
5639 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5640 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5641 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5642 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5643 if ( FAILED( result ) ) {
\r
5644 output->Release();
\r
5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5646 errorText_ = errorStream_.str();
\r
5650 // Even though we will write to the secondary buffer, we need to
\r
5651 // access the primary buffer to set the correct output format
\r
5652 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5653 // buffer description.
\r
5654 DSBUFFERDESC bufferDescription;
\r
5655 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5656 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5657 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5659 // Obtain the primary buffer
\r
5660 LPDIRECTSOUNDBUFFER buffer;
\r
5661 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5662 if ( FAILED( result ) ) {
\r
5663 output->Release();
\r
5664 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5665 errorText_ = errorStream_.str();
\r
5669 // Set the primary DS buffer sound format.
\r
5670 result = buffer->SetFormat( &waveFormat );
\r
5671 if ( FAILED( result ) ) {
\r
5672 output->Release();
\r
5673 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5674 errorText_ = errorStream_.str();
\r
5678 // Setup the secondary DS buffer description.
\r
5679 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5680 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5681 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5682 DSBCAPS_GLOBALFOCUS |
\r
5683 DSBCAPS_GETCURRENTPOSITION2 |
\r
5684 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5685 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5686 bufferDescription.lpwfxFormat = &waveFormat;
\r
5688 // Try to create the secondary DS buffer. If that doesn't work,
\r
5689 // try to use software mixing. Otherwise, there's a problem.
\r
5690 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5691 if ( FAILED( result ) ) {
\r
5692 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5693 DSBCAPS_GLOBALFOCUS |
\r
5694 DSBCAPS_GETCURRENTPOSITION2 |
\r
5695 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5696 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5697 if ( FAILED( result ) ) {
\r
5698 output->Release();
\r
5699 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5700 errorText_ = errorStream_.str();
\r
5705 // Get the buffer size ... might be different from what we specified.
\r
5707 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5708 result = buffer->GetCaps( &dsbcaps );
\r
5709 if ( FAILED( result ) ) {
\r
5710 output->Release();
\r
5711 buffer->Release();
\r
5712 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5713 errorText_ = errorStream_.str();
\r
5717 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5719 // Lock the DS buffer
\r
5722 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5723 if ( FAILED( result ) ) {
\r
5724 output->Release();
\r
5725 buffer->Release();
\r
5726 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5727 errorText_ = errorStream_.str();
\r
5731 // Zero the DS buffer
\r
5732 ZeroMemory( audioPtr, dataLen );
\r
5734 // Unlock the DS buffer
\r
5735 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5736 if ( FAILED( result ) ) {
\r
5737 output->Release();
\r
5738 buffer->Release();
\r
5739 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5740 errorText_ = errorStream_.str();
\r
5744 ohandle = (void *) output;
\r
5745 bhandle = (void *) buffer;
\r
5748 if ( mode == INPUT ) {
\r
5750 LPDIRECTSOUNDCAPTURE input;
\r
5751 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5752 if ( FAILED( result ) ) {
\r
5753 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5754 errorText_ = errorStream_.str();
\r
5759 inCaps.dwSize = sizeof( inCaps );
\r
5760 result = input->GetCaps( &inCaps );
\r
5761 if ( FAILED( result ) ) {
\r
5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5764 errorText_ = errorStream_.str();
\r
5768 // Check channel information.
\r
5769 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5770 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5774 // Check format information. Use 16-bit format unless user
\r
5775 // requests 8-bit.
\r
5776 DWORD deviceFormats;
\r
5777 if ( channels + firstChannel == 2 ) {
\r
5778 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5779 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5780 waveFormat.wBitsPerSample = 8;
\r
5781 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5783 else { // assume 16-bit is supported
\r
5784 waveFormat.wBitsPerSample = 16;
\r
5785 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5788 else { // channel == 1
\r
5789 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5790 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5791 waveFormat.wBitsPerSample = 8;
\r
5792 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5794 else { // assume 16-bit is supported
\r
5795 waveFormat.wBitsPerSample = 16;
\r
5796 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5799 stream_.userFormat = format;
\r
5801 // Update wave format structure and buffer information.
\r
5802 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5803 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5804 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5806 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5807 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5808 dsBufferSize *= 2;
\r
5810 // Setup the secondary DS buffer description.
\r
5811 DSCBUFFERDESC bufferDescription;
\r
5812 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5813 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5814 bufferDescription.dwFlags = 0;
\r
5815 bufferDescription.dwReserved = 0;
\r
5816 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5817 bufferDescription.lpwfxFormat = &waveFormat;
\r
5819 // Create the capture buffer.
\r
5820 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5821 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5822 if ( FAILED( result ) ) {
\r
5824 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5825 errorText_ = errorStream_.str();
\r
5829 // Get the buffer size ... might be different from what we specified.
\r
5830 DSCBCAPS dscbcaps;
\r
5831 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5832 result = buffer->GetCaps( &dscbcaps );
\r
5833 if ( FAILED( result ) ) {
\r
5835 buffer->Release();
\r
5836 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5837 errorText_ = errorStream_.str();
\r
5841 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5843 // NOTE: We could have a problem here if this is a duplex stream
\r
5844 // and the play and capture hardware buffer sizes are different
\r
5845 // (I'm actually not sure if that is a problem or not).
\r
5846 // Currently, we are not verifying that.
\r
5848 // Lock the capture buffer
\r
5851 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5852 if ( FAILED( result ) ) {
\r
5854 buffer->Release();
\r
5855 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5856 errorText_ = errorStream_.str();
\r
5860 // Zero the buffer
\r
5861 ZeroMemory( audioPtr, dataLen );
\r
5863 // Unlock the buffer
\r
5864 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5865 if ( FAILED( result ) ) {
\r
5867 buffer->Release();
\r
5868 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5869 errorText_ = errorStream_.str();
\r
5873 ohandle = (void *) input;
\r
5874 bhandle = (void *) buffer;
\r
5877 // Set various stream parameters
\r
5878 DsHandle *handle = 0;
\r
5879 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5880 stream_.nUserChannels[mode] = channels;
\r
5881 stream_.bufferSize = *bufferSize;
\r
5882 stream_.channelOffset[mode] = firstChannel;
\r
5883 stream_.deviceInterleaved[mode] = true;
\r
5884 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5885 else stream_.userInterleaved = true;
\r
5887 // Set flag for buffer conversion
\r
5888 stream_.doConvertBuffer[mode] = false;
\r
5889 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5890 stream_.doConvertBuffer[mode] = true;
\r
5891 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5892 stream_.doConvertBuffer[mode] = true;
\r
5893 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5894 stream_.nUserChannels[mode] > 1 )
\r
5895 stream_.doConvertBuffer[mode] = true;
\r
5897 // Allocate necessary internal buffers
\r
5898 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5899 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5900 if ( stream_.userBuffer[mode] == NULL ) {
\r
5901 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5905 if ( stream_.doConvertBuffer[mode] ) {
\r
5907 bool makeBuffer = true;
\r
5908 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5909 if ( mode == INPUT ) {
\r
5910 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5911 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5912 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5916 if ( makeBuffer ) {
\r
5917 bufferBytes *= *bufferSize;
\r
5918 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5919 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5920 if ( stream_.deviceBuffer == NULL ) {
\r
5921 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5927 // Allocate our DsHandle structures for the stream.
\r
5928 if ( stream_.apiHandle == 0 ) {
\r
5930 handle = new DsHandle;
\r
5932 catch ( std::bad_alloc& ) {
\r
5933 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5937 // Create a manual-reset event.
\r
5938 handle->condition = CreateEvent( NULL, // no security
\r
5939 TRUE, // manual-reset
\r
5940 FALSE, // non-signaled initially
\r
5941 NULL ); // unnamed
\r
5942 stream_.apiHandle = (void *) handle;
\r
5945 handle = (DsHandle *) stream_.apiHandle;
\r
5946 handle->id[mode] = ohandle;
\r
5947 handle->buffer[mode] = bhandle;
\r
5948 handle->dsBufferSize[mode] = dsBufferSize;
\r
5949 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5951 stream_.device[mode] = device;
\r
5952 stream_.state = STREAM_STOPPED;
\r
5953 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5954 // We had already set up an output stream.
\r
5955 stream_.mode = DUPLEX;
\r
5957 stream_.mode = mode;
\r
5958 stream_.nBuffers = nBuffers;
\r
5959 stream_.sampleRate = sampleRate;
\r
5961 // Setup the buffer conversion information structure.
\r
5962 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5964 // Setup the callback thread.
\r
5965 if ( stream_.callbackInfo.isRunning == false ) {
\r
5966 unsigned threadId;
\r
5967 stream_.callbackInfo.isRunning = true;
\r
5968 stream_.callbackInfo.object = (void *) this;
\r
5969 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5970 &stream_.callbackInfo, 0, &threadId );
\r
5971 if ( stream_.callbackInfo.thread == 0 ) {
\r
5972 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5976 // Boost DS thread priority
\r
5977 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5983 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5984 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5985 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5986 if ( buffer ) buffer->Release();
\r
5987 object->Release();
\r
5989 if ( handle->buffer[1] ) {
\r
5990 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5991 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5992 if ( buffer ) buffer->Release();
\r
5993 object->Release();
\r
5995 CloseHandle( handle->condition );
\r
5997 stream_.apiHandle = 0;
\r
6000 for ( int i=0; i<2; i++ ) {
\r
6001 if ( stream_.userBuffer[i] ) {
\r
6002 free( stream_.userBuffer[i] );
\r
6003 stream_.userBuffer[i] = 0;
\r
6007 if ( stream_.deviceBuffer ) {
\r
6008 free( stream_.deviceBuffer );
\r
6009 stream_.deviceBuffer = 0;
\r
6012 stream_.state = STREAM_CLOSED;
\r
6016 void RtApiDs :: closeStream()
\r
6018 if ( stream_.state == STREAM_CLOSED ) {
\r
6019 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6020 error( RtAudioError::WARNING );
\r
6024 // Stop the callback thread.
\r
6025 stream_.callbackInfo.isRunning = false;
\r
6026 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6027 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6029 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6031 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6032 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6033 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6036 buffer->Release();
\r
6038 object->Release();
\r
6040 if ( handle->buffer[1] ) {
\r
6041 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6042 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6045 buffer->Release();
\r
6047 object->Release();
\r
6049 CloseHandle( handle->condition );
\r
6051 stream_.apiHandle = 0;
\r
6054 for ( int i=0; i<2; i++ ) {
\r
6055 if ( stream_.userBuffer[i] ) {
\r
6056 free( stream_.userBuffer[i] );
\r
6057 stream_.userBuffer[i] = 0;
\r
6061 if ( stream_.deviceBuffer ) {
\r
6062 free( stream_.deviceBuffer );
\r
6063 stream_.deviceBuffer = 0;
\r
6066 stream_.mode = UNINITIALIZED;
\r
6067 stream_.state = STREAM_CLOSED;
\r
6070 void RtApiDs :: startStream()
\r
6073 if ( stream_.state == STREAM_RUNNING ) {
\r
6074 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6075 error( RtAudioError::WARNING );
\r
6079 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6081 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6082 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6083 // this is already in effect.
\r
6084 timeBeginPeriod( 1 );
\r
6086 buffersRolling = false;
\r
6087 duplexPrerollBytes = 0;
\r
6089 if ( stream_.mode == DUPLEX ) {
\r
6090 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6091 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6094 HRESULT result = 0;
\r
6095 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6097 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6098 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6099 if ( FAILED( result ) ) {
\r
6100 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6101 errorText_ = errorStream_.str();
\r
6106 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6108 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6109 result = buffer->Start( DSCBSTART_LOOPING );
\r
6110 if ( FAILED( result ) ) {
\r
6111 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6112 errorText_ = errorStream_.str();
\r
6117 handle->drainCounter = 0;
\r
6118 handle->internalDrain = false;
\r
6119 ResetEvent( handle->condition );
\r
6120 stream_.state = STREAM_RUNNING;
\r
6123 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6126 void RtApiDs :: stopStream()
\r
6129 if ( stream_.state == STREAM_STOPPED ) {
\r
6130 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6131 error( RtAudioError::WARNING );
\r
6135 HRESULT result = 0;
\r
6138 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6139 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6140 if ( handle->drainCounter == 0 ) {
\r
6141 handle->drainCounter = 2;
\r
6142 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6145 stream_.state = STREAM_STOPPED;
\r
6147 MUTEX_LOCK( &stream_.mutex );
\r
6149 // Stop the buffer and clear memory
\r
6150 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6151 result = buffer->Stop();
\r
6152 if ( FAILED( result ) ) {
\r
6153 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6154 errorText_ = errorStream_.str();
\r
6158 // Lock the buffer and clear it so that if we start to play again,
\r
6159 // we won't have old data playing.
\r
6160 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6161 if ( FAILED( result ) ) {
\r
6162 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6163 errorText_ = errorStream_.str();
\r
6167 // Zero the DS buffer
\r
6168 ZeroMemory( audioPtr, dataLen );
\r
6170 // Unlock the DS buffer
\r
6171 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6172 if ( FAILED( result ) ) {
\r
6173 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6174 errorText_ = errorStream_.str();
\r
6178 // If we start playing again, we must begin at beginning of buffer.
\r
6179 handle->bufferPointer[0] = 0;
\r
6182 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6183 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6187 stream_.state = STREAM_STOPPED;
\r
6189 if ( stream_.mode != DUPLEX )
\r
6190 MUTEX_LOCK( &stream_.mutex );
\r
6192 result = buffer->Stop();
\r
6193 if ( FAILED( result ) ) {
\r
6194 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6195 errorText_ = errorStream_.str();
\r
6199 // Lock the buffer and clear it so that if we start to play again,
\r
6200 // we won't have old data playing.
\r
6201 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6202 if ( FAILED( result ) ) {
\r
6203 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6204 errorText_ = errorStream_.str();
\r
6208 // Zero the DS buffer
\r
6209 ZeroMemory( audioPtr, dataLen );
\r
6211 // Unlock the DS buffer
\r
6212 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6213 if ( FAILED( result ) ) {
\r
6214 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6215 errorText_ = errorStream_.str();
\r
6219 // If we start recording again, we must begin at beginning of buffer.
\r
6220 handle->bufferPointer[1] = 0;
\r
6224 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6225 MUTEX_UNLOCK( &stream_.mutex );
\r
6227 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6230 void RtApiDs :: abortStream()
\r
6233 if ( stream_.state == STREAM_STOPPED ) {
\r
6234 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6235 error( RtAudioError::WARNING );
\r
6239 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6240 handle->drainCounter = 2;
\r
6245 void RtApiDs :: callbackEvent()
\r
6247 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6248 Sleep( 50 ); // sleep 50 milliseconds
\r
6252 if ( stream_.state == STREAM_CLOSED ) {
\r
6253 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6254 error( RtAudioError::WARNING );
\r
6258 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6259 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6261 // Check if we were draining the stream and signal is finished.
\r
6262 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6264 stream_.state = STREAM_STOPPING;
\r
6265 if ( handle->internalDrain == false )
\r
6266 SetEvent( handle->condition );
\r
6272 // Invoke user callback to get fresh output data UNLESS we are
\r
6273 // draining stream.
\r
6274 if ( handle->drainCounter == 0 ) {
\r
6275 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6276 double streamTime = getStreamTime();
\r
6277 RtAudioStreamStatus status = 0;
\r
6278 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6279 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6280 handle->xrun[0] = false;
\r
6282 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6283 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6284 handle->xrun[1] = false;
\r
6286 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6287 stream_.bufferSize, streamTime, status, info->userData );
\r
6288 if ( cbReturnValue == 2 ) {
\r
6289 stream_.state = STREAM_STOPPING;
\r
6290 handle->drainCounter = 2;
\r
6294 else if ( cbReturnValue == 1 ) {
\r
6295 handle->drainCounter = 1;
\r
6296 handle->internalDrain = true;
\r
6301 DWORD currentWritePointer, safeWritePointer;
\r
6302 DWORD currentReadPointer, safeReadPointer;
\r
6303 UINT nextWritePointer;
\r
6305 LPVOID buffer1 = NULL;
\r
6306 LPVOID buffer2 = NULL;
\r
6307 DWORD bufferSize1 = 0;
\r
6308 DWORD bufferSize2 = 0;
\r
6313 MUTEX_LOCK( &stream_.mutex );
\r
6314 if ( stream_.state == STREAM_STOPPED ) {
\r
6315 MUTEX_UNLOCK( &stream_.mutex );
\r
6319 if ( buffersRolling == false ) {
\r
6320 if ( stream_.mode == DUPLEX ) {
\r
6321 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6323 // It takes a while for the devices to get rolling. As a result,
\r
6324 // there's no guarantee that the capture and write device pointers
\r
6325 // will move in lockstep. Wait here for both devices to start
\r
6326 // rolling, and then set our buffer pointers accordingly.
\r
6327 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6328 // bytes later than the write buffer.
\r
6330 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6331 // take place between the two GetCurrentPosition calls... but I'm
\r
6332 // really not sure how to solve the problem. Temporarily boost to
\r
6333 // Realtime priority, maybe; but I'm not sure what priority the
\r
6334 // DirectSound service threads run at. We *should* be roughly
\r
6335 // within a ms or so of correct.
\r
6337 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6338 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6340 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6342 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6343 if ( FAILED( result ) ) {
\r
6344 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6345 errorText_ = errorStream_.str();
\r
6346 MUTEX_UNLOCK( &stream_.mutex );
\r
6347 error( RtAudioError::SYSTEM_ERROR );
\r
6350 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6351 if ( FAILED( result ) ) {
\r
6352 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6353 errorText_ = errorStream_.str();
\r
6354 MUTEX_UNLOCK( &stream_.mutex );
\r
6355 error( RtAudioError::SYSTEM_ERROR );
\r
6359 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6360 if ( FAILED( result ) ) {
\r
6361 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6362 errorText_ = errorStream_.str();
\r
6363 MUTEX_UNLOCK( &stream_.mutex );
\r
6364 error( RtAudioError::SYSTEM_ERROR );
\r
6367 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6368 if ( FAILED( result ) ) {
\r
6369 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6370 errorText_ = errorStream_.str();
\r
6371 MUTEX_UNLOCK( &stream_.mutex );
\r
6372 error( RtAudioError::SYSTEM_ERROR );
\r
6375 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6379 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6381 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6382 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6383 handle->bufferPointer[1] = safeReadPointer;
\r
6385 else if ( stream_.mode == OUTPUT ) {
\r
6387 // Set the proper nextWritePosition after initial startup.
\r
6388 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6389 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6390 if ( FAILED( result ) ) {
\r
6391 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6392 errorText_ = errorStream_.str();
\r
6393 MUTEX_UNLOCK( &stream_.mutex );
\r
6394 error( RtAudioError::SYSTEM_ERROR );
\r
6397 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6398 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6401 buffersRolling = true;
\r
6404 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6406 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6408 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6409 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6410 bufferBytes *= formatBytes( stream_.userFormat );
\r
6411 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6414 // Setup parameters and do buffer conversion if necessary.
\r
6415 if ( stream_.doConvertBuffer[0] ) {
\r
6416 buffer = stream_.deviceBuffer;
\r
6417 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6418 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6419 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6422 buffer = stream_.userBuffer[0];
\r
6423 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6424 bufferBytes *= formatBytes( stream_.userFormat );
\r
6427 // No byte swapping necessary in DirectSound implementation.
\r
6429 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6430 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6432 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6433 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6435 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6436 nextWritePointer = handle->bufferPointer[0];
\r
6438 DWORD endWrite, leadPointer;
\r
6440 // Find out where the read and "safe write" pointers are.
\r
6441 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6442 if ( FAILED( result ) ) {
\r
6443 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6444 errorText_ = errorStream_.str();
\r
6445 error( RtAudioError::SYSTEM_ERROR );
\r
6449 // We will copy our output buffer into the region between
\r
6450 // safeWritePointer and leadPointer. If leadPointer is not
\r
6451 // beyond the next endWrite position, wait until it is.
\r
6452 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6453 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6454 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6455 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6456 endWrite = nextWritePointer + bufferBytes;
\r
6458 // Check whether the entire write region is behind the play pointer.
\r
6459 if ( leadPointer >= endWrite ) break;
\r
6461 // If we are here, then we must wait until the leadPointer advances
\r
6462 // beyond the end of our next write region. We use the
\r
6463 // Sleep() function to suspend operation until that happens.
\r
6464 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6465 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6466 if ( millis < 1.0 ) millis = 1.0;
\r
6467 Sleep( (DWORD) millis );
\r
6470 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6471 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6472 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6473 handle->xrun[0] = true;
\r
6474 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6475 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6476 handle->bufferPointer[0] = nextWritePointer;
\r
6477 endWrite = nextWritePointer + bufferBytes;
\r
6480 // Lock free space in the buffer
\r
6481 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6482 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6483 if ( FAILED( result ) ) {
\r
6484 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6485 errorText_ = errorStream_.str();
\r
6486 MUTEX_UNLOCK( &stream_.mutex );
\r
6487 error( RtAudioError::SYSTEM_ERROR );
\r
6491 // Copy our buffer into the DS buffer
\r
6492 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6493 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6495 // Update our buffer offset and unlock sound buffer
\r
6496 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6497 if ( FAILED( result ) ) {
\r
6498 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6499 errorText_ = errorStream_.str();
\r
6500 MUTEX_UNLOCK( &stream_.mutex );
\r
6501 error( RtAudioError::SYSTEM_ERROR );
\r
6504 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6505 handle->bufferPointer[0] = nextWritePointer;
\r
6508 // Don't bother draining input
\r
6509 if ( handle->drainCounter ) {
\r
6510 handle->drainCounter++;
\r
6514 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6516 // Setup parameters.
\r
6517 if ( stream_.doConvertBuffer[1] ) {
\r
6518 buffer = stream_.deviceBuffer;
\r
6519 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6520 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6523 buffer = stream_.userBuffer[1];
\r
6524 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6525 bufferBytes *= formatBytes( stream_.userFormat );
\r
6528 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6529 long nextReadPointer = handle->bufferPointer[1];
\r
6530 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6532 // Find out where the write and "safe read" pointers are.
\r
6533 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6534 if ( FAILED( result ) ) {
\r
6535 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6536 errorText_ = errorStream_.str();
\r
6537 MUTEX_UNLOCK( &stream_.mutex );
\r
6538 error( RtAudioError::SYSTEM_ERROR );
\r
6542 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6543 DWORD endRead = nextReadPointer + bufferBytes;
\r
6545 // Handling depends on whether we are INPUT or DUPLEX.
\r
6546 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6547 // then a wait here will drag the write pointers into the forbidden zone.
\r
6549 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6550 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6551 // practical way to sync up the read and write pointers reliably, given the
\r
6552 // the very complex relationship between phase and increment of the read and write
\r
6555 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6556 // provide a pre-roll period of 0.5 seconds in which we return
\r
6557 // zeros from the read buffer while the pointers sync up.
\r
6559 if ( stream_.mode == DUPLEX ) {
\r
6560 if ( safeReadPointer < endRead ) {
\r
6561 if ( duplexPrerollBytes <= 0 ) {
\r
6562 // Pre-roll time over. Be more agressive.
\r
6563 int adjustment = endRead-safeReadPointer;
\r
6565 handle->xrun[1] = true;
\r
6567 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6568 // and perform fine adjustments later.
\r
6569 // - small adjustments: back off by twice as much.
\r
6570 if ( adjustment >= 2*bufferBytes )
\r
6571 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6573 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6575 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6579 // In pre=roll time. Just do it.
\r
6580 nextReadPointer = safeReadPointer - bufferBytes;
\r
6581 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6583 endRead = nextReadPointer + bufferBytes;
\r
6586 else { // mode == INPUT
\r
6587 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6588 // See comments for playback.
\r
6589 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6590 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6591 if ( millis < 1.0 ) millis = 1.0;
\r
6592 Sleep( (DWORD) millis );
\r
6594 // Wake up and find out where we are now.
\r
6595 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6596 if ( FAILED( result ) ) {
\r
6597 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6598 errorText_ = errorStream_.str();
\r
6599 MUTEX_UNLOCK( &stream_.mutex );
\r
6600 error( RtAudioError::SYSTEM_ERROR );
\r
6604 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6608 // Lock free space in the buffer
\r
6609 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6610 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6611 if ( FAILED( result ) ) {
\r
6612 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6613 errorText_ = errorStream_.str();
\r
6614 MUTEX_UNLOCK( &stream_.mutex );
\r
6615 error( RtAudioError::SYSTEM_ERROR );
\r
6619 if ( duplexPrerollBytes <= 0 ) {
\r
6620 // Copy our buffer into the DS buffer
\r
6621 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6622 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6625 memset( buffer, 0, bufferSize1 );
\r
6626 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6627 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6630 // Update our buffer offset and unlock sound buffer
\r
6631 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6632 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6633 if ( FAILED( result ) ) {
\r
6634 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6635 errorText_ = errorStream_.str();
\r
6636 MUTEX_UNLOCK( &stream_.mutex );
\r
6637 error( RtAudioError::SYSTEM_ERROR );
\r
6640 handle->bufferPointer[1] = nextReadPointer;
\r
6642 // No byte swapping necessary in DirectSound implementation.
\r
6644 // If necessary, convert 8-bit data from unsigned to signed.
\r
6645 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6646 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6648 // Do buffer conversion if necessary.
\r
6649 if ( stream_.doConvertBuffer[1] )
\r
6650 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6654 MUTEX_UNLOCK( &stream_.mutex );
\r
6655 RtApi::tickStreamTime();
\r
6658 // Definitions for utility functions and callbacks
\r
6659 // specific to the DirectSound implementation.
\r
6661 static unsigned __stdcall callbackHandler( void *ptr )
\r
6663 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6664 RtApiDs *object = (RtApiDs *) info->object;
\r
6665 bool* isRunning = &info->isRunning;
\r
6667 while ( *isRunning == true ) {
\r
6668 object->callbackEvent();
\r
6671 _endthreadex( 0 );
\r
6675 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6676 LPCTSTR description,
\r
6677 LPCTSTR /*module*/,
\r
6678 LPVOID lpContext )
\r
6680 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6681 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6684 bool validDevice = false;
\r
6685 if ( probeInfo.isInput == true ) {
\r
6687 LPDIRECTSOUNDCAPTURE object;
\r
6689 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6690 if ( hr != DS_OK ) return TRUE;
\r
6692 caps.dwSize = sizeof(caps);
\r
6693 hr = object->GetCaps( &caps );
\r
6694 if ( hr == DS_OK ) {
\r
6695 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6696 validDevice = true;
\r
6698 object->Release();
\r
6702 LPDIRECTSOUND object;
\r
6703 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6704 if ( hr != DS_OK ) return TRUE;
\r
6706 caps.dwSize = sizeof(caps);
\r
6707 hr = object->GetCaps( &caps );
\r
6708 if ( hr == DS_OK ) {
\r
6709 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6710 validDevice = true;
\r
6712 object->Release();
\r
6715 // If good device, then save its name and guid.
\r
6716 std::string name = convertCharPointerToStdString( description );
\r
6717 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6718 if ( lpguid == NULL )
\r
6719 name = "Default Device";
\r
6720 if ( validDevice ) {
\r
6721 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6722 if ( dsDevices[i].name == name ) {
\r
6723 dsDevices[i].found = true;
\r
6724 if ( probeInfo.isInput ) {
\r
6725 dsDevices[i].id[1] = lpguid;
\r
6726 dsDevices[i].validId[1] = true;
\r
6729 dsDevices[i].id[0] = lpguid;
\r
6730 dsDevices[i].validId[0] = true;
\r
6737 device.name = name;
\r
6738 device.found = true;
\r
6739 if ( probeInfo.isInput ) {
\r
6740 device.id[1] = lpguid;
\r
6741 device.validId[1] = true;
\r
6744 device.id[0] = lpguid;
\r
6745 device.validId[0] = true;
\r
6747 dsDevices.push_back( device );
\r
6753 static const char* getErrorString( int code )
\r
6757 case DSERR_ALLOCATED:
\r
6758 return "Already allocated";
\r
6760 case DSERR_CONTROLUNAVAIL:
\r
6761 return "Control unavailable";
\r
6763 case DSERR_INVALIDPARAM:
\r
6764 return "Invalid parameter";
\r
6766 case DSERR_INVALIDCALL:
\r
6767 return "Invalid call";
\r
6769 case DSERR_GENERIC:
\r
6770 return "Generic error";
\r
6772 case DSERR_PRIOLEVELNEEDED:
\r
6773 return "Priority level needed";
\r
6775 case DSERR_OUTOFMEMORY:
\r
6776 return "Out of memory";
\r
6778 case DSERR_BADFORMAT:
\r
6779 return "The sample rate or the channel format is not supported";
\r
6781 case DSERR_UNSUPPORTED:
\r
6782 return "Not supported";
\r
6784 case DSERR_NODRIVER:
\r
6785 return "No driver";
\r
6787 case DSERR_ALREADYINITIALIZED:
\r
6788 return "Already initialized";
\r
6790 case DSERR_NOAGGREGATION:
\r
6791 return "No aggregation";
\r
6793 case DSERR_BUFFERLOST:
\r
6794 return "Buffer lost";
\r
6796 case DSERR_OTHERAPPHASPRIO:
\r
6797 return "Another application already has priority";
\r
6799 case DSERR_UNINITIALIZED:
\r
6800 return "Uninitialized";
\r
6803 return "DirectSound unknown error";
\r
6806 //******************** End of __WINDOWS_DS__ *********************//
\r
6810 #if defined(__LINUX_ALSA__)
\r
6812 #include <alsa/asoundlib.h>
\r
6813 #include <unistd.h>
\r
6815 // A structure to hold various information related to the ALSA API
\r
6816 // implementation.
\r
6817 struct AlsaHandle {
\r
6818 snd_pcm_t *handles[2];
\r
6819 bool synchronized;
\r
6821 pthread_cond_t runnable_cv;
\r
6825 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6828 static void *alsaCallbackHandler( void * ptr );
\r
6830 RtApiAlsa :: RtApiAlsa()
\r
6832 // Nothing to do here.
\r
6835 RtApiAlsa :: ~RtApiAlsa()
\r
6837 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6840 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6842 unsigned nDevices = 0;
\r
6843 int result, subdevice, card;
\r
6845 snd_ctl_t *handle;
\r
6847 // Count cards and devices
\r
6849 snd_card_next( &card );
\r
6850 while ( card >= 0 ) {
\r
6851 sprintf( name, "hw:%d", card );
\r
6852 result = snd_ctl_open( &handle, name, 0 );
\r
6853 if ( result < 0 ) {
\r
6854 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6855 errorText_ = errorStream_.str();
\r
6856 error( RtAudioError::WARNING );
\r
6861 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6862 if ( result < 0 ) {
\r
6863 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6864 errorText_ = errorStream_.str();
\r
6865 error( RtAudioError::WARNING );
\r
6868 if ( subdevice < 0 )
\r
6873 snd_ctl_close( handle );
\r
6874 snd_card_next( &card );
\r
6877 result = snd_ctl_open( &handle, "default", 0 );
\r
6878 if (result == 0) {
\r
6880 snd_ctl_close( handle );
\r
6886 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6888 RtAudio::DeviceInfo info;
\r
6889 info.probed = false;
\r
6891 unsigned nDevices = 0;
\r
6892 int result, subdevice, card;
\r
6894 snd_ctl_t *chandle;
\r
6896 // Count cards and devices
\r
6899 snd_card_next( &card );
\r
6900 while ( card >= 0 ) {
\r
6901 sprintf( name, "hw:%d", card );
\r
6902 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6903 if ( result < 0 ) {
\r
6904 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6905 errorText_ = errorStream_.str();
\r
6906 error( RtAudioError::WARNING );
\r
6911 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6912 if ( result < 0 ) {
\r
6913 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6914 errorText_ = errorStream_.str();
\r
6915 error( RtAudioError::WARNING );
\r
6918 if ( subdevice < 0 ) break;
\r
6919 if ( nDevices == device ) {
\r
6920 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6926 snd_ctl_close( chandle );
\r
6927 snd_card_next( &card );
\r
6930 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6931 if ( result == 0 ) {
\r
6932 if ( nDevices == device ) {
\r
6933 strcpy( name, "default" );
\r
6939 if ( nDevices == 0 ) {
\r
6940 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6941 error( RtAudioError::INVALID_USE );
\r
6945 if ( device >= nDevices ) {
\r
6946 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6947 error( RtAudioError::INVALID_USE );
\r
6953 // If a stream is already open, we cannot probe the stream devices.
\r
6954 // Thus, use the saved results.
\r
6955 if ( stream_.state != STREAM_CLOSED &&
\r
6956 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6957 snd_ctl_close( chandle );
\r
6958 if ( device >= devices_.size() ) {
\r
6959 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6960 error( RtAudioError::WARNING );
\r
6963 return devices_[ device ];
\r
6966 int openMode = SND_PCM_ASYNC;
\r
6967 snd_pcm_stream_t stream;
\r
6968 snd_pcm_info_t *pcminfo;
\r
6969 snd_pcm_info_alloca( &pcminfo );
\r
6970 snd_pcm_t *phandle;
\r
6971 snd_pcm_hw_params_t *params;
\r
6972 snd_pcm_hw_params_alloca( ¶ms );
\r
6974 // First try for playback unless default device (which has subdev -1)
\r
6975 stream = SND_PCM_STREAM_PLAYBACK;
\r
6976 snd_pcm_info_set_stream( pcminfo, stream );
\r
6977 if ( subdevice != -1 ) {
\r
6978 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6979 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6981 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6982 if ( result < 0 ) {
\r
6983 // Device probably doesn't support playback.
\r
6984 goto captureProbe;
\r
6988 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6989 if ( result < 0 ) {
\r
6990 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6991 errorText_ = errorStream_.str();
\r
6992 error( RtAudioError::WARNING );
\r
6993 goto captureProbe;
\r
6996 // The device is open ... fill the parameter structure.
\r
6997 result = snd_pcm_hw_params_any( phandle, params );
\r
6998 if ( result < 0 ) {
\r
6999 snd_pcm_close( phandle );
\r
7000 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7001 errorText_ = errorStream_.str();
\r
7002 error( RtAudioError::WARNING );
\r
7003 goto captureProbe;
\r
7006 // Get output channel information.
\r
7007 unsigned int value;
\r
7008 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7009 if ( result < 0 ) {
\r
7010 snd_pcm_close( phandle );
\r
7011 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7012 errorText_ = errorStream_.str();
\r
7013 error( RtAudioError::WARNING );
\r
7014 goto captureProbe;
\r
7016 info.outputChannels = value;
\r
7017 snd_pcm_close( phandle );
\r
7020 stream = SND_PCM_STREAM_CAPTURE;
\r
7021 snd_pcm_info_set_stream( pcminfo, stream );
\r
7023 // Now try for capture unless default device (with subdev = -1)
\r
7024 if ( subdevice != -1 ) {
\r
7025 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7026 snd_ctl_close( chandle );
\r
7027 if ( result < 0 ) {
\r
7028 // Device probably doesn't support capture.
\r
7029 if ( info.outputChannels == 0 ) return info;
\r
7030 goto probeParameters;
\r
7034 snd_ctl_close( chandle );
\r
7036 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7037 if ( result < 0 ) {
\r
7038 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7039 errorText_ = errorStream_.str();
\r
7040 error( RtAudioError::WARNING );
\r
7041 if ( info.outputChannels == 0 ) return info;
\r
7042 goto probeParameters;
\r
7045 // The device is open ... fill the parameter structure.
\r
7046 result = snd_pcm_hw_params_any( phandle, params );
\r
7047 if ( result < 0 ) {
\r
7048 snd_pcm_close( phandle );
\r
7049 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7050 errorText_ = errorStream_.str();
\r
7051 error( RtAudioError::WARNING );
\r
7052 if ( info.outputChannels == 0 ) return info;
\r
7053 goto probeParameters;
\r
7056 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7057 if ( result < 0 ) {
\r
7058 snd_pcm_close( phandle );
\r
7059 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7060 errorText_ = errorStream_.str();
\r
7061 error( RtAudioError::WARNING );
\r
7062 if ( info.outputChannels == 0 ) return info;
\r
7063 goto probeParameters;
\r
7065 info.inputChannels = value;
\r
7066 snd_pcm_close( phandle );
\r
7068 // If device opens for both playback and capture, we determine the channels.
\r
7069 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7070 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7072 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7073 if ( device == 0 && info.outputChannels > 0 )
\r
7074 info.isDefaultOutput = true;
\r
7075 if ( device == 0 && info.inputChannels > 0 )
\r
7076 info.isDefaultInput = true;
\r
7079 // At this point, we just need to figure out the supported data
\r
7080 // formats and sample rates. We'll proceed by opening the device in
\r
7081 // the direction with the maximum number of channels, or playback if
\r
7082 // they are equal. This might limit our sample rate options, but so
\r
7085 if ( info.outputChannels >= info.inputChannels )
\r
7086 stream = SND_PCM_STREAM_PLAYBACK;
\r
7088 stream = SND_PCM_STREAM_CAPTURE;
\r
7089 snd_pcm_info_set_stream( pcminfo, stream );
\r
7091 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7092 if ( result < 0 ) {
\r
7093 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7094 errorText_ = errorStream_.str();
\r
7095 error( RtAudioError::WARNING );
\r
7099 // The device is open ... fill the parameter structure.
\r
7100 result = snd_pcm_hw_params_any( phandle, params );
\r
7101 if ( result < 0 ) {
\r
7102 snd_pcm_close( phandle );
\r
7103 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7104 errorText_ = errorStream_.str();
\r
7105 error( RtAudioError::WARNING );
\r
7109 // Test our discrete set of sample rate values.
\r
7110 info.sampleRates.clear();
\r
7111 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7112 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7113 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7115 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7116 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7119 if ( info.sampleRates.size() == 0 ) {
\r
7120 snd_pcm_close( phandle );
\r
7121 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7122 errorText_ = errorStream_.str();
\r
7123 error( RtAudioError::WARNING );
\r
7127 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7128 snd_pcm_format_t format;
\r
7129 info.nativeFormats = 0;
\r
7130 format = SND_PCM_FORMAT_S8;
\r
7131 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7132 info.nativeFormats |= RTAUDIO_SINT8;
\r
7133 format = SND_PCM_FORMAT_S16;
\r
7134 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7135 info.nativeFormats |= RTAUDIO_SINT16;
\r
7136 format = SND_PCM_FORMAT_S24;
\r
7137 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7138 info.nativeFormats |= RTAUDIO_SINT24;
\r
7139 format = SND_PCM_FORMAT_S32;
\r
7140 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7141 info.nativeFormats |= RTAUDIO_SINT32;
\r
7142 format = SND_PCM_FORMAT_FLOAT;
\r
7143 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7144 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7145 format = SND_PCM_FORMAT_FLOAT64;
\r
7146 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7147 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7149 // Check that we have at least one supported format
\r
7150 if ( info.nativeFormats == 0 ) {
\r
7151 snd_pcm_close( phandle );
\r
7152 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7153 errorText_ = errorStream_.str();
\r
7154 error( RtAudioError::WARNING );
\r
7158 // Get the device name
\r
7160 result = snd_card_get_name( card, &cardname );
\r
7161 if ( result >= 0 ) {
\r
7162 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7167 // That's all ... close the device and return
\r
7168 snd_pcm_close( phandle );
\r
7169 info.probed = true;
\r
7173 void RtApiAlsa :: saveDeviceInfo( void )
\r
7177 unsigned int nDevices = getDeviceCount();
\r
7178 devices_.resize( nDevices );
\r
7179 for ( unsigned int i=0; i<nDevices; i++ )
\r
7180 devices_[i] = getDeviceInfo( i );
\r
7183 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7184 unsigned int firstChannel, unsigned int sampleRate,
\r
7185 RtAudioFormat format, unsigned int *bufferSize,
\r
7186 RtAudio::StreamOptions *options )
\r
7189 #if defined(__RTAUDIO_DEBUG__)
\r
7190 snd_output_t *out;
\r
7191 snd_output_stdio_attach(&out, stderr, 0);
\r
7194 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7196 unsigned nDevices = 0;
\r
7197 int result, subdevice, card;
\r
7199 snd_ctl_t *chandle;
\r
7201 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7202 snprintf(name, sizeof(name), "%s", "default");
\r
7204 // Count cards and devices
\r
7206 snd_card_next( &card );
\r
7207 while ( card >= 0 ) {
\r
7208 sprintf( name, "hw:%d", card );
\r
7209 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7210 if ( result < 0 ) {
\r
7211 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7212 errorText_ = errorStream_.str();
\r
7217 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7218 if ( result < 0 ) break;
\r
7219 if ( subdevice < 0 ) break;
\r
7220 if ( nDevices == device ) {
\r
7221 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7222 snd_ctl_close( chandle );
\r
7227 snd_ctl_close( chandle );
\r
7228 snd_card_next( &card );
\r
7231 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7232 if ( result == 0 ) {
\r
7233 if ( nDevices == device ) {
\r
7234 strcpy( name, "default" );
\r
7240 if ( nDevices == 0 ) {
\r
7241 // This should not happen because a check is made before this function is called.
\r
7242 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7246 if ( device >= nDevices ) {
\r
7247 // This should not happen because a check is made before this function is called.
\r
7248 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7255 // The getDeviceInfo() function will not work for a device that is
\r
7256 // already open. Thus, we'll probe the system before opening a
\r
7257 // stream and save the results for use by getDeviceInfo().
\r
7258 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7259 this->saveDeviceInfo();
\r
7261 snd_pcm_stream_t stream;
\r
7262 if ( mode == OUTPUT )
\r
7263 stream = SND_PCM_STREAM_PLAYBACK;
\r
7265 stream = SND_PCM_STREAM_CAPTURE;
\r
7267 snd_pcm_t *phandle;
\r
7268 int openMode = SND_PCM_ASYNC;
\r
7269 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7270 if ( result < 0 ) {
\r
7271 if ( mode == OUTPUT )
\r
7272 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7274 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7275 errorText_ = errorStream_.str();
\r
7279 // Fill the parameter structure.
\r
7280 snd_pcm_hw_params_t *hw_params;
\r
7281 snd_pcm_hw_params_alloca( &hw_params );
\r
7282 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7283 if ( result < 0 ) {
\r
7284 snd_pcm_close( phandle );
\r
7285 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7286 errorText_ = errorStream_.str();
\r
7290 #if defined(__RTAUDIO_DEBUG__)
\r
7291 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7292 snd_pcm_hw_params_dump( hw_params, out );
\r
7295 // Set access ... check user preference.
\r
7296 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7297 stream_.userInterleaved = false;
\r
7298 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7299 if ( result < 0 ) {
\r
7300 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7301 stream_.deviceInterleaved[mode] = true;
\r
7304 stream_.deviceInterleaved[mode] = false;
\r
7307 stream_.userInterleaved = true;
\r
7308 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7309 if ( result < 0 ) {
\r
7310 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7311 stream_.deviceInterleaved[mode] = false;
\r
7314 stream_.deviceInterleaved[mode] = true;
\r
7317 if ( result < 0 ) {
\r
7318 snd_pcm_close( phandle );
\r
7319 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7320 errorText_ = errorStream_.str();
\r
7324 // Determine how to set the device format.
\r
7325 stream_.userFormat = format;
\r
7326 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7328 if ( format == RTAUDIO_SINT8 )
\r
7329 deviceFormat = SND_PCM_FORMAT_S8;
\r
7330 else if ( format == RTAUDIO_SINT16 )
\r
7331 deviceFormat = SND_PCM_FORMAT_S16;
\r
7332 else if ( format == RTAUDIO_SINT24 )
\r
7333 deviceFormat = SND_PCM_FORMAT_S24;
\r
7334 else if ( format == RTAUDIO_SINT32 )
\r
7335 deviceFormat = SND_PCM_FORMAT_S32;
\r
7336 else if ( format == RTAUDIO_FLOAT32 )
\r
7337 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7338 else if ( format == RTAUDIO_FLOAT64 )
\r
7339 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7341 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7342 stream_.deviceFormat[mode] = format;
\r
7346 // The user requested format is not natively supported by the device.
\r
7347 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7348 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7349 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7353 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7354 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7355 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7359 deviceFormat = SND_PCM_FORMAT_S32;
\r
7360 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7361 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7365 deviceFormat = SND_PCM_FORMAT_S24;
\r
7366 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7367 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7371 deviceFormat = SND_PCM_FORMAT_S16;
\r
7372 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7373 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7377 deviceFormat = SND_PCM_FORMAT_S8;
\r
7378 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7379 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7383 // If we get here, no supported format was found.
\r
7384 snd_pcm_close( phandle );
\r
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7386 errorText_ = errorStream_.str();
\r
7390 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7391 if ( result < 0 ) {
\r
7392 snd_pcm_close( phandle );
\r
7393 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7394 errorText_ = errorStream_.str();
\r
7398 // Determine whether byte-swaping is necessary.
\r
7399 stream_.doByteSwap[mode] = false;
\r
7400 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7401 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7402 if ( result == 0 )
\r
7403 stream_.doByteSwap[mode] = true;
\r
7404 else if (result < 0) {
\r
7405 snd_pcm_close( phandle );
\r
7406 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7407 errorText_ = errorStream_.str();
\r
7412 // Set the sample rate.
\r
7413 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7414 if ( result < 0 ) {
\r
7415 snd_pcm_close( phandle );
\r
7416 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7417 errorText_ = errorStream_.str();
\r
7421 // Determine the number of channels for this device. We support a possible
\r
7422 // minimum device channel number > than the value requested by the user.
\r
7423 stream_.nUserChannels[mode] = channels;
\r
7424 unsigned int value;
\r
7425 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7426 unsigned int deviceChannels = value;
\r
7427 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7428 snd_pcm_close( phandle );
\r
7429 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7430 errorText_ = errorStream_.str();
\r
7434 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7435 if ( result < 0 ) {
\r
7436 snd_pcm_close( phandle );
\r
7437 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7438 errorText_ = errorStream_.str();
\r
7441 deviceChannels = value;
\r
7442 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7443 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7445 // Set the device channels.
\r
7446 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7447 if ( result < 0 ) {
\r
7448 snd_pcm_close( phandle );
\r
7449 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7450 errorText_ = errorStream_.str();
\r
7454 // Set the buffer (or period) size.
\r
7456 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7457 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7458 if ( result < 0 ) {
\r
7459 snd_pcm_close( phandle );
\r
7460 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7461 errorText_ = errorStream_.str();
\r
7464 *bufferSize = periodSize;
\r
7466 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7467 unsigned int periods = 0;
\r
7468 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7469 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7470 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7471 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7472 if ( result < 0 ) {
\r
7473 snd_pcm_close( phandle );
\r
7474 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7475 errorText_ = errorStream_.str();
\r
7479 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7480 // MUST be the same in both directions!
\r
7481 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7482 snd_pcm_close( phandle );
\r
7483 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7484 errorText_ = errorStream_.str();
\r
7488 stream_.bufferSize = *bufferSize;
\r
7490 // Install the hardware configuration
\r
7491 result = snd_pcm_hw_params( phandle, hw_params );
\r
7492 if ( result < 0 ) {
\r
7493 snd_pcm_close( phandle );
\r
7494 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7495 errorText_ = errorStream_.str();
\r
7499 #if defined(__RTAUDIO_DEBUG__)
\r
7500 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7501 snd_pcm_hw_params_dump( hw_params, out );
\r
7504 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7505 snd_pcm_sw_params_t *sw_params = NULL;
\r
7506 snd_pcm_sw_params_alloca( &sw_params );
\r
7507 snd_pcm_sw_params_current( phandle, sw_params );
\r
7508 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7509 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7510 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7512 // The following two settings were suggested by Theo Veenker
\r
7513 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7514 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7516 // here are two options for a fix
\r
7517 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7518 snd_pcm_uframes_t val;
\r
7519 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7520 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7522 result = snd_pcm_sw_params( phandle, sw_params );
\r
7523 if ( result < 0 ) {
\r
7524 snd_pcm_close( phandle );
\r
7525 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7526 errorText_ = errorStream_.str();
\r
7530 #if defined(__RTAUDIO_DEBUG__)
\r
7531 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7532 snd_pcm_sw_params_dump( sw_params, out );
\r
7535 // Set flags for buffer conversion
\r
7536 stream_.doConvertBuffer[mode] = false;
\r
7537 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7538 stream_.doConvertBuffer[mode] = true;
\r
7539 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7540 stream_.doConvertBuffer[mode] = true;
\r
7541 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7542 stream_.nUserChannels[mode] > 1 )
\r
7543 stream_.doConvertBuffer[mode] = true;
\r
7545 // Allocate the ApiHandle if necessary and then save.
\r
7546 AlsaHandle *apiInfo = 0;
\r
7547 if ( stream_.apiHandle == 0 ) {
\r
7549 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7551 catch ( std::bad_alloc& ) {
\r
7552 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7556 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7557 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7561 stream_.apiHandle = (void *) apiInfo;
\r
7562 apiInfo->handles[0] = 0;
\r
7563 apiInfo->handles[1] = 0;
\r
7566 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7568 apiInfo->handles[mode] = phandle;
\r
7571 // Allocate necessary internal buffers.
\r
7572 unsigned long bufferBytes;
\r
7573 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7574 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7575 if ( stream_.userBuffer[mode] == NULL ) {
\r
7576 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7580 if ( stream_.doConvertBuffer[mode] ) {
\r
7582 bool makeBuffer = true;
\r
7583 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7584 if ( mode == INPUT ) {
\r
7585 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7586 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7587 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7591 if ( makeBuffer ) {
\r
7592 bufferBytes *= *bufferSize;
\r
7593 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7594 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7595 if ( stream_.deviceBuffer == NULL ) {
\r
7596 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7602 stream_.sampleRate = sampleRate;
\r
7603 stream_.nBuffers = periods;
\r
7604 stream_.device[mode] = device;
\r
7605 stream_.state = STREAM_STOPPED;
\r
7607 // Setup the buffer conversion information structure.
\r
7608 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7610 // Setup thread if necessary.
\r
7611 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7612 // We had already set up an output stream.
\r
7613 stream_.mode = DUPLEX;
\r
7614 // Link the streams if possible.
\r
7615 apiInfo->synchronized = false;
\r
7616 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7617 apiInfo->synchronized = true;
\r
7619 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7620 error( RtAudioError::WARNING );
\r
7624 stream_.mode = mode;
\r
7626 // Setup callback thread.
\r
7627 stream_.callbackInfo.object = (void *) this;
\r
7629 // Set the thread attributes for joinable and realtime scheduling
\r
7630 // priority (optional). The higher priority will only take affect
\r
7631 // if the program is run as root or suid. Note, under Linux
\r
7632 // processes with CAP_SYS_NICE privilege, a user can change
\r
7633 // scheduling policy and priority (thus need not be root). See
\r
7634 // POSIX "capabilities".
\r
7635 pthread_attr_t attr;
\r
7636 pthread_attr_init( &attr );
\r
7637 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7639 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7640 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7641 // We previously attempted to increase the audio callback priority
\r
7642 // to SCHED_RR here via the attributes. However, while no errors
\r
7643 // were reported in doing so, it did not work. So, now this is
\r
7644 // done in the alsaCallbackHandler function.
\r
7645 stream_.callbackInfo.doRealtime = true;
\r
7646 int priority = options->priority;
\r
7647 int min = sched_get_priority_min( SCHED_RR );
\r
7648 int max = sched_get_priority_max( SCHED_RR );
\r
7649 if ( priority < min ) priority = min;
\r
7650 else if ( priority > max ) priority = max;
\r
7651 stream_.callbackInfo.priority = priority;
\r
7655 stream_.callbackInfo.isRunning = true;
\r
7656 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7657 pthread_attr_destroy( &attr );
\r
7659 stream_.callbackInfo.isRunning = false;
\r
7660 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7669 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7670 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7671 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7673 stream_.apiHandle = 0;
\r
7676 if ( phandle) snd_pcm_close( phandle );
\r
7678 for ( int i=0; i<2; i++ ) {
\r
7679 if ( stream_.userBuffer[i] ) {
\r
7680 free( stream_.userBuffer[i] );
\r
7681 stream_.userBuffer[i] = 0;
\r
7685 if ( stream_.deviceBuffer ) {
\r
7686 free( stream_.deviceBuffer );
\r
7687 stream_.deviceBuffer = 0;
\r
7690 stream_.state = STREAM_CLOSED;
\r
7694 void RtApiAlsa :: closeStream()
\r
7696 if ( stream_.state == STREAM_CLOSED ) {
\r
7697 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7698 error( RtAudioError::WARNING );
\r
7702 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7703 stream_.callbackInfo.isRunning = false;
\r
7704 MUTEX_LOCK( &stream_.mutex );
\r
7705 if ( stream_.state == STREAM_STOPPED ) {
\r
7706 apiInfo->runnable = true;
\r
7707 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7709 MUTEX_UNLOCK( &stream_.mutex );
\r
7710 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7712 if ( stream_.state == STREAM_RUNNING ) {
\r
7713 stream_.state = STREAM_STOPPED;
\r
7714 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7715 snd_pcm_drop( apiInfo->handles[0] );
\r
7716 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7717 snd_pcm_drop( apiInfo->handles[1] );
\r
7721 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7722 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7723 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7725 stream_.apiHandle = 0;
\r
7728 for ( int i=0; i<2; i++ ) {
\r
7729 if ( stream_.userBuffer[i] ) {
\r
7730 free( stream_.userBuffer[i] );
\r
7731 stream_.userBuffer[i] = 0;
\r
7735 if ( stream_.deviceBuffer ) {
\r
7736 free( stream_.deviceBuffer );
\r
7737 stream_.deviceBuffer = 0;
\r
7740 stream_.mode = UNINITIALIZED;
\r
7741 stream_.state = STREAM_CLOSED;
\r
7744 void RtApiAlsa :: startStream()
\r
7746 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7749 if ( stream_.state == STREAM_RUNNING ) {
\r
7750 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7751 error( RtAudioError::WARNING );
\r
7755 MUTEX_LOCK( &stream_.mutex );
\r
7758 snd_pcm_state_t state;
\r
7759 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7760 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7761 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7762 state = snd_pcm_state( handle[0] );
\r
7763 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7764 result = snd_pcm_prepare( handle[0] );
\r
7765 if ( result < 0 ) {
\r
7766 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7767 errorText_ = errorStream_.str();
\r
7773 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7774 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7775 state = snd_pcm_state( handle[1] );
\r
7776 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7777 result = snd_pcm_prepare( handle[1] );
\r
7778 if ( result < 0 ) {
\r
7779 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7780 errorText_ = errorStream_.str();
\r
7786 stream_.state = STREAM_RUNNING;
\r
7789 apiInfo->runnable = true;
\r
7790 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7791 MUTEX_UNLOCK( &stream_.mutex );
\r
7793 if ( result >= 0 ) return;
\r
7794 error( RtAudioError::SYSTEM_ERROR );
\r
7797 void RtApiAlsa :: stopStream()
\r
7800 if ( stream_.state == STREAM_STOPPED ) {
\r
7801 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7802 error( RtAudioError::WARNING );
\r
7806 stream_.state = STREAM_STOPPED;
\r
7807 MUTEX_LOCK( &stream_.mutex );
\r
7810 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7811 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7812 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7813 if ( apiInfo->synchronized )
\r
7814 result = snd_pcm_drop( handle[0] );
\r
7816 result = snd_pcm_drain( handle[0] );
\r
7817 if ( result < 0 ) {
\r
7818 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7819 errorText_ = errorStream_.str();
\r
7824 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7825 result = snd_pcm_drop( handle[1] );
\r
7826 if ( result < 0 ) {
\r
7827 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7828 errorText_ = errorStream_.str();
\r
7834 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7835 MUTEX_UNLOCK( &stream_.mutex );
\r
7837 if ( result >= 0 ) return;
\r
7838 error( RtAudioError::SYSTEM_ERROR );
\r
7841 void RtApiAlsa :: abortStream()
\r
7844 if ( stream_.state == STREAM_STOPPED ) {
\r
7845 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7846 error( RtAudioError::WARNING );
\r
7850 stream_.state = STREAM_STOPPED;
\r
7851 MUTEX_LOCK( &stream_.mutex );
\r
7854 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7855 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7856 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7857 result = snd_pcm_drop( handle[0] );
\r
7858 if ( result < 0 ) {
\r
7859 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7860 errorText_ = errorStream_.str();
\r
7865 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7866 result = snd_pcm_drop( handle[1] );
\r
7867 if ( result < 0 ) {
\r
7868 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7869 errorText_ = errorStream_.str();
\r
7875 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7876 MUTEX_UNLOCK( &stream_.mutex );
\r
7878 if ( result >= 0 ) return;
\r
7879 error( RtAudioError::SYSTEM_ERROR );
\r
7882 void RtApiAlsa :: callbackEvent()
\r
7884 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7885 if ( stream_.state == STREAM_STOPPED ) {
\r
7886 MUTEX_LOCK( &stream_.mutex );
\r
7887 while ( !apiInfo->runnable )
\r
7888 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7890 if ( stream_.state != STREAM_RUNNING ) {
\r
7891 MUTEX_UNLOCK( &stream_.mutex );
\r
7894 MUTEX_UNLOCK( &stream_.mutex );
\r
7897 if ( stream_.state == STREAM_CLOSED ) {
\r
7898 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7899 error( RtAudioError::WARNING );
\r
7903 int doStopStream = 0;
\r
7904 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7905 double streamTime = getStreamTime();
\r
7906 RtAudioStreamStatus status = 0;
\r
7907 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7908 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7909 apiInfo->xrun[0] = false;
\r
7911 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7912 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7913 apiInfo->xrun[1] = false;
\r
7915 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7916 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7918 if ( doStopStream == 2 ) {
\r
7923 MUTEX_LOCK( &stream_.mutex );
\r
7925 // The state might change while waiting on a mutex.
\r
7926 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7931 snd_pcm_t **handle;
\r
7932 snd_pcm_sframes_t frames;
\r
7933 RtAudioFormat format;
\r
7934 handle = (snd_pcm_t **) apiInfo->handles;
\r
7936 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7938 // Setup parameters.
\r
7939 if ( stream_.doConvertBuffer[1] ) {
\r
7940 buffer = stream_.deviceBuffer;
\r
7941 channels = stream_.nDeviceChannels[1];
\r
7942 format = stream_.deviceFormat[1];
\r
7945 buffer = stream_.userBuffer[1];
\r
7946 channels = stream_.nUserChannels[1];
\r
7947 format = stream_.userFormat;
\r
7950 // Read samples from device in interleaved/non-interleaved format.
\r
7951 if ( stream_.deviceInterleaved[1] )
\r
7952 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7954 void *bufs[channels];
\r
7955 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7956 for ( int i=0; i<channels; i++ )
\r
7957 bufs[i] = (void *) (buffer + (i * offset));
\r
7958 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7961 if ( result < (int) stream_.bufferSize ) {
\r
7962 // Either an error or overrun occured.
\r
7963 if ( result == -EPIPE ) {
\r
7964 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7965 if ( state == SND_PCM_STATE_XRUN ) {
\r
7966 apiInfo->xrun[1] = true;
\r
7967 result = snd_pcm_prepare( handle[1] );
\r
7968 if ( result < 0 ) {
\r
7969 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7970 errorText_ = errorStream_.str();
\r
7974 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7975 errorText_ = errorStream_.str();
\r
7979 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7980 errorText_ = errorStream_.str();
\r
7982 error( RtAudioError::WARNING );
\r
7986 // Do byte swapping if necessary.
\r
7987 if ( stream_.doByteSwap[1] )
\r
7988 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7990 // Do buffer conversion if necessary.
\r
7991 if ( stream_.doConvertBuffer[1] )
\r
7992 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7994 // Check stream latency
\r
7995 result = snd_pcm_delay( handle[1], &frames );
\r
7996 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8001 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8003 // Setup parameters and do buffer conversion if necessary.
\r
8004 if ( stream_.doConvertBuffer[0] ) {
\r
8005 buffer = stream_.deviceBuffer;
\r
8006 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8007 channels = stream_.nDeviceChannels[0];
\r
8008 format = stream_.deviceFormat[0];
\r
8011 buffer = stream_.userBuffer[0];
\r
8012 channels = stream_.nUserChannels[0];
\r
8013 format = stream_.userFormat;
\r
8016 // Do byte swapping if necessary.
\r
8017 if ( stream_.doByteSwap[0] )
\r
8018 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8020 // Write samples to device in interleaved/non-interleaved format.
\r
8021 if ( stream_.deviceInterleaved[0] )
\r
8022 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8024 void *bufs[channels];
\r
8025 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8026 for ( int i=0; i<channels; i++ )
\r
8027 bufs[i] = (void *) (buffer + (i * offset));
\r
8028 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8031 if ( result < (int) stream_.bufferSize ) {
\r
8032 // Either an error or underrun occured.
\r
8033 if ( result == -EPIPE ) {
\r
8034 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8035 if ( state == SND_PCM_STATE_XRUN ) {
\r
8036 apiInfo->xrun[0] = true;
\r
8037 result = snd_pcm_prepare( handle[0] );
\r
8038 if ( result < 0 ) {
\r
8039 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8040 errorText_ = errorStream_.str();
\r
8044 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8045 errorText_ = errorStream_.str();
\r
8049 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8050 errorText_ = errorStream_.str();
\r
8052 error( RtAudioError::WARNING );
\r
8056 // Check stream latency
\r
8057 result = snd_pcm_delay( handle[0], &frames );
\r
8058 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8062 MUTEX_UNLOCK( &stream_.mutex );
\r
8064 RtApi::tickStreamTime();
\r
8065 if ( doStopStream == 1 ) this->stopStream();
\r
8068 static void *alsaCallbackHandler( void *ptr )
\r
8070 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8071 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8072 bool *isRunning = &info->isRunning;
\r
8074 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8075 if ( &info->doRealtime ) {
\r
8076 pthread_t tID = pthread_self(); // ID of this thread
\r
8077 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8078 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8082 while ( *isRunning == true ) {
\r
8083 pthread_testcancel();
\r
8084 object->callbackEvent();
\r
8087 pthread_exit( NULL );
\r
8090 //******************** End of __LINUX_ALSA__ *********************//
\r
8093 #if defined(__LINUX_PULSE__)
\r
8095 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8096 // and Tristan Matthews.
\r
8098 #include <pulse/error.h>
\r
8099 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio back-end advertises; the list is
// zero-terminated so callers can iterate without a separate count.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000,
  16000,
  22050,
  32000,
  44100,
  48000,
  96000,
  0  // terminator
};
\r
8105 struct rtaudio_pa_format_mapping_t {
\r
8106 RtAudioFormat rtaudio_format;
\r
8107 pa_sample_format_t pa_format;
\r
8110 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8111 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8112 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8113 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8114 {0, PA_SAMPLE_INVALID}};
\r
8116 struct PulseAudioHandle {
\r
8117 pa_simple *s_play;
\r
8120 pthread_cond_t runnable_cv;
\r
8122 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8125 RtApiPulse::~RtApiPulse()
\r
8127 if ( stream_.state != STREAM_CLOSED )
\r
8131 unsigned int RtApiPulse::getDeviceCount( void )
\r
8136 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8138 RtAudio::DeviceInfo info;
\r
8139 info.probed = true;
\r
8140 info.name = "PulseAudio";
\r
8141 info.outputChannels = 2;
\r
8142 info.inputChannels = 2;
\r
8143 info.duplexChannels = 2;
\r
8144 info.isDefaultOutput = true;
\r
8145 info.isDefaultInput = true;
\r
8147 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8148 info.sampleRates.push_back( *sr );
\r
8150 info.preferredSampleRate = 48000;
\r
8151 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8156 static void *pulseaudio_callback( void * user )
\r
8158 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8159 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8160 volatile bool *isRunning = &cbi->isRunning;
\r
8162 while ( *isRunning ) {
\r
8163 pthread_testcancel();
\r
8164 context->callbackEvent();
\r
8167 pthread_exit( NULL );
\r
// Tear down the stream: stop the callback thread, release the PulseAudio
// connections and the per-stream buffers, and reset stream state.
8170 void RtApiPulse::closeStream( void )
\r
8172 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// Signal the callback thread's while-loop to exit ...
8174 stream_.callbackInfo.isRunning = false;
\r
8176 MUTEX_LOCK( &stream_.mutex );
\r
// ... and wake it if it is parked on runnable_cv in callbackEvent().
8177 if ( stream_.state == STREAM_STOPPED ) {
\r
8178 pah->runnable = true;
\r
8179 pthread_cond_signal( &pah->runnable_cv );
\r
8181 MUTEX_UNLOCK( &stream_.mutex );
\r
8183 pthread_join( pah->thread, 0 );
\r
8184 if ( pah->s_play ) {
\r
// Discard any queued playback data before freeing the connection.
8185 pa_simple_flush( pah->s_play, NULL );
\r
8186 pa_simple_free( pah->s_play );
\r
// NOTE(review): fragment — the guard "if ( pah->s_rec )" (line 8188) was
// lost from this view; s_rec is freed below.
8189 pa_simple_free( pah->s_rec );
\r
8191 pthread_cond_destroy( &pah->runnable_cv );
\r
// NOTE(review): the "delete pah;" line appears to have been lost here too.
8193 stream_.apiHandle = 0;
\r
8196 if ( stream_.userBuffer[0] ) {
\r
8197 free( stream_.userBuffer[0] );
\r
8198 stream_.userBuffer[0] = 0;
\r
8200 if ( stream_.userBuffer[1] ) {
\r
8201 free( stream_.userBuffer[1] );
\r
8202 stream_.userBuffer[1] = 0;
\r
8205 stream_.state = STREAM_CLOSED;
\r
8206 stream_.mode = UNINITIALIZED;
\r
// One buffer cycle, run repeatedly by pulseaudio_callback(): wait while
// stopped, invoke the user callback, then perform the blocking PulseAudio
// write and/or read (with format conversion where needed).
8209 void RtApiPulse::callbackEvent( void )
\r
8211 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// Park the thread on runnable_cv until startStream()/closeStream() wakes it.
8213 if ( stream_.state == STREAM_STOPPED ) {
\r
8214 MUTEX_LOCK( &stream_.mutex );
\r
8215 while ( !pah->runnable )
\r
8216 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
// Woken but not running (e.g. closeStream) -> bail out of this cycle.
8218 if ( stream_.state != STREAM_RUNNING ) {
\r
8219 MUTEX_UNLOCK( &stream_.mutex );
\r
8222 MUTEX_UNLOCK( &stream_.mutex );
\r
8225 if ( stream_.state == STREAM_CLOSED ) {
\r
8226 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8227 "this shouldn't happen!";
\r
8228 error( RtAudioError::WARNING );
\r
// Invoke the user callback with the user-format buffers.
8232 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8233 double streamTime = getStreamTime();
\r
8234 RtAudioStreamStatus status = 0;
\r
8235 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8236 stream_.bufferSize, streamTime, status,
\r
8237 stream_.callbackInfo.userData );
\r
// Return value 2 requests an immediate abort (handled in the lost lines
// 8240-8242 — NOTE(review): fragment).
8239 if ( doStopStream == 2 ) {
\r
8244 MUTEX_LOCK( &stream_.mutex );
\r
// When conversion is needed the device-format data lives in deviceBuffer;
// otherwise PulseAudio reads/writes the user buffers directly.
8245 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8246 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
// State may have changed while the callback ran / while acquiring the mutex.
8248 if ( stream_.state != STREAM_RUNNING )
\r
// --- Playback side ---
8253 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8254 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8255 convertBuffer( stream_.deviceBuffer,
\r
8256 stream_.userBuffer[OUTPUT],
\r
8257 stream_.convertInfo[OUTPUT] );
\r
// NOTE(review): "bytes" and "pa_error" are declared in lines lost from
// this view (around 8249-8251).
8258 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8259 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8261 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8262 formatBytes( stream_.userFormat );
\r
// Blocking write of one buffer to the playback connection.
8264 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8265 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8266 pa_strerror( pa_error ) << ".";
\r
8267 errorText_ = errorStream_.str();
\r
8268 error( RtAudioError::WARNING );
\r
// --- Capture side ---
8272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8273 if ( stream_.doConvertBuffer[INPUT] )
\r
8274 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8275 formatBytes( stream_.deviceFormat[INPUT] );
\r
8277 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8278 formatBytes( stream_.userFormat );
\r
// Blocking read of one buffer from the record connection.
8280 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8281 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8282 pa_strerror( pa_error ) << ".";
\r
8283 errorText_ = errorStream_.str();
\r
8284 error( RtAudioError::WARNING );
\r
// Convert captured device-format data into the user buffer.
8286 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8287 convertBuffer( stream_.userBuffer[INPUT],
\r
8288 stream_.deviceBuffer,
\r
8289 stream_.convertInfo[INPUT] );
\r
8294 MUTEX_UNLOCK( &stream_.mutex );
\r
8295 RtApi::tickStreamTime();
\r
// Return value 1 requests a drained stop (stopStream() call lost from view).
8297 if ( doStopStream == 1 )
\r
// Mark the stream running and wake the callback thread parked on runnable_cv.
8301 void RtApiPulse::startStream( void )
\r
8303 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8305 if ( stream_.state == STREAM_CLOSED ) {
\r
8306 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8307 error( RtAudioError::INVALID_USE );
\r
// Already running is a warning, not an error (early return lost from view).
8310 if ( stream_.state == STREAM_RUNNING ) {
\r
8311 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8312 error( RtAudioError::WARNING );
\r
8316 MUTEX_LOCK( &stream_.mutex );
\r
8318 stream_.state = STREAM_RUNNING;
\r
// runnable is the predicate for the condition wait in callbackEvent().
8320 pah->runnable = true;
\r
8321 pthread_cond_signal( &pah->runnable_cv );
\r
8322 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream gracefully: drain queued playback data so already-written
// audio is heard (contrast abortStream(), which flushes/discards it).
8325 void RtApiPulse::stopStream( void )
\r
8327 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8329 if ( stream_.state == STREAM_CLOSED ) {
\r
8330 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8331 error( RtAudioError::INVALID_USE );
\r
8334 if ( stream_.state == STREAM_STOPPED ) {
\r
8335 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8336 error( RtAudioError::WARNING );
\r
// Set STOPPED before taking the mutex so the callback thread parks itself.
8340 stream_.state = STREAM_STOPPED;
\r
8341 MUTEX_LOCK( &stream_.mutex );
\r
8343 if ( pah && pah->s_play ) {
\r
// NOTE(review): "pa_error" declaration (line 8344) lost from this view.
8345 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8346 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8347 pa_strerror( pa_error ) << ".";
\r
8348 errorText_ = errorStream_.str();
\r
// Unlock before error(): SYSTEM_ERROR may throw and must not hold the mutex.
8349 MUTEX_UNLOCK( &stream_.mutex );
\r
8350 error( RtAudioError::SYSTEM_ERROR );
\r
8355 stream_.state = STREAM_STOPPED;
\r
8356 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream immediately: flush (discard) any queued playback data
// instead of draining it as stopStream() does.
8359 void RtApiPulse::abortStream( void )
\r
8361 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8363 if ( stream_.state == STREAM_CLOSED ) {
\r
8364 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8365 error( RtAudioError::INVALID_USE );
\r
8368 if ( stream_.state == STREAM_STOPPED ) {
\r
8369 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8370 error( RtAudioError::WARNING );
\r
8374 stream_.state = STREAM_STOPPED;
\r
8375 MUTEX_LOCK( &stream_.mutex );
\r
8377 if ( pah && pah->s_play ) {
\r
// NOTE(review): "pa_error" declaration (line 8378) lost from this view.
8379 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8380 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8381 pa_strerror( pa_error ) << ".";
\r
8382 errorText_ = errorStream_.str();
\r
// Unlock before error(): SYSTEM_ERROR may throw and must not hold the mutex.
8383 MUTEX_UNLOCK( &stream_.mutex );
\r
8384 error( RtAudioError::SYSTEM_ERROR );
\r
8389 stream_.state = STREAM_STOPPED;
\r
8390 MUTEX_UNLOCK( &stream_.mutex );
\r
// Validate the requested parameters, allocate buffers and the PulseAudio
// handle, open the simple-API connection(s), and spawn the callback thread.
// Returns false on any failure (cleanup happens at the "error" label whose
// lines are partly lost from this view).
8393 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8394 unsigned int channels, unsigned int firstChannel,
\r
8395 unsigned int sampleRate, RtAudioFormat format,
\r
8396 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8398 PulseAudioHandle *pah = 0;
\r
8399 unsigned long bufferBytes = 0;
\r
8400 pa_sample_spec ss;
\r
// Only the single virtual device 0, mono/stereo, channel offset 0 are valid.
8402 if ( device != 0 ) return false;
\r
8403 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8404 if ( channels != 1 && channels != 2 ) {
\r
8405 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8408 ss.channels = channels;
\r
8410 if ( firstChannel != 0 ) return false;
\r
// Accept the sample rate only if it appears in the supported table.
8412 bool sr_found = false;
\r
8413 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8414 if ( sampleRate == *sr ) {
\r
8416 stream_.sampleRate = sampleRate;
\r
8417 ss.rate = sampleRate;
\r
8421 if ( !sr_found ) {
\r
8422 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
// Map the RtAudio format onto a native PulseAudio sample format if possible.
8426 bool sf_found = 0;
\r
8427 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8428 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8429 if ( format == sf->rtaudio_format ) {
\r
8431 stream_.userFormat = sf->rtaudio_format;
\r
8432 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8433 ss.format = sf->pa_format;
\r
// Fall back to float32 on the device side and convert internally.
8437 if ( !sf_found ) { // Use internal data format conversion.
\r
8438 stream_.userFormat = format;
\r
8439 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8440 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8443 // Set other stream parameters.
\r
8444 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8445 else stream_.userInterleaved = true;
\r
8446 stream_.deviceInterleaved[mode] = true;
\r
8447 stream_.nBuffers = 1;
\r
8448 stream_.doByteSwap[mode] = false;
\r
8449 stream_.nUserChannels[mode] = channels;
\r
8450 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8451 stream_.channelOffset[mode] = 0;
\r
8452 std::string streamName = "RtAudio";
\r
8454 // Set flags for buffer conversion.
\r
8455 stream_.doConvertBuffer[mode] = false;
\r
8456 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8457 stream_.doConvertBuffer[mode] = true;
\r
8458 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8459 stream_.doConvertBuffer[mode] = true;
\r
8461 // Allocate necessary internal buffers.
\r
8462 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8463 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8464 if ( stream_.userBuffer[mode] == NULL ) {
\r
8465 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8468 stream_.bufferSize = *bufferSize;
\r
// A device-format staging buffer is only needed when converting; for duplex,
// reuse the OUTPUT buffer if it is already large enough.
8470 if ( stream_.doConvertBuffer[mode] ) {
\r
8472 bool makeBuffer = true;
\r
8473 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8474 if ( mode == INPUT ) {
\r
8475 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8476 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8477 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8481 if ( makeBuffer ) {
\r
8482 bufferBytes *= *bufferSize;
\r
8483 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8484 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8485 if ( stream_.deviceBuffer == NULL ) {
\r
8486 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8492 stream_.device[mode] = device;
\r
8494 // Setup the buffer conversion information structure.
\r
8495 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
// Create the per-stream handle on first open (OUTPUT half of a duplex
// stream); the second open reuses it.
8497 if ( !stream_.apiHandle ) {
\r
8498 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8500 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8504 stream_.apiHandle = pah;
\r
8505 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8506 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8510 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8513 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
// NOTE(review): the mode dispatch around here (lines 8514-8515, 8523-8526,
// 8530-8535, including "int error;") was lost from this view.
8516 pa_buffer_attr buffer_attr;
\r
8517 buffer_attr.fragsize = bufferBytes;
\r
8518 buffer_attr.maxlength = -1;
\r
8520 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8521 if ( !pah->s_rec ) {
\r
8522 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8527 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8528 if ( !pah->s_play ) {
\r
8529 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
// Second open of the opposite mode upgrades the stream to DUPLEX.
8537 if ( stream_.mode == UNINITIALIZED )
\r
8538 stream_.mode = mode;
\r
8539 else if ( stream_.mode == mode )
\r
8542 stream_.mode = DUPLEX;
\r
// Spawn the callback thread once per stream.
8544 if ( !stream_.callbackInfo.isRunning ) {
\r
8545 stream_.callbackInfo.object = this;
\r
8546 stream_.callbackInfo.isRunning = true;
\r
8547 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8548 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8553 stream_.state = STREAM_STOPPED;
\r
// Error-path cleanup (the "error:" label itself was lost from this view).
8557 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8558 pthread_cond_destroy( &pah->runnable_cv );
\r
8560 stream_.apiHandle = 0;
\r
8563 for ( int i=0; i<2; i++ ) {
\r
8564 if ( stream_.userBuffer[i] ) {
\r
8565 free( stream_.userBuffer[i] );
\r
8566 stream_.userBuffer[i] = 0;
\r
8570 if ( stream_.deviceBuffer ) {
\r
8571 free( stream_.deviceBuffer );
\r
8572 stream_.deviceBuffer = 0;
\r
8578 //******************** End of __LINUX_PULSE__ *********************//
\r
8581 #if defined(__LINUX_OSS__)
\r
8583 #include <unistd.h>
\r
8584 #include <sys/ioctl.h>
\r
8585 #include <unistd.h>
\r
8586 #include <fcntl.h>
\r
8587 #include <sys/soundcard.h>
\r
8588 #include <errno.h>
\r
8591 static void *ossCallbackHandler(void * ptr);
\r
8593 // A structure to hold various information related to the OSS API
\r
8594 // implementation.
\r
// Per-stream state for the OSS backend: the playback/capture file
// descriptors plus the condition variable used to park the callback thread.
// NOTE(review): extraction fragment — the xrun/triggered member
// declarations (initialized by the constructor line below) and the
// closing brace were lost from this view.
8595 struct OssHandle {
\r
8596 int id[2]; // device ids
\r
8599 pthread_cond_t runnable;
\r
8602 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor: all initialization is deferred to probeDeviceOpen().
8605 RtApiOss :: RtApiOss()
\r
8607 // Nothing to do here.
\r
// Destructor: close the stream if the user did not do so explicitly.
8610 RtApiOss :: ~RtApiOss()
\r
8612 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count OSS audio devices by querying /dev/mixer with SNDCTL_SYSINFO
// (an OSS v4 ioctl); failures are reported as warnings and yield 0
// (the "return 0;" lines were lost from this view).
8615 unsigned int RtApiOss :: getDeviceCount( void )
\r
8617 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8618 if ( mixerfd == -1 ) {
\r
8619 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8620 error( RtAudioError::WARNING );
\r
8624 oss_sysinfo sysinfo;
\r
8625 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8627 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8628 error( RtAudioError::WARNING );
\r
// NOTE(review): the close( mixerfd ) calls on both paths were lost from view.
8633 return sysinfo.numaudios;
\r
// Probe one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl: channel
// capabilities, native sample formats, and supported sample rates.
// info.probed stays false until the probe fully succeeds.
8636 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8638 RtAudio::DeviceInfo info;
\r
8639 info.probed = false;
\r
8641 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8642 if ( mixerfd == -1 ) {
\r
8643 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8644 error( RtAudioError::WARNING );
\r
8648 oss_sysinfo sysinfo;
\r
8649 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8650 if ( result == -1 ) {
\r
8652 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8653 error( RtAudioError::WARNING );
\r
8657 unsigned nDevices = sysinfo.numaudios;
\r
8658 if ( nDevices == 0 ) {
\r
8660 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8661 error( RtAudioError::INVALID_USE );
\r
8665 if ( device >= nDevices ) {
\r
8667 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8668 error( RtAudioError::INVALID_USE );
\r
8672 oss_audioinfo ainfo;
\r
8673 ainfo.dev = device;
\r
8674 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
// NOTE(review): close( mixerfd ) lines on these paths were lost from view.
8676 if ( result == -1 ) {
\r
8677 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8678 errorText_ = errorStream_.str();
\r
8679 error( RtAudioError::WARNING );
\r
// Channel capabilities from the device caps bitmask.
8684 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8685 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8686 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8687 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8688 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8691 // Probe data formats ... do for input
\r
8692 unsigned long mask = ainfo.iformats;
\r
8693 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8694 info.nativeFormats |= RTAUDIO_SINT16;
\r
8695 if ( mask & AFMT_S8 )
\r
8696 info.nativeFormats |= RTAUDIO_SINT8;
\r
8697 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8698 info.nativeFormats |= RTAUDIO_SINT32;
\r
8699 if ( mask & AFMT_FLOAT )
\r
8700 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8701 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8702 info.nativeFormats |= RTAUDIO_SINT24;
\r
8704 // Check that we have at least one supported format
\r
8705 if ( info.nativeFormats == 0 ) {
\r
8706 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8707 errorText_ = errorStream_.str();
\r
8708 error( RtAudioError::WARNING );
\r
8712 // Probe the supported sample rates.
\r
8713 info.sampleRates.clear();
\r
// If the driver reports an explicit rate list, intersect it with RtAudio's
// SAMPLE_RATES table; otherwise (else branch, lines lost) use min/max range.
8714 if ( ainfo.nrates ) {
\r
8715 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8716 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8717 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8718 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
// Prefer the highest common rate not exceeding 48 kHz.
8720 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8721 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8729 // Check min and max rate values;
\r
8730 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8731 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8732 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8734 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8735 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8740 if ( info.sampleRates.size() == 0 ) {
\r
8741 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8742 errorText_ = errorStream_.str();
\r
8743 error( RtAudioError::WARNING );
\r
8746 info.probed = true;
\r
8747 info.name = ainfo.name;
\r
// NOTE(review): fragment — the trailing "return info;" was lost from this view.
\r
// Open and configure one direction (INPUT or OUTPUT) of an OSS stream:
// validate the device, open its device node, negotiate channels / sample
// format / fragment size / rate, allocate conversion buffers, and spawn the
// callback thread. Returns false on failure (cleanup at the "error" label,
// partly lost from this view).
// NOTE(review): this is the most heavily truncated block in the chunk —
// the declarations of "flags" and "fd" and many closing braces/else
// branches fall in the lost lines; restore from upstream before editing.
8754 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8755 unsigned int firstChannel, unsigned int sampleRate,
\r
8756 RtAudioFormat format, unsigned int *bufferSize,
\r
8757 RtAudio::StreamOptions *options )
\r
8759 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8760 if ( mixerfd == -1 ) {
\r
8761 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8765 oss_sysinfo sysinfo;
\r
8766 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8767 if ( result == -1 ) {
\r
8769 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8773 unsigned nDevices = sysinfo.numaudios;
\r
8774 if ( nDevices == 0 ) {
\r
8775 // This should not happen because a check is made before this function is called.
\r
8777 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8781 if ( device >= nDevices ) {
\r
8782 // This should not happen because a check is made before this function is called.
\r
8784 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8788 oss_audioinfo ainfo;
\r
8789 ainfo.dev = device;
\r
8790 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8792 if ( result == -1 ) {
\r
8793 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8794 errorText_ = errorStream_.str();
\r
8798 // Check if device supports input or output
\r
8799 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8800 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8801 if ( mode == OUTPUT )
\r
8802 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8804 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8805 errorText_ = errorStream_.str();
\r
8810 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8811 if ( mode == OUTPUT )
\r
8812 flags |= O_WRONLY;
\r
8813 else { // mode == INPUT
\r
// OSS duplex on a single device requires closing the playback fd and
// reopening the node read/write.
8814 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8815 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8816 close( handle->id[0] );
\r
8817 handle->id[0] = 0;
\r
8818 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8819 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8820 errorText_ = errorStream_.str();
\r
8823 // Check that the number previously set channels is the same.
\r
8824 if ( stream_.nUserChannels[0] != channels ) {
\r
8825 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8826 errorText_ = errorStream_.str();
\r
8832 flags |= O_RDONLY;
\r
8835 // Set exclusive access if specified.
\r
8836 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8838 // Try to open the device.
\r
8840 fd = open( ainfo.devnode, flags, 0 );
\r
// NOTE(review): the "if ( fd == -1 )" guard (line 8841) was lost from view.
8842 if ( errno == EBUSY )
\r
8843 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8845 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8846 errorText_ = errorStream_.str();
\r
8850 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8852 if ( flags | O_RDWR ) {
\r
8853 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8854 if ( result == -1) {
\r
8855 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8856 errorText_ = errorStream_.str();
\r
8862 // Check the device channel support.
\r
8863 stream_.nUserChannels[mode] = channels;
\r
8864 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8866 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8867 errorText_ = errorStream_.str();
\r
8871 // Set the number of channels.
\r
8872 int deviceChannels = channels + firstChannel;
\r
8873 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8874 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8876 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8877 errorText_ = errorStream_.str();
\r
8880 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8882 // Get the data format mask
\r
8884 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8885 if ( result == -1 ) {
\r
8887 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8888 errorText_ = errorStream_.str();
\r
8892 // Determine how to set the device format.
\r
8893 stream_.userFormat = format;
\r
8894 int deviceFormat = -1;
\r
8895 stream_.doByteSwap[mode] = false;
\r
// First try the user's exact format natively (NE = native endian,
// OE = opposite endian, which requires byte swapping).
8896 if ( format == RTAUDIO_SINT8 ) {
\r
8897 if ( mask & AFMT_S8 ) {
\r
8898 deviceFormat = AFMT_S8;
\r
8899 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8902 else if ( format == RTAUDIO_SINT16 ) {
\r
8903 if ( mask & AFMT_S16_NE ) {
\r
8904 deviceFormat = AFMT_S16_NE;
\r
8905 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8907 else if ( mask & AFMT_S16_OE ) {
\r
8908 deviceFormat = AFMT_S16_OE;
\r
8909 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8910 stream_.doByteSwap[mode] = true;
\r
8913 else if ( format == RTAUDIO_SINT24 ) {
\r
8914 if ( mask & AFMT_S24_NE ) {
\r
8915 deviceFormat = AFMT_S24_NE;
\r
8916 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8918 else if ( mask & AFMT_S24_OE ) {
\r
8919 deviceFormat = AFMT_S24_OE;
\r
8920 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8921 stream_.doByteSwap[mode] = true;
\r
8924 else if ( format == RTAUDIO_SINT32 ) {
\r
8925 if ( mask & AFMT_S32_NE ) {
\r
8926 deviceFormat = AFMT_S32_NE;
\r
8927 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8929 else if ( mask & AFMT_S32_OE ) {
\r
8930 deviceFormat = AFMT_S32_OE;
\r
8931 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8932 stream_.doByteSwap[mode] = true;
\r
// Requested format unsupported: fall back through the preference order
// S16_NE, S32_NE, S24_NE, then the byte-swapped variants, then S8.
8936 if ( deviceFormat == -1 ) {
\r
8937 // The user requested format is not natively supported by the device.
\r
8938 if ( mask & AFMT_S16_NE ) {
\r
8939 deviceFormat = AFMT_S16_NE;
\r
8940 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8942 else if ( mask & AFMT_S32_NE ) {
\r
8943 deviceFormat = AFMT_S32_NE;
\r
8944 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8946 else if ( mask & AFMT_S24_NE ) {
\r
8947 deviceFormat = AFMT_S24_NE;
\r
8948 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8950 else if ( mask & AFMT_S16_OE ) {
\r
8951 deviceFormat = AFMT_S16_OE;
\r
8952 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8953 stream_.doByteSwap[mode] = true;
\r
8955 else if ( mask & AFMT_S32_OE ) {
\r
8956 deviceFormat = AFMT_S32_OE;
\r
8957 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8958 stream_.doByteSwap[mode] = true;
\r
8960 else if ( mask & AFMT_S24_OE ) {
\r
8961 deviceFormat = AFMT_S24_OE;
\r
8962 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8963 stream_.doByteSwap[mode] = true;
\r
8965 else if ( mask & AFMT_S8) {
\r
8966 deviceFormat = AFMT_S8;
\r
8967 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8971 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8972 // This really shouldn't happen ...
\r
8974 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8975 errorText_ = errorStream_.str();
\r
8979 // Set the data format.
\r
8980 int temp = deviceFormat;
\r
8981 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8982 if ( result == -1 || deviceFormat != temp ) {
\r
8984 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8985 errorText_ = errorStream_.str();
\r
8989 // Attempt to set the buffer size. According to OSS, the minimum
\r
8990 // number of buffers is two. The supposed minimum buffer size is 16
\r
8991 // bytes, so that will be our lower bound. The argument to this
\r
8992 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8993 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8994 // We'll check the actual value used near the end of the setup
\r
8996 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8997 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
// NOTE(review): the "int buffers = 0;" declaration (line 8998) was lost.
8999 if ( options ) buffers = options->numberOfBuffers;
\r
9000 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9001 if ( buffers < 2 ) buffers = 3;
\r
9002 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9003 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9004 if ( result == -1 ) {
\r
9006 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9007 errorText_ = errorStream_.str();
\r
9010 stream_.nBuffers = buffers;
\r
9012 // Save buffer size (in sample frames).
\r
9013 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9014 stream_.bufferSize = *bufferSize;
\r
9016 // Set the sample rate.
\r
9017 int srate = sampleRate;
\r
9018 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9019 if ( result == -1 ) {
\r
9021 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9022 errorText_ = errorStream_.str();
\r
9026 // Verify the sample rate setup worked.
\r
// Drivers may round the rate; accept anything within 100 Hz of the request.
9027 if ( abs( srate - sampleRate ) > 100 ) {
\r
9029 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9030 errorText_ = errorStream_.str();
\r
9033 stream_.sampleRate = sampleRate;
\r
9035 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9036 // We're doing duplex setup here.
\r
9037 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9038 stream_.nDeviceChannels[0] = deviceChannels;
\r
9041 // Set interleaving parameters.
\r
9042 stream_.userInterleaved = true;
\r
9043 stream_.deviceInterleaved[mode] = true;
\r
9044 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9045 stream_.userInterleaved = false;
\r
9047 // Set flags for buffer conversion
\r
9048 stream_.doConvertBuffer[mode] = false;
\r
9049 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9050 stream_.doConvertBuffer[mode] = true;
\r
9051 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9052 stream_.doConvertBuffer[mode] = true;
\r
9053 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9054 stream_.nUserChannels[mode] > 1 )
\r
9055 stream_.doConvertBuffer[mode] = true;
\r
9057 // Allocate the stream handles if necessary and then save.
\r
9058 if ( stream_.apiHandle == 0 ) {
\r
// NOTE(review): the "try {" opening (line 9059) was lost from this view.
9060 handle = new OssHandle;
\r
9062 catch ( std::bad_alloc& ) {
\r
9063 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9067 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9068 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9072 stream_.apiHandle = (void *) handle;
\r
9075 handle = (OssHandle *) stream_.apiHandle;
\r
9077 handle->id[mode] = fd;
\r
9079 // Allocate necessary internal buffers.
\r
9080 unsigned long bufferBytes;
\r
9081 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9082 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9083 if ( stream_.userBuffer[mode] == NULL ) {
\r
9084 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
// Device-format staging buffer, reusing the OUTPUT one for duplex if large
// enough (same scheme as the PulseAudio backend above).
9088 if ( stream_.doConvertBuffer[mode] ) {
\r
9090 bool makeBuffer = true;
\r
9091 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9092 if ( mode == INPUT ) {
\r
9093 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9094 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9095 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9099 if ( makeBuffer ) {
\r
9100 bufferBytes *= *bufferSize;
\r
9101 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9102 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9103 if ( stream_.deviceBuffer == NULL ) {
\r
9104 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9110 stream_.device[mode] = device;
\r
9111 stream_.state = STREAM_STOPPED;
\r
9113 // Setup the buffer conversion information structure.
\r
9114 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9116 // Setup thread if necessary.
\r
9117 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9118 // We had already set up an output stream.
\r
9119 stream_.mode = DUPLEX;
\r
9120 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9123 stream_.mode = mode;
\r
9125 // Setup callback thread.
\r
9126 stream_.callbackInfo.object = (void *) this;
\r
9128 // Set the thread attributes for joinable and realtime scheduling
\r
9129 // priority. The higher priority will only take affect if the
\r
9130 // program is run as root or suid.
\r
9131 pthread_attr_t attr;
\r
9132 pthread_attr_init( &attr );
\r
9133 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9134 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9135 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9136 struct sched_param param;
\r
9137 int priority = options->priority;
\r
9138 int min = sched_get_priority_min( SCHED_RR );
\r
9139 int max = sched_get_priority_max( SCHED_RR );
\r
9140 if ( priority < min ) priority = min;
\r
9141 else if ( priority > max ) priority = max;
\r
9142 param.sched_priority = priority;
\r
9143 pthread_attr_setschedparam( &attr, &param );
\r
9144 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9147 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9149 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9152 stream_.callbackInfo.isRunning = true;
\r
9153 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9154 pthread_attr_destroy( &attr );
\r
// NOTE(review): the "if ( result )" guard (line 9155) was lost from view.
9156 stream_.callbackInfo.isRunning = false;
\r
9157 errorText_ = "RtApiOss::error creating callback thread!";
\r
// Error-path cleanup (label and some surrounding lines lost from view).
9166 pthread_cond_destroy( &handle->runnable );
\r
9167 if ( handle->id[0] ) close( handle->id[0] );
\r
9168 if ( handle->id[1] ) close( handle->id[1] );
\r
9170 stream_.apiHandle = 0;
\r
9173 for ( int i=0; i<2; i++ ) {
\r
9174 if ( stream_.userBuffer[i] ) {
\r
9175 free( stream_.userBuffer[i] );
\r
9176 stream_.userBuffer[i] = 0;
\r
9180 if ( stream_.deviceBuffer ) {
\r
9181 free( stream_.deviceBuffer );
\r
9182 stream_.deviceBuffer = 0;
\r
// Tear down the OSS stream: stop the callback thread, halt and close the
// device file descriptors, free buffers, and reset stream state.
9188 void RtApiOss :: closeStream()
\r
9190 if ( stream_.state == STREAM_CLOSED ) {
\r
9191 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9192 error( RtAudioError::WARNING );
\r
9196 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// Tell the callback thread to exit, wake it if parked, and join it.
9197 stream_.callbackInfo.isRunning = false;
\r
9198 MUTEX_LOCK( &stream_.mutex );
\r
9199 if ( stream_.state == STREAM_STOPPED )
\r
9200 pthread_cond_signal( &handle->runnable );
\r
9201 MUTEX_UNLOCK( &stream_.mutex );
\r
9202 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// Halt any in-flight DMA on the open fds before closing them.
9204 if ( stream_.state == STREAM_RUNNING ) {
\r
9205 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9206 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9208 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9209 stream_.state = STREAM_STOPPED;
\r
// NOTE(review): fragment — the "if ( handle )" guard and "delete handle;"
// lines around here were lost from this view.
9213 pthread_cond_destroy( &handle->runnable );
\r
9214 if ( handle->id[0] ) close( handle->id[0] );
\r
9215 if ( handle->id[1] ) close( handle->id[1] );
\r
9217 stream_.apiHandle = 0;
\r
9220 for ( int i=0; i<2; i++ ) {
\r
9221 if ( stream_.userBuffer[i] ) {
\r
9222 free( stream_.userBuffer[i] );
\r
9223 stream_.userBuffer[i] = 0;
\r
9227 if ( stream_.deviceBuffer ) {
\r
9228 free( stream_.deviceBuffer );
\r
9229 stream_.deviceBuffer = 0;
\r
9232 stream_.mode = UNINITIALIZED;
\r
9233 stream_.state = STREAM_CLOSED;
\r
9236 void RtApiOss :: startStream()
\r
9239 if ( stream_.state == STREAM_RUNNING ) {
\r
9240 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9241 error( RtAudioError::WARNING );
\r
9245 MUTEX_LOCK( &stream_.mutex );
\r
9247 stream_.state = STREAM_RUNNING;
\r
9249 // No need to do anything else here ... OSS automatically starts
\r
9250 // when fed samples.
\r
9252 MUTEX_UNLOCK( &stream_.mutex );
\r
9254 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9255 pthread_cond_signal( &handle->runnable );
\r
9258 void RtApiOss :: stopStream()
\r
9261 if ( stream_.state == STREAM_STOPPED ) {
\r
9262 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9263 error( RtAudioError::WARNING );
\r
9267 MUTEX_LOCK( &stream_.mutex );
\r
9269 // The state might change while waiting on a mutex.
\r
9270 if ( stream_.state == STREAM_STOPPED ) {
\r
9271 MUTEX_UNLOCK( &stream_.mutex );
\r
9276 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9277 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9279 // Flush the output with zeros a few times.
\r
9282 RtAudioFormat format;
\r
9284 if ( stream_.doConvertBuffer[0] ) {
\r
9285 buffer = stream_.deviceBuffer;
\r
9286 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9287 format = stream_.deviceFormat[0];
\r
9290 buffer = stream_.userBuffer[0];
\r
9291 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9292 format = stream_.userFormat;
\r
9295 memset( buffer, 0, samples * formatBytes(format) );
\r
9296 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9297 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9298 if ( result == -1 ) {
\r
9299 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9300 error( RtAudioError::WARNING );
\r
9304 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9305 if ( result == -1 ) {
\r
9306 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9307 errorText_ = errorStream_.str();
\r
9310 handle->triggered = false;
\r
9313 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9314 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9315 if ( result == -1 ) {
\r
9316 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9317 errorText_ = errorStream_.str();
\r
9323 stream_.state = STREAM_STOPPED;
\r
9324 MUTEX_UNLOCK( &stream_.mutex );
\r
9326 if ( result != -1 ) return;
\r
9327 error( RtAudioError::SYSTEM_ERROR );
\r
9330 void RtApiOss :: abortStream()
\r
9333 if ( stream_.state == STREAM_STOPPED ) {
\r
9334 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9335 error( RtAudioError::WARNING );
\r
9339 MUTEX_LOCK( &stream_.mutex );
\r
9341 // The state might change while waiting on a mutex.
\r
9342 if ( stream_.state == STREAM_STOPPED ) {
\r
9343 MUTEX_UNLOCK( &stream_.mutex );
\r
9348 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9349 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9350 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9351 if ( result == -1 ) {
\r
9352 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9353 errorText_ = errorStream_.str();
\r
9356 handle->triggered = false;
\r
9359 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9360 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9361 if ( result == -1 ) {
\r
9362 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9363 errorText_ = errorStream_.str();
\r
9369 stream_.state = STREAM_STOPPED;
\r
9370 MUTEX_UNLOCK( &stream_.mutex );
\r
9372 if ( result != -1 ) return;
\r
9373 error( RtAudioError::SYSTEM_ERROR );
\r
9376 void RtApiOss :: callbackEvent()
\r
9378 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9379 if ( stream_.state == STREAM_STOPPED ) {
\r
9380 MUTEX_LOCK( &stream_.mutex );
\r
9381 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9382 if ( stream_.state != STREAM_RUNNING ) {
\r
9383 MUTEX_UNLOCK( &stream_.mutex );
\r
9386 MUTEX_UNLOCK( &stream_.mutex );
\r
9389 if ( stream_.state == STREAM_CLOSED ) {
\r
9390 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9391 error( RtAudioError::WARNING );
\r
9395 // Invoke user callback to get fresh output data.
\r
9396 int doStopStream = 0;
\r
9397 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9398 double streamTime = getStreamTime();
\r
9399 RtAudioStreamStatus status = 0;
\r
9400 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9401 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9402 handle->xrun[0] = false;
\r
9404 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9405 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9406 handle->xrun[1] = false;
\r
9408 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9409 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9410 if ( doStopStream == 2 ) {
\r
9411 this->abortStream();
\r
9415 MUTEX_LOCK( &stream_.mutex );
\r
9417 // The state might change while waiting on a mutex.
\r
9418 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9423 RtAudioFormat format;
\r
9425 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9427 // Setup parameters and do buffer conversion if necessary.
\r
9428 if ( stream_.doConvertBuffer[0] ) {
\r
9429 buffer = stream_.deviceBuffer;
\r
9430 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9431 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9432 format = stream_.deviceFormat[0];
\r
9435 buffer = stream_.userBuffer[0];
\r
9436 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9437 format = stream_.userFormat;
\r
9440 // Do byte swapping if necessary.
\r
9441 if ( stream_.doByteSwap[0] )
\r
9442 byteSwapBuffer( buffer, samples, format );
\r
9444 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9446 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9447 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9448 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9449 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9450 handle->triggered = true;
\r
9453 // Write samples to device.
\r
9454 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9456 if ( result == -1 ) {
\r
9457 // We'll assume this is an underrun, though there isn't a
\r
9458 // specific means for determining that.
\r
9459 handle->xrun[0] = true;
\r
9460 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9461 error( RtAudioError::WARNING );
\r
9462 // Continue on to input section.
\r
9466 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9468 // Setup parameters.
\r
9469 if ( stream_.doConvertBuffer[1] ) {
\r
9470 buffer = stream_.deviceBuffer;
\r
9471 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9472 format = stream_.deviceFormat[1];
\r
9475 buffer = stream_.userBuffer[1];
\r
9476 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9477 format = stream_.userFormat;
\r
9480 // Read samples from device.
\r
9481 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9483 if ( result == -1 ) {
\r
9484 // We'll assume this is an overrun, though there isn't a
\r
9485 // specific means for determining that.
\r
9486 handle->xrun[1] = true;
\r
9487 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9488 error( RtAudioError::WARNING );
\r
9492 // Do byte swapping if necessary.
\r
9493 if ( stream_.doByteSwap[1] )
\r
9494 byteSwapBuffer( buffer, samples, format );
\r
9496 // Do buffer conversion if necessary.
\r
9497 if ( stream_.doConvertBuffer[1] )
\r
9498 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9502 MUTEX_UNLOCK( &stream_.mutex );
\r
9504 RtApi::tickStreamTime();
\r
9505 if ( doStopStream == 1 ) this->stopStream();
\r
9508 static void *ossCallbackHandler( void *ptr )
\r
9510 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9511 RtApiOss *object = (RtApiOss *) info->object;
\r
9512 bool *isRunning = &info->isRunning;
\r
9514 while ( *isRunning == true ) {
\r
9515 pthread_testcancel();
\r
9516 object->callbackEvent();
\r
9519 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
9534 void RtApi :: error( RtAudioError::Type type )
\r
9536 errorStream_.str(""); // clear the ostringstream
\r
9538 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9539 if ( errorCallback ) {
\r
9540 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9542 if ( firstErrorOccurred_ )
\r
9545 firstErrorOccurred_ = true;
\r
9546 const std::string errorMessage = errorText_;
\r
9548 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9549 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9553 errorCallback( type, errorMessage );
\r
9554 firstErrorOccurred_ = false;
\r
9558 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9559 std::cerr << '\n' << errorText_ << "\n\n";
\r
9560 else if ( type != RtAudioError::WARNING )
\r
9561 throw( RtAudioError( errorText_, type ) );
\r
9564 void RtApi :: verifyStream()
\r
9566 if ( stream_.state == STREAM_CLOSED ) {
\r
9567 errorText_ = "RtApi:: a stream is not open!";
\r
9568 error( RtAudioError::INVALID_USE );
\r
9572 void RtApi :: clearStreamInfo()
\r
9574 stream_.mode = UNINITIALIZED;
\r
9575 stream_.state = STREAM_CLOSED;
\r
9576 stream_.sampleRate = 0;
\r
9577 stream_.bufferSize = 0;
\r
9578 stream_.nBuffers = 0;
\r
9579 stream_.userFormat = 0;
\r
9580 stream_.userInterleaved = true;
\r
9581 stream_.streamTime = 0.0;
\r
9582 stream_.apiHandle = 0;
\r
9583 stream_.deviceBuffer = 0;
\r
9584 stream_.callbackInfo.callback = 0;
\r
9585 stream_.callbackInfo.userData = 0;
\r
9586 stream_.callbackInfo.isRunning = false;
\r
9587 stream_.callbackInfo.errorCallback = 0;
\r
9588 for ( int i=0; i<2; i++ ) {
\r
9589 stream_.device[i] = 11111;
\r
9590 stream_.doConvertBuffer[i] = false;
\r
9591 stream_.deviceInterleaved[i] = true;
\r
9592 stream_.doByteSwap[i] = false;
\r
9593 stream_.nUserChannels[i] = 0;
\r
9594 stream_.nDeviceChannels[i] = 0;
\r
9595 stream_.channelOffset[i] = 0;
\r
9596 stream_.deviceFormat[i] = 0;
\r
9597 stream_.latency[i] = 0;
\r
9598 stream_.userBuffer[i] = 0;
\r
9599 stream_.convertInfo[i].channels = 0;
\r
9600 stream_.convertInfo[i].inJump = 0;
\r
9601 stream_.convertInfo[i].outJump = 0;
\r
9602 stream_.convertInfo[i].inFormat = 0;
\r
9603 stream_.convertInfo[i].outFormat = 0;
\r
9604 stream_.convertInfo[i].inOffset.clear();
\r
9605 stream_.convertInfo[i].outOffset.clear();
\r
9609 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9611 if ( format == RTAUDIO_SINT16 )
\r
9613 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9615 else if ( format == RTAUDIO_FLOAT64 )
\r
9617 else if ( format == RTAUDIO_SINT24 )
\r
9619 else if ( format == RTAUDIO_SINT8 )
\r
9622 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9623 error( RtAudioError::WARNING );
\r
9628 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9630 if ( mode == INPUT ) { // convert device to user buffer
\r
9631 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9632 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9633 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9634 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9636 else { // convert user to device buffer
\r
9637 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9638 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9639 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9640 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9643 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9644 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9646 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9648 // Set up the interleave/deinterleave offsets.
\r
9649 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9650 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9651 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9652 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9653 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9654 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9655 stream_.convertInfo[mode].inJump = 1;
\r
9659 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9660 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9661 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9662 stream_.convertInfo[mode].outJump = 1;
\r
9666 else { // no (de)interleaving
\r
9667 if ( stream_.userInterleaved ) {
\r
9668 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9669 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9670 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9674 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9675 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9676 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9677 stream_.convertInfo[mode].inJump = 1;
\r
9678 stream_.convertInfo[mode].outJump = 1;
\r
9683 // Add channel offset.
\r
9684 if ( firstChannel > 0 ) {
\r
9685 if ( stream_.deviceInterleaved[mode] ) {
\r
9686 if ( mode == OUTPUT ) {
\r
9687 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9688 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9691 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9692 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9696 if ( mode == OUTPUT ) {
\r
9697 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9698 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9701 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9702 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9708 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9710 // This function does format conversion, input/output channel compensation, and
\r
9711 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9712 // the lower three bytes of a 32-bit integer.
\r
9714 // Clear our device buffer when in/out duplex device channels are different
\r
9715 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9716 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9717 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9720 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9722 Float64 *out = (Float64 *)outBuffer;
\r
9724 if (info.inFormat == RTAUDIO_SINT8) {
\r
9725 signed char *in = (signed char *)inBuffer;
\r
9726 scale = 1.0 / 127.5;
\r
9727 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9728 for (j=0; j<info.channels; j++) {
\r
9729 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9730 out[info.outOffset[j]] += 0.5;
\r
9731 out[info.outOffset[j]] *= scale;
\r
9733 in += info.inJump;
\r
9734 out += info.outJump;
\r
9737 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9738 Int16 *in = (Int16 *)inBuffer;
\r
9739 scale = 1.0 / 32767.5;
\r
9740 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9741 for (j=0; j<info.channels; j++) {
\r
9742 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9743 out[info.outOffset[j]] += 0.5;
\r
9744 out[info.outOffset[j]] *= scale;
\r
9746 in += info.inJump;
\r
9747 out += info.outJump;
\r
9750 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9751 Int24 *in = (Int24 *)inBuffer;
\r
9752 scale = 1.0 / 8388607.5;
\r
9753 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9754 for (j=0; j<info.channels; j++) {
\r
9755 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9756 out[info.outOffset[j]] += 0.5;
\r
9757 out[info.outOffset[j]] *= scale;
\r
9759 in += info.inJump;
\r
9760 out += info.outJump;
\r
9763 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9764 Int32 *in = (Int32 *)inBuffer;
\r
9765 scale = 1.0 / 2147483647.5;
\r
9766 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9767 for (j=0; j<info.channels; j++) {
\r
9768 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9769 out[info.outOffset[j]] += 0.5;
\r
9770 out[info.outOffset[j]] *= scale;
\r
9772 in += info.inJump;
\r
9773 out += info.outJump;
\r
9776 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9777 Float32 *in = (Float32 *)inBuffer;
\r
9778 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9779 for (j=0; j<info.channels; j++) {
\r
9780 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9782 in += info.inJump;
\r
9783 out += info.outJump;
\r
9786 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9787 // Channel compensation and/or (de)interleaving only.
\r
9788 Float64 *in = (Float64 *)inBuffer;
\r
9789 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9790 for (j=0; j<info.channels; j++) {
\r
9791 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9793 in += info.inJump;
\r
9794 out += info.outJump;
\r
9798 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9800 Float32 *out = (Float32 *)outBuffer;
\r
9802 if (info.inFormat == RTAUDIO_SINT8) {
\r
9803 signed char *in = (signed char *)inBuffer;
\r
9804 scale = (Float32) ( 1.0 / 127.5 );
\r
9805 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9806 for (j=0; j<info.channels; j++) {
\r
9807 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9808 out[info.outOffset[j]] += 0.5;
\r
9809 out[info.outOffset[j]] *= scale;
\r
9811 in += info.inJump;
\r
9812 out += info.outJump;
\r
9815 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9816 Int16 *in = (Int16 *)inBuffer;
\r
9817 scale = (Float32) ( 1.0 / 32767.5 );
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9821 out[info.outOffset[j]] += 0.5;
\r
9822 out[info.outOffset[j]] *= scale;
\r
9824 in += info.inJump;
\r
9825 out += info.outJump;
\r
9828 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9829 Int24 *in = (Int24 *)inBuffer;
\r
9830 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9832 for (j=0; j<info.channels; j++) {
\r
9833 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9834 out[info.outOffset[j]] += 0.5;
\r
9835 out[info.outOffset[j]] *= scale;
\r
9837 in += info.inJump;
\r
9838 out += info.outJump;
\r
9841 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9842 Int32 *in = (Int32 *)inBuffer;
\r
9843 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9844 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9845 for (j=0; j<info.channels; j++) {
\r
9846 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9847 out[info.outOffset[j]] += 0.5;
\r
9848 out[info.outOffset[j]] *= scale;
\r
9850 in += info.inJump;
\r
9851 out += info.outJump;
\r
9854 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9855 // Channel compensation and/or (de)interleaving only.
\r
9856 Float32 *in = (Float32 *)inBuffer;
\r
9857 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9858 for (j=0; j<info.channels; j++) {
\r
9859 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9861 in += info.inJump;
\r
9862 out += info.outJump;
\r
9865 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9866 Float64 *in = (Float64 *)inBuffer;
\r
9867 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9868 for (j=0; j<info.channels; j++) {
\r
9869 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9871 in += info.inJump;
\r
9872 out += info.outJump;
\r
9876 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9877 Int32 *out = (Int32 *)outBuffer;
\r
9878 if (info.inFormat == RTAUDIO_SINT8) {
\r
9879 signed char *in = (signed char *)inBuffer;
\r
9880 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9881 for (j=0; j<info.channels; j++) {
\r
9882 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9883 out[info.outOffset[j]] <<= 24;
\r
9885 in += info.inJump;
\r
9886 out += info.outJump;
\r
9889 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9890 Int16 *in = (Int16 *)inBuffer;
\r
9891 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9892 for (j=0; j<info.channels; j++) {
\r
9893 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9894 out[info.outOffset[j]] <<= 16;
\r
9896 in += info.inJump;
\r
9897 out += info.outJump;
\r
9900 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9901 Int24 *in = (Int24 *)inBuffer;
\r
9902 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9903 for (j=0; j<info.channels; j++) {
\r
9904 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9905 out[info.outOffset[j]] <<= 8;
\r
9907 in += info.inJump;
\r
9908 out += info.outJump;
\r
9911 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9912 // Channel compensation and/or (de)interleaving only.
\r
9913 Int32 *in = (Int32 *)inBuffer;
\r
9914 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9915 for (j=0; j<info.channels; j++) {
\r
9916 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9918 in += info.inJump;
\r
9919 out += info.outJump;
\r
9922 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9923 Float32 *in = (Float32 *)inBuffer;
\r
9924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9925 for (j=0; j<info.channels; j++) {
\r
9926 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9928 in += info.inJump;
\r
9929 out += info.outJump;
\r
9932 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9933 Float64 *in = (Float64 *)inBuffer;
\r
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9935 for (j=0; j<info.channels; j++) {
\r
9936 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9938 in += info.inJump;
\r
9939 out += info.outJump;
\r
9943 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9944 Int24 *out = (Int24 *)outBuffer;
\r
9945 if (info.inFormat == RTAUDIO_SINT8) {
\r
9946 signed char *in = (signed char *)inBuffer;
\r
9947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9948 for (j=0; j<info.channels; j++) {
\r
9949 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9950 //out[info.outOffset[j]] <<= 16;
\r
9952 in += info.inJump;
\r
9953 out += info.outJump;
\r
9956 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9957 Int16 *in = (Int16 *)inBuffer;
\r
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9959 for (j=0; j<info.channels; j++) {
\r
9960 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9961 //out[info.outOffset[j]] <<= 8;
\r
9963 in += info.inJump;
\r
9964 out += info.outJump;
\r
9967 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9968 // Channel compensation and/or (de)interleaving only.
\r
9969 Int24 *in = (Int24 *)inBuffer;
\r
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9971 for (j=0; j<info.channels; j++) {
\r
9972 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9974 in += info.inJump;
\r
9975 out += info.outJump;
\r
9978 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9979 Int32 *in = (Int32 *)inBuffer;
\r
9980 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9981 for (j=0; j<info.channels; j++) {
\r
9982 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9983 //out[info.outOffset[j]] >>= 8;
\r
9985 in += info.inJump;
\r
9986 out += info.outJump;
\r
9989 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9990 Float32 *in = (Float32 *)inBuffer;
\r
9991 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9992 for (j=0; j<info.channels; j++) {
\r
9993 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9995 in += info.inJump;
\r
9996 out += info.outJump;
\r
9999 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10000 Float64 *in = (Float64 *)inBuffer;
\r
10001 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10002 for (j=0; j<info.channels; j++) {
\r
10003 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10005 in += info.inJump;
\r
10006 out += info.outJump;
\r
10010 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10011 Int16 *out = (Int16 *)outBuffer;
\r
10012 if (info.inFormat == RTAUDIO_SINT8) {
\r
10013 signed char *in = (signed char *)inBuffer;
\r
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10015 for (j=0; j<info.channels; j++) {
\r
10016 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10017 out[info.outOffset[j]] <<= 8;
\r
10019 in += info.inJump;
\r
10020 out += info.outJump;
\r
10023 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10024 // Channel compensation and/or (de)interleaving only.
\r
10025 Int16 *in = (Int16 *)inBuffer;
\r
10026 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10027 for (j=0; j<info.channels; j++) {
\r
10028 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10030 in += info.inJump;
\r
10031 out += info.outJump;
\r
10034 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10035 Int24 *in = (Int24 *)inBuffer;
\r
10036 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10037 for (j=0; j<info.channels; j++) {
\r
10038 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10040 in += info.inJump;
\r
10041 out += info.outJump;
\r
10044 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10045 Int32 *in = (Int32 *)inBuffer;
\r
10046 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10047 for (j=0; j<info.channels; j++) {
\r
10048 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10050 in += info.inJump;
\r
10051 out += info.outJump;
\r
10054 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10055 Float32 *in = (Float32 *)inBuffer;
\r
10056 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10057 for (j=0; j<info.channels; j++) {
\r
10058 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10060 in += info.inJump;
\r
10061 out += info.outJump;
\r
10064 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10065 Float64 *in = (Float64 *)inBuffer;
\r
10066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10067 for (j=0; j<info.channels; j++) {
\r
10068 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10070 in += info.inJump;
\r
10071 out += info.outJump;
\r
10075 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10076 signed char *out = (signed char *)outBuffer;
\r
10077 if (info.inFormat == RTAUDIO_SINT8) {
\r
10078 // Channel compensation and/or (de)interleaving only.
\r
10079 signed char *in = (signed char *)inBuffer;
\r
10080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10081 for (j=0; j<info.channels; j++) {
\r
10082 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10084 in += info.inJump;
\r
10085 out += info.outJump;
\r
10088 if (info.inFormat == RTAUDIO_SINT16) {
\r
10089 Int16 *in = (Int16 *)inBuffer;
\r
10090 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10091 for (j=0; j<info.channels; j++) {
\r
10092 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10094 in += info.inJump;
\r
10095 out += info.outJump;
\r
10098 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10099 Int24 *in = (Int24 *)inBuffer;
\r
10100 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10101 for (j=0; j<info.channels; j++) {
\r
10102 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10104 in += info.inJump;
\r
10105 out += info.outJump;
\r
10108 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10109 Int32 *in = (Int32 *)inBuffer;
\r
10110 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10111 for (j=0; j<info.channels; j++) {
\r
10112 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10114 in += info.inJump;
\r
10115 out += info.outJump;
\r
10118 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10119 Float32 *in = (Float32 *)inBuffer;
\r
10120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10121 for (j=0; j<info.channels; j++) {
\r
10122 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10124 in += info.inJump;
\r
10125 out += info.outJump;
\r
10128 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10129 Float64 *in = (Float64 *)inBuffer;
\r
10130 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10131 for (j=0; j<info.channels; j++) {
\r
10132 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10134 in += info.inJump;
\r
10135 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10145 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10147 register char val;
\r
10148 register char *ptr;
\r
10151 if ( format == RTAUDIO_SINT16 ) {
\r
10152 for ( unsigned int i=0; i<samples; i++ ) {
\r
10153 // Swap 1st and 2nd bytes.
\r
10155 *(ptr) = *(ptr+1);
\r
10158 // Increment 2 bytes.
\r
10162 else if ( format == RTAUDIO_SINT32 ||
\r
10163 format == RTAUDIO_FLOAT32 ) {
\r
10164 for ( unsigned int i=0; i<samples; i++ ) {
\r
10165 // Swap 1st and 4th bytes.
\r
10167 *(ptr) = *(ptr+3);
\r
10170 // Swap 2nd and 3rd bytes.
\r
10173 *(ptr) = *(ptr+1);
\r
10176 // Increment 3 more bytes.
\r
10180 else if ( format == RTAUDIO_SINT24 ) {
\r
10181 for ( unsigned int i=0; i<samples; i++ ) {
\r
10182 // Swap 1st and 3rd bytes.
\r
10184 *(ptr) = *(ptr+2);
\r
10187 // Increment 2 more bytes.
\r
10191 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10192 for ( unsigned int i=0; i<samples; i++ ) {
\r
10193 // Swap 1st and 8th bytes
\r
10195 *(ptr) = *(ptr+7);
\r
10198 // Swap 2nd and 7th bytes
\r
10201 *(ptr) = *(ptr+5);
\r
10204 // Swap 3rd and 6th bytes
\r
10207 *(ptr) = *(ptr+3);
\r
10210 // Swap 4th and 5th bytes
\r
10213 *(ptr) = *(ptr+1);
\r
10216 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r