1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers and string-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows: critical sections.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Pass-through for narrow strings.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) string to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer size, including the terminator.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // Unix-like systems: pthread mutexes.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
// Open and configure a CoreAudio device for one direction (mode == OUTPUT or
// INPUT) of a stream.  Steps visible below: resolve the RtAudio device index
// to an AudioDeviceID; pick the CoreAudio stream(s) that supply 'channels'
// channels starting at 'firstChannel'; negotiate buffer size, sample rate,
// and virtual/physical stream formats; allocate the CoreHandle and internal
// conversion buffers; register the I/O callback and the over/underload
// listener.  On error paths errorText_ is set before returning.
// NOTE(review): this copy of the file has lost lines during extraction
// (braces, return statements, #else/#endif lines); comments below describe
// only the logic that is visible.
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
// Re-validate the device index against the current device list.
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
// Fetch the system device list and translate the index into an
// AudioDeviceID (variable-length array sized by the runtime device count).
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
// (else branch: output scope — the 'else' line itself was lost in this copy)
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
// (continuation of the above comment was lost; total channel count check.)
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
// This stream alone holds all requested channels at the running offset.
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
// Stop once the offset falls inside the current stream; otherwise consume
// this stream's channels from the remaining offset.
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
// Re-scan to locate the first stream containing firstChannel.
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
// Count how many additional streams are needed to cover 'channels'.
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
// (streamCount update lost in this copy — presumably set from the loop;
// TODO confirm against the canonical source.)
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
// Clamp the requested buffer size to the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
// (hog_pid declaration lost in this copy; presumably a pid_t — TODO confirm.)
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
// Claim exclusive access only if another process doesn't already hold it.
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
// rateListener() (defined above) writes the device's reported rate into
// 'reportedRate' so this function can poll for the change to complete.
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
// Clean up the listener before bailing out on a failed rate change.
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
// Polls in 5 ms ticks up to a 5-second timeout.  (The usleep/sleep call
// inside the loop was lost in this copy — TODO confirm.)
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
\r
// Only renegotiate the physical format if it isn't already linear PCM at
// >= 16 bits per channel.
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
// Candidate list: pair.first encodes bit depth (24.2/24.4 distinguish the
// two unpacked 24-in-32 layouts), pair.second carries the format flags.
1186 std::vector< std::pair<UInt32, UInt32>  > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
// Try each candidate until the device accepts one.
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
// Unpacked 24-bit samples occupy 4 bytes per channel.
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
\r
// (the 'else' line was lost in this copy)
1208 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
// ('latency' declaration lost in this copy; presumably a UInt32 — TODO confirm.)
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
// Latency query failure is non-fatal: report a warning and continue.
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
// (continuation lost — the device side is always 32-bit float.)
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
// Conversion is needed whenever user and device differ in sample format,
// channel count, or interleaving.
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
// Reused when opening the second direction of a duplex stream.
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
// (the 'try {' line was lost in this copy)
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
// (else branch — reuse the existing handle.)
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
// NOTE(review): memset on a possibly-NULL pointer precedes the NULL check
// here — in the canonical source the order is the same; flagged for review.
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers".  However, we can't do this if using multiple
\r
// (continuation lost — a separate device buffer is only needed when
// converting across multiple streams.)
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
// Reuse the output-side device buffer when it is already large enough.
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
// Same device for both directions: promote to DUPLEX and share the one
// registered callback procedure.
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
// (the 'else {' line was lost; on 10.5+ use the IOProcID API, otherwise the
// deprecated AudioDeviceAddIOProc.)
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
// (else branch — first direction opened.)
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
// Error-unwind section: release everything allocated above before failing.
// (the label / 'return SUCCESS;' lines were lost in this copy.)
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop any running I/O procs, unregister them from
// each direction's device, free the user/device buffers, destroy the
// condition variable and reset the stream state to CLOSED.  Issues only a
// warning (no throw) if no stream is open.
// NOTE(review): this copy has lost lines during extraction (braces and the
// #else/#endif of the version-conditional blocks); comments describe the
// visible logic.
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output-side (index 0) I/O proc.
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1409 if ( stream_.state == STREAM_RUNNING )
\r
1410 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1412 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1414 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1415 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Tear down the input side (index 1) only when it uses a distinct device;
// a same-device duplex stream shares the single proc removed above.
1419 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1420 if ( stream_.state == STREAM_RUNNING )
\r
1421 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1422 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1423 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1425 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1426 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release both directions' user buffers and the shared device buffer.
1430 for ( int i=0; i<2; i++ ) {
\r
1431 if ( stream_.userBuffer[i] ) {
\r
1432 free( stream_.userBuffer[i] );
\r
1433 stream_.userBuffer[i] = 0;
\r
1437 if ( stream_.deviceBuffer ) {
\r
1438 free( stream_.deviceBuffer );
\r
1439 stream_.deviceBuffer = 0;
\r
1442 // Destroy pthread condition variable.
\r
1443 pthread_cond_destroy( &handle->condition );
\r
// (the 'delete handle;' line appears to be lost in this copy — TODO confirm.)
1445 stream_.apiHandle = 0;
\r
1447 stream_.mode = UNINITIALIZED;
\r
1448 stream_.state = STREAM_CLOSED;
\r
// Start the stream's I/O proc(s): device index 0 for output (and same-device
// duplex), device index 1 for input on a distinct device.  Resets the drain
// bookkeeping and marks the stream RUNNING.  Warns (no throw) if already
// running; raises SYSTEM_ERROR if any AudioDeviceStart() fails.
// NOTE(review): lines (braces, 'return;' statements) were lost from this
// copy during extraction; comments describe the visible logic.
1451 void RtApiCore :: startStream( void )
\r
1454 if ( stream_.state == STREAM_RUNNING ) {
\r
1455 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1456 error( RtAudioError::WARNING );
\r
1460 OSStatus result = noErr;
\r
1461 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1462 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1464 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1465 if ( result != noErr ) {
\r
1466 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1467 errorText_ = errorStream_.str();
\r
// Start the input proc separately only when input runs on its own device.
1472 if ( stream_.mode == INPUT ||
\r
1473 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1475 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1476 if ( result != noErr ) {
\r
1477 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1478 errorText_ = errorStream_.str();
\r
// Reset drain state used by stopStream()/callbackEvent() handshaking.
1483 handle->drainCounter = 0;
\r
1484 handle->internalDrain = false;
\r
1485 stream_.state = STREAM_RUNNING;
\r
// (an 'unlock:' style label preceding these lines was likely lost — TODO confirm.)
1488 if ( result == noErr ) return;
\r
1489 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream.  For an output/duplex stream this first lets the audio
// callback "drain" (play out buffered data): drainCounter is set to 2 and
// the caller blocks on the condition variable until callbackEvent() signals
// completion.  Then each direction's AudioDeviceStop() is issued and the
// state becomes STOPPED.  Warns (no throw) if already stopped.
// NOTE(review): lines (braces, 'return;' statements) were lost from this
// copy during extraction; comments describe the visible logic.
1492 void RtApiCore :: stopStream( void )
\r
1495 if ( stream_.state == STREAM_STOPPED ) {
\r
1496 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1497 error( RtAudioError::WARNING );
\r
1501 OSStatus result = noErr;
\r
1502 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1503 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain is in progress yet: request one and
// wait for the callback thread to signal that output has drained.
1505 if ( handle->drainCounter == 0 ) {
\r
1506 handle->drainCounter = 2;
\r
1507 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1510 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1511 if ( result != noErr ) {
\r
1512 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1513 errorText_ = errorStream_.str();
\r
// Stop the input device separately only when it differs from the output.
1518 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1520 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1521 if ( result != noErr ) {
\r
1522 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1523 errorText_ = errorStream_.str();
\r
1528 stream_.state = STREAM_STOPPED;
\r
1531 if ( result == noErr ) return;
\r
1532 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: like stopStream() but without draining pending output.
// Setting drainCounter to 2 up front makes the subsequent stop path skip the
// condition-variable wait.  Warns (no throw) if already stopped.
// NOTE(review): the trailing call (presumably stopStream()) and closing
// brace were lost from this copy during extraction — TODO confirm against
// the canonical source.
1535 void RtApiCore :: abortStream( void )
\r
1538 if ( stream_.state == STREAM_STOPPED ) {
\r
1539 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1540 error( RtAudioError::WARNING );
\r
1544 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1545 handle->drainCounter = 2;
\r
1550 // This function will be called by a spawned thread when the user
\r
1551 // callback function signals that the stream should be stopped or
\r
1552 // aborted.  It is better to handle it this way because the
\r
1553 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1554 // function is called.
\r
// Thread entry point (pthread signature): 'ptr' is the stream's
// CallbackInfo, whose 'object' member is the owning RtApiCore instance.
// Calls stopStream() on that instance, then terminates the thread.
1555 static void *coreStopStream( void *ptr )
\r
1557 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1558 RtApiCore *object = (RtApiCore *) info->object;
\r
1560 object->stopStream();
\r
// Ends the spawned thread; no result value is returned to a joiner.
1561 pthread_exit( NULL );
\r
1564 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1565 const AudioBufferList *inBufferList,
\r
1566 const AudioBufferList *outBufferList )
\r
1568 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1569 if ( stream_.state == STREAM_CLOSED ) {
\r
1570 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1571 error( RtAudioError::WARNING );
\r
1575 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1576 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1578 // Check if we were draining the stream and signal is finished.
\r
1579 if ( handle->drainCounter > 3 ) {
\r
1580 ThreadHandle threadId;
\r
1582 stream_.state = STREAM_STOPPING;
\r
1583 if ( handle->internalDrain == true )
\r
1584 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1585 else // external call to stopStream()
\r
1586 pthread_cond_signal( &handle->condition );
\r
1590 AudioDeviceID outputDevice = handle->id[0];
\r
1592 // Invoke user callback to get fresh output data UNLESS we are
\r
1593 // draining stream or duplex mode AND the input/output devices are
\r
1594 // different AND this function is called for the input device.
\r
1595 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1596 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1597 double streamTime = getStreamTime();
\r
1598 RtAudioStreamStatus status = 0;
\r
1599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1601 handle->xrun[0] = false;
\r
1603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1604 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1605 handle->xrun[1] = false;
\r
1608 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1609 stream_.bufferSize, streamTime, status, info->userData );
\r
1610 if ( cbReturnValue == 2 ) {
\r
1611 stream_.state = STREAM_STOPPING;
\r
1612 handle->drainCounter = 2;
\r
1616 else if ( cbReturnValue == 1 ) {
\r
1617 handle->drainCounter = 1;
\r
1618 handle->internalDrain = true;
\r
1622 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1624 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1626 if ( handle->nStreams[0] == 1 ) {
\r
1627 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1629 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1631 else { // fill multiple streams with zeros
\r
1632 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1633 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1635 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1639 else if ( handle->nStreams[0] == 1 ) {
\r
1640 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1641 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1642 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 else { // copy from user buffer
\r
1645 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1646 stream_.userBuffer[0],
\r
1647 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1650 else { // fill multiple streams
\r
1651 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1652 if ( stream_.doConvertBuffer[0] ) {
\r
1653 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1654 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1657 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1658 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1659 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1660 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1661 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1664 else { // fill multiple multi-channel streams with interleaved data
\r
1665 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1666 Float32 *out, *in;
\r
1668 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1669 UInt32 inChannels = stream_.nUserChannels[0];
\r
1670 if ( stream_.doConvertBuffer[0] ) {
\r
1671 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1672 inChannels = stream_.nDeviceChannels[0];
\r
1675 if ( inInterleaved ) inOffset = 1;
\r
1676 else inOffset = stream_.bufferSize;
\r
1678 channelsLeft = inChannels;
\r
1679 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1681 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1682 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1685 // Account for possible channel offset in first stream
\r
1686 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1687 streamChannels -= stream_.channelOffset[0];
\r
1688 outJump = stream_.channelOffset[0];
\r
1692 // Account for possible unfilled channels at end of the last stream
\r
1693 if ( streamChannels > channelsLeft ) {
\r
1694 outJump = streamChannels - channelsLeft;
\r
1695 streamChannels = channelsLeft;
\r
1698 // Determine input buffer offsets and skips
\r
1699 if ( inInterleaved ) {
\r
1700 inJump = inChannels;
\r
1701 in += inChannels - channelsLeft;
\r
1705 in += (inChannels - channelsLeft) * inOffset;
\r
1708 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1709 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1710 *out++ = in[j*inOffset];
\r
1715 channelsLeft -= streamChannels;
\r
1721 // Don't bother draining input
\r
1722 if ( handle->drainCounter ) {
\r
1723 handle->drainCounter++;
\r
1727 AudioDeviceID inputDevice;
\r
1728 inputDevice = handle->id[1];
\r
1729 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1731 if ( handle->nStreams[1] == 1 ) {
\r
1732 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1733 convertBuffer( stream_.userBuffer[1],
\r
1734 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1735 stream_.convertInfo[1] );
\r
1737 else { // copy to user buffer
\r
1738 memcpy( stream_.userBuffer[1],
\r
1739 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1740 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1743 else { // read from multiple streams
\r
1744 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1745 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1747 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1748 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1749 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1750 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1751 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1754 else { // read from multiple multi-channel streams
\r
1755 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1756 Float32 *out, *in;
\r
1758 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1759 UInt32 outChannels = stream_.nUserChannels[1];
\r
1760 if ( stream_.doConvertBuffer[1] ) {
\r
1761 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1762 outChannels = stream_.nDeviceChannels[1];
\r
1765 if ( outInterleaved ) outOffset = 1;
\r
1766 else outOffset = stream_.bufferSize;
\r
1768 channelsLeft = outChannels;
\r
1769 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1771 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1772 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1775 // Account for possible channel offset in first stream
\r
1776 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1777 streamChannels -= stream_.channelOffset[1];
\r
1778 inJump = stream_.channelOffset[1];
\r
1782 // Account for possible unread channels at end of the last stream
\r
1783 if ( streamChannels > channelsLeft ) {
\r
1784 inJump = streamChannels - channelsLeft;
\r
1785 streamChannels = channelsLeft;
\r
1788 // Determine output buffer offsets and skips
\r
1789 if ( outInterleaved ) {
\r
1790 outJump = outChannels;
\r
1791 out += outChannels - channelsLeft;
\r
1795 out += (outChannels - channelsLeft) * outOffset;
\r
1798 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1799 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1800 out[j*outOffset] = *in++;
\r
1805 channelsLeft -= streamChannels;
\r
1809 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1810 convertBuffer( stream_.userBuffer[1],
\r
1811 stream_.deviceBuffer,
\r
1812 stream_.convertInfo[1] );
\r
1818 //MUTEX_UNLOCK( &stream_.mutex );
\r
1820 RtApi::tickStreamTime();
\r
1824 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1828 case kAudioHardwareNotRunningError:
\r
1829 return "kAudioHardwareNotRunningError";
\r
1831 case kAudioHardwareUnspecifiedError:
\r
1832 return "kAudioHardwareUnspecifiedError";
\r
1834 case kAudioHardwareUnknownPropertyError:
\r
1835 return "kAudioHardwareUnknownPropertyError";
\r
1837 case kAudioHardwareBadPropertySizeError:
\r
1838 return "kAudioHardwareBadPropertySizeError";
\r
1840 case kAudioHardwareIllegalOperationError:
\r
1841 return "kAudioHardwareIllegalOperationError";
\r
1843 case kAudioHardwareBadObjectError:
\r
1844 return "kAudioHardwareBadObjectError";
\r
1846 case kAudioHardwareBadDeviceError:
\r
1847 return "kAudioHardwareBadDeviceError";
\r
1849 case kAudioHardwareBadStreamError:
\r
1850 return "kAudioHardwareBadStreamError";
\r
1852 case kAudioHardwareUnsupportedOperationError:
\r
1853 return "kAudioHardwareUnsupportedOperationError";
\r
1855 case kAudioDeviceUnsupportedFormatError:
\r
1856 return "kAudioDeviceUnsupportedFormatError";
\r
1858 case kAudioDevicePermissionsError:
\r
1859 return "kAudioDevicePermissionsError";
\r
1862 return "CoreAudio unknown error";
\r
1866 //******************** End of __MACOSX_CORE__ *********************//
\r
1869 #if defined(__UNIX_JACK__)
\r
1871 // JACK is a low-latency audio server, originally written for the
\r
1872 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1873 // connect a number of different applications to an audio device, as
\r
1874 // well as allowing them to share audio between themselves.
\r
1876 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1877 // have ports connected to the server. The JACK server is typically
\r
1878 // started in a terminal as follows:
\r
1880 // .jackd -d alsa -d hw:0
\r
1882 // or through an interface program such as qjackctl. Many of the
\r
1883 // parameters normally set for a stream are fixed by the JACK server
\r
1884 // and can be specified when the JACK server is started. In
\r
1887 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1889 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1890 // frames, and number of buffers = 4. Once the server is running, it
\r
1891 // is not possible to override these values. If the values are not
\r
1892 // specified in the command-line, the JACK server uses default values.
\r
1894 // The JACK server does not have to be running when an instance of
\r
1895 // RtApiJack is created, though the function getDeviceCount() will
\r
1896 // report 0 devices found until JACK has been started. When no
\r
1897 // devices are available (i.e., the JACK server is not running), a
\r
1898 // stream cannot be opened.
\r
1900 #include <jack/jack.h>
\r
1901 #include <unistd.h>
\r
1904 // A structure to hold various information related to the Jack API
\r
1905 // implementation.
\r
1906 struct JackHandle {
\r
1907 jack_client_t *client;
\r
1908 jack_port_t **ports[2];
\r
1909 std::string deviceName[2];
\r
1911 pthread_cond_t condition;
\r
1912 int drainCounter; // Tracks callback counts when draining
\r
1913 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1916 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error callback handed to jack_set_error_function() to silence
// JACK's console error output in non-debug builds.  (Stray trailing
// semicolon after the function body removed.)
static void jackSilentError( const char * ) {}
\r
1921 RtApiJack :: RtApiJack()
\r
1923 // Nothing to do here.
\r
1924 #if !defined(__RTAUDIO_DEBUG__)
\r
1925 // Turn off Jack's internal error reporting.
\r
1926 jack_set_error_function( &jackSilentError );
\r
1930 RtApiJack :: ~RtApiJack()
\r
1932 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1935 unsigned int RtApiJack :: getDeviceCount( void )
\r
1937 // See if we can become a jack client.
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1941 if ( client == 0 ) return 0;
\r
1943 const char **ports;
\r
1944 std::string port, previousPort;
\r
1945 unsigned int nChannels = 0, nDevices = 0;
\r
1946 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1948 // Parse the port names up to the first colon (:).
\r
1949 size_t iColon = 0;
\r
1951 port = (char *) ports[ nChannels ];
\r
1952 iColon = port.find(":");
\r
1953 if ( iColon != std::string::npos ) {
\r
1954 port = port.substr( 0, iColon + 1 );
\r
1955 if ( port != previousPort ) {
\r
1957 previousPort = port;
\r
1960 } while ( ports[++nChannels] );
\r
1964 jack_client_close( client );
\r
1968 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1970 RtAudio::DeviceInfo info;
\r
1971 info.probed = false;
\r
1973 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1974 jack_status_t *status = NULL;
\r
1975 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1976 if ( client == 0 ) {
\r
1977 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1978 error( RtAudioError::WARNING );
\r
1982 const char **ports;
\r
1983 std::string port, previousPort;
\r
1984 unsigned int nPorts = 0, nDevices = 0;
\r
1985 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1987 // Parse the port names up to the first colon (:).
\r
1988 size_t iColon = 0;
\r
1990 port = (char *) ports[ nPorts ];
\r
1991 iColon = port.find(":");
\r
1992 if ( iColon != std::string::npos ) {
\r
1993 port = port.substr( 0, iColon );
\r
1994 if ( port != previousPort ) {
\r
1995 if ( nDevices == device ) info.name = port;
\r
1997 previousPort = port;
\r
2000 } while ( ports[++nPorts] );
\r
2004 if ( device >= nDevices ) {
\r
2005 jack_client_close( client );
\r
2006 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2007 error( RtAudioError::INVALID_USE );
\r
2011 // Get the current jack server sample rate.
\r
2012 info.sampleRates.clear();
\r
2014 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2015 info.sampleRates.push_back( info.preferredSampleRate );
\r
2017 // Count the available ports containing the client name as device
\r
2018 // channels. Jack "input ports" equal RtAudio output channels.
\r
2019 unsigned int nChannels = 0;
\r
2020 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2022 while ( ports[ nChannels ] ) nChannels++;
\r
2024 info.outputChannels = nChannels;
\r
2027 // Jack "output ports" equal RtAudio input channels.
\r
2029 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2031 while ( ports[ nChannels ] ) nChannels++;
\r
2033 info.inputChannels = nChannels;
\r
2036 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2037 jack_client_close(client);
\r
2038 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2039 error( RtAudioError::WARNING );
\r
2043 // If device opens for both playback and capture, we determine the channels.
\r
2044 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2045 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2047 // Jack always uses 32-bit floats.
\r
2048 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2050 // Jack doesn't provide default devices so we'll use the first available one.
\r
2051 if ( device == 0 && info.outputChannels > 0 )
\r
2052 info.isDefaultOutput = true;
\r
2053 if ( device == 0 && info.inputChannels > 0 )
\r
2054 info.isDefaultInput = true;
\r
2056 jack_client_close(client);
\r
2057 info.probed = true;
\r
2061 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2063 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2065 RtApiJack *object = (RtApiJack *) info->object;
\r
2066 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2071 // This function will be called by a spawned thread when the Jack
\r
2072 // server signals that it is shutting down. It is necessary to handle
\r
2073 // it this way because the jackShutdown() function must return before
\r
2074 // the jack_deactivate() function (in closeStream()) will return.
\r
2075 static void *jackCloseStream( void *ptr )
\r
2077 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2078 RtApiJack *object = (RtApiJack *) info->object;
\r
2080 object->closeStream();
\r
2082 pthread_exit( NULL );
\r
2084 static void jackShutdown( void *infoPointer )
\r
2086 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2087 RtApiJack *object = (RtApiJack *) info->object;
\r
2089 // Check current stream state. If stopped, then we'll assume this
\r
2090 // was called as a result of a call to RtApiJack::stopStream (the
\r
2091 // deactivation of a client handle causes this function to be called).
\r
2092 // If not, we'll assume the Jack server is shutting down or some
\r
2093 // other problem occurred and we should close the stream.
\r
2094 if ( object->isStreamRunning() == false ) return;
\r
2096 ThreadHandle threadId;
\r
2097 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2098 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2101 static int jackXrun( void *infoPointer )
\r
2103 JackHandle *handle = (JackHandle *) infoPointer;
\r
2105 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2106 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2111 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2112 unsigned int firstChannel, unsigned int sampleRate,
\r
2113 RtAudioFormat format, unsigned int *bufferSize,
\r
2114 RtAudio::StreamOptions *options )
\r
2116 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2118 // Look for jack server and try to become a client (only do once per stream).
\r
2119 jack_client_t *client = 0;
\r
2120 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2121 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2122 jack_status_t *status = NULL;
\r
2123 if ( options && !options->streamName.empty() )
\r
2124 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2126 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2127 if ( client == 0 ) {
\r
2128 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2129 error( RtAudioError::WARNING );
\r
2134 // The handle must have been created on an earlier pass.
\r
2135 client = handle->client;
\r
2138 const char **ports;
\r
2139 std::string port, previousPort, deviceName;
\r
2140 unsigned int nPorts = 0, nDevices = 0;
\r
2141 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2143 // Parse the port names up to the first colon (:).
\r
2144 size_t iColon = 0;
\r
2146 port = (char *) ports[ nPorts ];
\r
2147 iColon = port.find(":");
\r
2148 if ( iColon != std::string::npos ) {
\r
2149 port = port.substr( 0, iColon );
\r
2150 if ( port != previousPort ) {
\r
2151 if ( nDevices == device ) deviceName = port;
\r
2153 previousPort = port;
\r
2156 } while ( ports[++nPorts] );
\r
2160 if ( device >= nDevices ) {
\r
2161 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2165 // Count the available ports containing the client name as device
\r
2166 // channels. Jack "input ports" equal RtAudio output channels.
\r
2167 unsigned int nChannels = 0;
\r
2168 unsigned long flag = JackPortIsInput;
\r
2169 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2170 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2172 while ( ports[ nChannels ] ) nChannels++;
\r
2176 // Compare the jack ports for specified client to the requested number of channels.
\r
2177 if ( nChannels < (channels + firstChannel) ) {
\r
2178 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2179 errorText_ = errorStream_.str();
\r
2183 // Check the jack server sample rate.
\r
2184 unsigned int jackRate = jack_get_sample_rate( client );
\r
2185 if ( sampleRate != jackRate ) {
\r
2186 jack_client_close( client );
\r
2187 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2188 errorText_ = errorStream_.str();
\r
2191 stream_.sampleRate = jackRate;
\r
2193 // Get the latency of the JACK port.
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2195 if ( ports[ firstChannel ] ) {
\r
2196 // Added by Ge Wang
\r
2197 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2198 // the range (usually the min and max are equal)
\r
2199 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2200 // get the latency range
\r
2201 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2202 // be optimistic, use the min!
\r
2203 stream_.latency[mode] = latrange.min;
\r
2204 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2208 // The jack server always uses 32-bit floating-point data.
\r
2209 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2210 stream_.userFormat = format;
\r
2212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2213 else stream_.userInterleaved = true;
\r
2215 // Jack always uses non-interleaved buffers.
\r
2216 stream_.deviceInterleaved[mode] = false;
\r
2218 // Jack always provides host byte-ordered data.
\r
2219 stream_.doByteSwap[mode] = false;
\r
2221 // Get the buffer size. The buffer size and number of buffers
\r
2222 // (periods) is set when the jack server is started.
\r
2223 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2224 *bufferSize = stream_.bufferSize;
\r
2226 stream_.nDeviceChannels[mode] = channels;
\r
2227 stream_.nUserChannels[mode] = channels;
\r
2229 // Set flags for buffer conversion.
\r
2230 stream_.doConvertBuffer[mode] = false;
\r
2231 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2232 stream_.doConvertBuffer[mode] = true;
\r
2233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2234 stream_.nUserChannels[mode] > 1 )
\r
2235 stream_.doConvertBuffer[mode] = true;
\r
2237 // Allocate our JackHandle structure for the stream.
\r
2238 if ( handle == 0 ) {
\r
2240 handle = new JackHandle;
\r
2242 catch ( std::bad_alloc& ) {
\r
2243 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2247 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2248 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2251 stream_.apiHandle = (void *) handle;
\r
2252 handle->client = client;
\r
2254 handle->deviceName[mode] = deviceName;
\r
2256 // Allocate necessary internal buffers.
\r
2257 unsigned long bufferBytes;
\r
2258 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2259 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2260 if ( stream_.userBuffer[mode] == NULL ) {
\r
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2265 if ( stream_.doConvertBuffer[mode] ) {
\r
2267 bool makeBuffer = true;
\r
2268 if ( mode == OUTPUT )
\r
2269 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2270 else { // mode == INPUT
\r
2271 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2272 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2273 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2274 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2278 if ( makeBuffer ) {
\r
2279 bufferBytes *= *bufferSize;
\r
2280 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2281 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2282 if ( stream_.deviceBuffer == NULL ) {
\r
2283 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2289 // Allocate memory for the Jack ports (channels) identifiers.
\r
2290 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2291 if ( handle->ports[mode] == NULL ) {
\r
2292 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2296 stream_.device[mode] = device;
\r
2297 stream_.channelOffset[mode] = firstChannel;
\r
2298 stream_.state = STREAM_STOPPED;
\r
2299 stream_.callbackInfo.object = (void *) this;
\r
2301 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2302 // We had already set up the stream for output.
\r
2303 stream_.mode = DUPLEX;
\r
2305 stream_.mode = mode;
\r
2306 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2307 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2308 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2311 // Register our ports.
\r
2313 if ( mode == OUTPUT ) {
\r
2314 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2315 snprintf( label, 64, "outport %d", i );
\r
2316 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2317 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2321 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2322 snprintf( label, 64, "inport %d", i );
\r
2323 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2324 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2328 // Setup the buffer conversion information structure. We don't use
\r
2329 // buffers to do channel offsets, so we override that parameter
\r
2331 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2337 pthread_cond_destroy( &handle->condition );
\r
2338 jack_client_close( handle->client );
\r
2340 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2341 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2344 stream_.apiHandle = 0;
\r
2347 for ( int i=0; i<2; i++ ) {
\r
2348 if ( stream_.userBuffer[i] ) {
\r
2349 free( stream_.userBuffer[i] );
\r
2350 stream_.userBuffer[i] = 0;
\r
2354 if ( stream_.deviceBuffer ) {
\r
2355 free( stream_.deviceBuffer );
\r
2356 stream_.deviceBuffer = 0;
\r
2362 void RtApiJack :: closeStream( void )
\r
2364 if ( stream_.state == STREAM_CLOSED ) {
\r
2365 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2366 error( RtAudioError::WARNING );
\r
2370 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2373 if ( stream_.state == STREAM_RUNNING )
\r
2374 jack_deactivate( handle->client );
\r
2376 jack_client_close( handle->client );
\r
2380 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2381 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2382 pthread_cond_destroy( &handle->condition );
\r
2384 stream_.apiHandle = 0;
\r
2387 for ( int i=0; i<2; i++ ) {
\r
2388 if ( stream_.userBuffer[i] ) {
\r
2389 free( stream_.userBuffer[i] );
\r
2390 stream_.userBuffer[i] = 0;
\r
2394 if ( stream_.deviceBuffer ) {
\r
2395 free( stream_.deviceBuffer );
\r
2396 stream_.deviceBuffer = 0;
\r
2399 stream_.mode = UNINITIALIZED;
\r
2400 stream_.state = STREAM_CLOSED;
\r
2403 void RtApiJack :: startStream( void )
\r
2406 if ( stream_.state == STREAM_RUNNING ) {
\r
2407 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2408 error( RtAudioError::WARNING );
\r
2412 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2413 int result = jack_activate( handle->client );
\r
2415 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2419 const char **ports;
\r
2421 // Get the list of available ports.
\r
2422 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2424 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2425 if ( ports == NULL) {
\r
2426 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2430 // Now make the port connections. Since RtAudio wasn't designed to
\r
2431 // allow the user to select particular channels of a device, we'll
\r
2432 // just open the first "nChannels" ports with offset.
\r
2433 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2435 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2436 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2439 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2454 // Now make the port connections. See note above.
\r
2455 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2457 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2458 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2461 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2468 handle->drainCounter = 0;
\r
2469 handle->internalDrain = false;
\r
2470 stream_.state = STREAM_RUNNING;
\r
2473 if ( result == 0 ) return;
\r
2474 error( RtAudioError::SYSTEM_ERROR );
\r
2477 void RtApiJack :: stopStream( void )
\r
2480 if ( stream_.state == STREAM_STOPPED ) {
\r
2481 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2482 error( RtAudioError::WARNING );
\r
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2489 if ( handle->drainCounter == 0 ) {
\r
2490 handle->drainCounter = 2;
\r
2491 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2495 jack_deactivate( handle->client );
\r
2496 stream_.state = STREAM_STOPPED;
\r
2499 void RtApiJack :: abortStream( void )
\r
2502 if ( stream_.state == STREAM_STOPPED ) {
\r
2503 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2504 error( RtAudioError::WARNING );
\r
2508 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 handle->drainCounter = 2;
\r
2514 // This function will be called by a spawned thread when the user
\r
2515 // callback function signals that the stream should be stopped or
\r
2516 // aborted. It is necessary to handle it this way because the
\r
2517 // callbackEvent() function must return before the jack_deactivate()
\r
2518 // function will return.
\r
2519 static void *jackStopStream( void *ptr )
\r
2521 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2522 RtApiJack *object = (RtApiJack *) info->object;
\r
2524 object->stopStream();
\r
2525 pthread_exit( NULL );
\r
2528 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2530 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2531 if ( stream_.state == STREAM_CLOSED ) {
\r
2532 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2533 error( RtAudioError::WARNING );
\r
2536 if ( stream_.bufferSize != nframes ) {
\r
2537 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2538 error( RtAudioError::WARNING );
\r
2542 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2545 // Check if we were draining the stream and signal is finished.
\r
2546 if ( handle->drainCounter > 3 ) {
\r
2547 ThreadHandle threadId;
\r
2549 stream_.state = STREAM_STOPPING;
\r
2550 if ( handle->internalDrain == true )
\r
2551 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2553 pthread_cond_signal( &handle->condition );
\r
2557 // Invoke user callback first, to get fresh output data.
\r
2558 if ( handle->drainCounter == 0 ) {
\r
2559 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2560 double streamTime = getStreamTime();
\r
2561 RtAudioStreamStatus status = 0;
\r
2562 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2563 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2564 handle->xrun[0] = false;
\r
2566 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2567 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2568 handle->xrun[1] = false;
\r
2570 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2571 stream_.bufferSize, streamTime, status, info->userData );
\r
2572 if ( cbReturnValue == 2 ) {
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 handle->drainCounter = 2;
\r
2576 pthread_create( &id, NULL, jackStopStream, info );
\r
2579 else if ( cbReturnValue == 1 ) {
\r
2580 handle->drainCounter = 1;
\r
2581 handle->internalDrain = true;
\r
2585 jack_default_audio_sample_t *jackbuffer;
\r
2586 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2589 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2591 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2592 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2593 memset( jackbuffer, 0, bufferBytes );
\r
2597 else if ( stream_.doConvertBuffer[0] ) {
\r
2599 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2603 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2606 else { // no buffer conversion
\r
2607 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2608 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2609 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2614 // Don't bother draining input
\r
2615 if ( handle->drainCounter ) {
\r
2616 handle->drainCounter++;
\r
2620 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2622 if ( stream_.doConvertBuffer[1] ) {
\r
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2625 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2627 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2629 else { // no buffer conversion
\r
2630 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2631 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2632 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2638 RtApi::tickStreamTime();
\r
2641 //******************** End of __UNIX_JACK__ *********************//
\r
2644 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2646 // The ASIO API is designed around a callback scheme, so this
\r
2647 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2648 // Jack. The primary constraint with ASIO is that it only allows
\r
2649 // access to a single driver at a time. Thus, it is not possible to
\r
2650 // have more than one simultaneous RtAudio stream.
\r
2652 // This implementation also requires a number of external ASIO files
\r
2653 // and a few global variables. The ASIO callback scheme does not
\r
2654 // allow for the passing of user data, so we must create a global
\r
2655 // pointer to our callbackInfo structure.
\r
2657 // On unix systems, we make use of a pthread condition variable.
\r
2658 // Since there is no equivalent in Windows, I hacked something based
\r
2659 // on information found in
\r
2660 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2662 #include "asiosys.h"
\r
2664 #include "iasiothiscallresolver.h"
\r
2665 #include "asiodrivers.h"
\r
2668 static AsioDrivers drivers;
\r
2669 static ASIOCallbacks asioCallbacks;
\r
2670 static ASIODriverInfo driverInfo;
\r
2671 static CallbackInfo *asioCallbackInfo;
\r
2672 static bool asioXRun;
\r
2674 struct AsioHandle {
\r
2675 int drainCounter; // Tracks callback counts when draining
\r
2676 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2677 ASIOBufferInfo *bufferInfos;
\r
2681 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2684 // Function declarations (definitions at end of section)
\r
2685 static const char* getAsioErrorString( ASIOError result );
\r
2686 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2687 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2689 RtApiAsio :: RtApiAsio()
\r
2691 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2692 // CoInitialize beforehand, but it must be for appartment threading
\r
2693 // (in which case, CoInitilialize will return S_FALSE here).
\r
2694 coInitialized_ = false;
\r
2695 HRESULT hr = CoInitialize( NULL );
\r
2696 if ( FAILED(hr) ) {
\r
2697 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2698 error( RtAudioError::WARNING );
\r
2700 coInitialized_ = true;
\r
2702 drivers.removeCurrentDriver();
\r
2703 driverInfo.asioVersion = 2;
\r
2705 // See note in DirectSound implementation about GetDesktopWindow().
\r
2706 driverInfo.sysRef = GetForegroundWindow();
\r
2709 RtApiAsio :: ~RtApiAsio()
\r
2711 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2712 if ( coInitialized_ ) CoUninitialize();
\r
2715 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2717 return (unsigned int) drivers.asioGetNumDev();
\r
2720 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2722 RtAudio::DeviceInfo info;
\r
2723 info.probed = false;
\r
2726 unsigned int nDevices = getDeviceCount();
\r
2727 if ( nDevices == 0 ) {
\r
2728 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2729 error( RtAudioError::INVALID_USE );
\r
2733 if ( device >= nDevices ) {
\r
2734 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2735 error( RtAudioError::INVALID_USE );
\r
2739 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2740 if ( stream_.state != STREAM_CLOSED ) {
\r
2741 if ( device >= devices_.size() ) {
\r
2742 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2743 error( RtAudioError::WARNING );
\r
2746 return devices_[ device ];
\r
2749 char driverName[32];
\r
2750 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2751 if ( result != ASE_OK ) {
\r
2752 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2753 errorText_ = errorStream_.str();
\r
2754 error( RtAudioError::WARNING );
\r
2758 info.name = driverName;
\r
2760 if ( !drivers.loadDriver( driverName ) ) {
\r
2761 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2762 errorText_ = errorStream_.str();
\r
2763 error( RtAudioError::WARNING );
\r
2767 result = ASIOInit( &driverInfo );
\r
2768 if ( result != ASE_OK ) {
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 // Determine the device channel information.
\r
2776 long inputChannels, outputChannels;
\r
2777 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2778 if ( result != ASE_OK ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.outputChannels = outputChannels;
\r
2787 info.inputChannels = inputChannels;
\r
2788 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2789 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2791 // Determine the supported sample rates.
\r
2792 info.sampleRates.clear();
\r
2793 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2794 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2795 if ( result == ASE_OK ) {
\r
2796 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2798 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2799 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2803 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2804 ASIOChannelInfo channelInfo;
\r
2805 channelInfo.channel = 0;
\r
2806 channelInfo.isInput = true;
\r
2807 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2808 result = ASIOGetChannelInfo( &channelInfo );
\r
2809 if ( result != ASE_OK ) {
\r
2810 drivers.removeCurrentDriver();
\r
2811 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2812 errorText_ = errorStream_.str();
\r
2813 error( RtAudioError::WARNING );
\r
2817 info.nativeFormats = 0;
\r
2818 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2819 info.nativeFormats |= RTAUDIO_SINT16;
\r
2820 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2821 info.nativeFormats |= RTAUDIO_SINT32;
\r
2822 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2823 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2824 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2825 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2826 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2827 info.nativeFormats |= RTAUDIO_SINT24;
\r
2829 if ( info.outputChannels > 0 )
\r
2830 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2831 if ( info.inputChannels > 0 )
\r
2832 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2834 info.probed = true;
\r
2835 drivers.removeCurrentDriver();
\r
2839 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2841 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2842 object->callbackEvent( index );
\r
2845 void RtApiAsio :: saveDeviceInfo( void )
\r
2849 unsigned int nDevices = getDeviceCount();
\r
2850 devices_.resize( nDevices );
\r
2851 for ( unsigned int i=0; i<nDevices; i++ )
\r
2852 devices_[i] = getDeviceInfo( i );
\r
2855 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2856 unsigned int firstChannel, unsigned int sampleRate,
\r
2857 RtAudioFormat format, unsigned int *bufferSize,
\r
2858 RtAudio::StreamOptions *options )
\r
2859 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2861 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2863 // For ASIO, a duplex stream MUST use the same driver.
\r
2864 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2865 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2869 char driverName[32];
\r
2870 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2871 if ( result != ASE_OK ) {
\r
2872 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2873 errorText_ = errorStream_.str();
\r
2877 // Only load the driver once for duplex stream.
\r
2878 if ( !isDuplexInput ) {
\r
2879 // The getDeviceInfo() function will not work when a stream is open
\r
2880 // because ASIO does not allow multiple devices to run at the same
\r
2881 // time. Thus, we'll probe the system before opening a stream and
\r
2882 // save the results for use by getDeviceInfo().
\r
2883 this->saveDeviceInfo();
\r
2885 if ( !drivers.loadDriver( driverName ) ) {
\r
2886 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2887 errorText_ = errorStream_.str();
\r
2891 result = ASIOInit( &driverInfo );
\r
2892 if ( result != ASE_OK ) {
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2894 errorText_ = errorStream_.str();
\r
2899 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2900 bool buffersAllocated = false;
\r
2901 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2902 unsigned int nChannels;
\r
2905 // Check the device channel count.
\r
2906 long inputChannels, outputChannels;
\r
2907 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2908 if ( result != ASE_OK ) {
\r
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2910 errorText_ = errorStream_.str();
\r
2914 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2915 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2916 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2917 errorText_ = errorStream_.str();
\r
2920 stream_.nDeviceChannels[mode] = channels;
\r
2921 stream_.nUserChannels[mode] = channels;
\r
2922 stream_.channelOffset[mode] = firstChannel;
\r
2924 // Verify the sample rate is supported.
\r
2925 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2926 if ( result != ASE_OK ) {
\r
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2928 errorText_ = errorStream_.str();
\r
2932 // Get the current sample rate
\r
2933 ASIOSampleRate currentRate;
\r
2934 result = ASIOGetSampleRate( ¤tRate );
\r
2935 if ( result != ASE_OK ) {
\r
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2937 errorText_ = errorStream_.str();
\r
2941 // Set the sample rate only if necessary
\r
2942 if ( currentRate != sampleRate ) {
\r
2943 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2944 if ( result != ASE_OK ) {
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2946 errorText_ = errorStream_.str();
\r
2951 // Determine the driver data type.
\r
2952 ASIOChannelInfo channelInfo;
\r
2953 channelInfo.channel = 0;
\r
2954 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2955 else channelInfo.isInput = true;
\r
2956 result = ASIOGetChannelInfo( &channelInfo );
\r
2957 if ( result != ASE_OK ) {
\r
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2959 errorText_ = errorStream_.str();
\r
2963 // Assuming WINDOWS host is always little-endian.
\r
2964 stream_.doByteSwap[mode] = false;
\r
2965 stream_.userFormat = format;
\r
2966 stream_.deviceFormat[mode] = 0;
\r
2967 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2968 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2969 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2971 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2972 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2973 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2975 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2976 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2977 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2979 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2980 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2981 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2983 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2984 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2985 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2988 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2989 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2990 errorText_ = errorStream_.str();
\r
2994 // Set the buffer size. For a duplex stream, this will end up
\r
2995 // setting the buffer size based on the input constraints, which
\r
2997 long minSize, maxSize, preferSize, granularity;
\r
2998 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2999 if ( result != ASE_OK ) {
\r
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3001 errorText_ = errorStream_.str();
\r
3005 if ( isDuplexInput ) {
\r
3006 // When this is the duplex input (output was opened before), then we have to use the same
\r
3007 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3008 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3009 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3010 // to the "bufferSize" param as usual to set up processing buffers.
\r
3012 *bufferSize = stream_.bufferSize;
\r
3015 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3016 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3017 else if ( granularity == -1 ) {
\r
3018 // Make sure bufferSize is a power of two.
\r
3019 int log2_of_min_size = 0;
\r
3020 int log2_of_max_size = 0;
\r
3022 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3023 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3024 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3027 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3028 int min_delta_num = log2_of_min_size;
\r
3030 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3031 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3032 if (current_delta < min_delta) {
\r
3033 min_delta = current_delta;
\r
3034 min_delta_num = i;
\r
3038 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3039 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3040 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity != 0 ) {
\r
3043 // Set to an even multiple of granularity, rounding up.
\r
3044 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3049 // we don't use it anymore, see above!
\r
3050 // Just left it here for the case...
\r
3051 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3052 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3057 stream_.bufferSize = *bufferSize;
\r
3058 stream_.nBuffers = 2;
\r
3060 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3061 else stream_.userInterleaved = true;
\r
3063 // ASIO always uses non-interleaved buffers.
\r
3064 stream_.deviceInterleaved[mode] = false;
\r
3066 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3067 if ( handle == 0 ) {
\r
3069 handle = new AsioHandle;
\r
3071 catch ( std::bad_alloc& ) {
\r
3072 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3075 handle->bufferInfos = 0;
\r
3077 // Create a manual-reset event.
\r
3078 handle->condition = CreateEvent( NULL, // no security
\r
3079 TRUE, // manual-reset
\r
3080 FALSE, // non-signaled initially
\r
3081 NULL ); // unnamed
\r
3082 stream_.apiHandle = (void *) handle;
\r
3085 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3086 // and output separately, we'll have to dispose of previously
\r
3087 // created output buffers for a duplex stream.
\r
3088 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3089 ASIODisposeBuffers();
\r
3090 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3093 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3095 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3096 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3097 if ( handle->bufferInfos == NULL ) {
\r
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3099 errorText_ = errorStream_.str();
\r
3103 ASIOBufferInfo *infos;
\r
3104 infos = handle->bufferInfos;
\r
3105 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3106 infos->isInput = ASIOFalse;
\r
3107 infos->channelNum = i + stream_.channelOffset[0];
\r
3108 infos->buffers[0] = infos->buffers[1] = 0;
\r
3110 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3111 infos->isInput = ASIOTrue;
\r
3112 infos->channelNum = i + stream_.channelOffset[1];
\r
3113 infos->buffers[0] = infos->buffers[1] = 0;
\r
3116 // prepare for callbacks
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3121 // store this class instance before registering callbacks, that are going to use it
\r
3122 asioCallbackInfo = &stream_.callbackInfo;
\r
3123 stream_.callbackInfo.object = (void *) this;
\r
3125 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3126 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3127 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3128 asioCallbacks.asioMessage = &asioMessages;
\r
3129 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3130 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3131 if ( result != ASE_OK ) {
\r
3132 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3133 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3134 // in that case, let's be naïve and try that instead
\r
3135 *bufferSize = preferSize;
\r
3136 stream_.bufferSize = *bufferSize;
\r
3137 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3140 if ( result != ASE_OK ) {
\r
3141 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3142 errorText_ = errorStream_.str();
\r
3145 buffersAllocated = true;
\r
3146 stream_.state = STREAM_STOPPED;
\r
3148 // Set flags for buffer conversion.
\r
3149 stream_.doConvertBuffer[mode] = false;
\r
3150 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3151 stream_.doConvertBuffer[mode] = true;
\r
3152 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3153 stream_.nUserChannels[mode] > 1 )
\r
3154 stream_.doConvertBuffer[mode] = true;
\r
3156 // Allocate necessary internal buffers
\r
3157 unsigned long bufferBytes;
\r
3158 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3159 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3160 if ( stream_.userBuffer[mode] == NULL ) {
\r
3161 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3165 if ( stream_.doConvertBuffer[mode] ) {
\r
3167 bool makeBuffer = true;
\r
3168 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3169 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3170 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3171 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3174 if ( makeBuffer ) {
\r
3175 bufferBytes *= *bufferSize;
\r
3176 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3177 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3178 if ( stream_.deviceBuffer == NULL ) {
\r
3179 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3185 // Determine device latencies
\r
3186 long inputLatency, outputLatency;
\r
3187 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3188 if ( result != ASE_OK ) {
\r
3189 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3190 errorText_ = errorStream_.str();
\r
3191 error( RtAudioError::WARNING); // warn but don't fail
\r
3194 stream_.latency[0] = outputLatency;
\r
3195 stream_.latency[1] = inputLatency;
\r
3198 // Setup the buffer conversion information structure. We don't use
\r
3199 // buffers to do channel offsets, so we override that parameter
\r
3201 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3206 if ( !isDuplexInput ) {
\r
3207 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3208 // So we clean up for single channel only
\r
3210 if ( buffersAllocated )
\r
3211 ASIODisposeBuffers();
\r
3213 drivers.removeCurrentDriver();
\r
3216 CloseHandle( handle->condition );
\r
3217 if ( handle->bufferInfos )
\r
3218 free( handle->bufferInfos );
\r
3221 stream_.apiHandle = 0;
\r
3225 if ( stream_.userBuffer[mode] ) {
\r
3226 free( stream_.userBuffer[mode] );
\r
3227 stream_.userBuffer[mode] = 0;
\r
3230 if ( stream_.deviceBuffer ) {
\r
3231 free( stream_.deviceBuffer );
\r
3232 stream_.deviceBuffer = 0;
\r
3237 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3239 void RtApiAsio :: closeStream()
\r
3241 if ( stream_.state == STREAM_CLOSED ) {
\r
3242 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3243 error( RtAudioError::WARNING );
\r
3247 if ( stream_.state == STREAM_RUNNING ) {
\r
3248 stream_.state = STREAM_STOPPED;
\r
3251 ASIODisposeBuffers();
\r
3252 drivers.removeCurrentDriver();
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3256 CloseHandle( handle->condition );
\r
3257 if ( handle->bufferInfos )
\r
3258 free( handle->bufferInfos );
\r
3260 stream_.apiHandle = 0;
\r
3263 for ( int i=0; i<2; i++ ) {
\r
3264 if ( stream_.userBuffer[i] ) {
\r
3265 free( stream_.userBuffer[i] );
\r
3266 stream_.userBuffer[i] = 0;
\r
3270 if ( stream_.deviceBuffer ) {
\r
3271 free( stream_.deviceBuffer );
\r
3272 stream_.deviceBuffer = 0;
\r
3275 stream_.mode = UNINITIALIZED;
\r
3276 stream_.state = STREAM_CLOSED;
\r
3279 bool stopThreadCalled = false;
\r
3281 void RtApiAsio :: startStream()
\r
3284 if ( stream_.state == STREAM_RUNNING ) {
\r
3285 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3286 error( RtAudioError::WARNING );
\r
3290 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3291 ASIOError result = ASIOStart();
\r
3292 if ( result != ASE_OK ) {
\r
3293 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3294 errorText_ = errorStream_.str();
\r
3298 handle->drainCounter = 0;
\r
3299 handle->internalDrain = false;
\r
3300 ResetEvent( handle->condition );
\r
3301 stream_.state = STREAM_RUNNING;
\r
3305 stopThreadCalled = false;
\r
3307 if ( result == ASE_OK ) return;
\r
3308 error( RtAudioError::SYSTEM_ERROR );
\r
3311 void RtApiAsio :: stopStream()
\r
3314 if ( stream_.state == STREAM_STOPPED ) {
\r
3315 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3316 error( RtAudioError::WARNING );
\r
3320 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3322 if ( handle->drainCounter == 0 ) {
\r
3323 handle->drainCounter = 2;
\r
3324 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3328 stream_.state = STREAM_STOPPED;
\r
3330 ASIOError result = ASIOStop();
\r
3331 if ( result != ASE_OK ) {
\r
3332 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3333 errorText_ = errorStream_.str();
\r
3336 if ( result == ASE_OK ) return;
\r
3337 error( RtAudioError::SYSTEM_ERROR );
\r
3340 void RtApiAsio :: abortStream()
\r
3343 if ( stream_.state == STREAM_STOPPED ) {
\r
3344 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3345 error( RtAudioError::WARNING );
\r
3349 // The following lines were commented-out because some behavior was
\r
3350 // noted where the device buffers need to be zeroed to avoid
\r
3351 // continuing sound, even when the device buffers are completely
\r
3352 // disposed. So now, calling abort is the same as calling stop.
\r
3353 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3354 // handle->drainCounter = 2;
\r
3358 // This function will be called by a spawned thread when the user
\r
3359 // callback function signals that the stream should be stopped or
\r
3360 // aborted. It is necessary to handle it this way because the
\r
3361 // callbackEvent() function must return before the ASIOStop()
\r
3362 // function will return.
\r
3363 static unsigned __stdcall asioStopStream( void *ptr )
\r
3365 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3366 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3368 object->stopStream();
\r
3369 _endthreadex( 0 );
\r
3373 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3375 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3376 if ( stream_.state == STREAM_CLOSED ) {
\r
3377 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3378 error( RtAudioError::WARNING );
\r
3382 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3383 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3385 // Check if we were draining the stream and signal if finished.
\r
3386 if ( handle->drainCounter > 3 ) {
\r
3388 stream_.state = STREAM_STOPPING;
\r
3389 if ( handle->internalDrain == false )
\r
3390 SetEvent( handle->condition );
\r
3391 else { // spawn a thread to stop the stream
\r
3392 unsigned threadId;
\r
3393 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3394 &stream_.callbackInfo, 0, &threadId );
\r
3399 // Invoke user callback to get fresh output data UNLESS we are
\r
3400 // draining stream.
\r
3401 if ( handle->drainCounter == 0 ) {
\r
3402 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3403 double streamTime = getStreamTime();
\r
3404 RtAudioStreamStatus status = 0;
\r
3405 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3406 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3409 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3410 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3413 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3414 stream_.bufferSize, streamTime, status, info->userData );
\r
3415 if ( cbReturnValue == 2 ) {
\r
3416 stream_.state = STREAM_STOPPING;
\r
3417 handle->drainCounter = 2;
\r
3418 unsigned threadId;
\r
3419 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3420 &stream_.callbackInfo, 0, &threadId );
\r
3423 else if ( cbReturnValue == 1 ) {
\r
3424 handle->drainCounter = 1;
\r
3425 handle->internalDrain = true;
\r
3429 unsigned int nChannels, bufferBytes, i, j;
\r
3430 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3431 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3433 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3435 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3437 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3438 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3439 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3443 else if ( stream_.doConvertBuffer[0] ) {
\r
3445 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3446 if ( stream_.doByteSwap[0] )
\r
3447 byteSwapBuffer( stream_.deviceBuffer,
\r
3448 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3449 stream_.deviceFormat[0] );
\r
3451 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3452 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3453 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3454 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3460 if ( stream_.doByteSwap[0] )
\r
3461 byteSwapBuffer( stream_.userBuffer[0],
\r
3462 stream_.bufferSize * stream_.nUserChannels[0],
\r
3463 stream_.userFormat );
\r
3465 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3466 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3467 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3468 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3474 // Don't bother draining input
\r
3475 if ( handle->drainCounter ) {
\r
3476 handle->drainCounter++;
\r
3480 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3482 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3484 if (stream_.doConvertBuffer[1]) {
\r
3486 // Always interleave ASIO input data.
\r
3487 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3488 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3489 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3490 handle->bufferInfos[i].buffers[bufferIndex],
\r
3494 if ( stream_.doByteSwap[1] )
\r
3495 byteSwapBuffer( stream_.deviceBuffer,
\r
3496 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3497 stream_.deviceFormat[1] );
\r
3498 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3502 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3503 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3504 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3505 handle->bufferInfos[i].buffers[bufferIndex],
\r
3510 if ( stream_.doByteSwap[1] )
\r
3511 byteSwapBuffer( stream_.userBuffer[1],
\r
3512 stream_.bufferSize * stream_.nUserChannels[1],
\r
3513 stream_.userFormat );
\r
3518 // The following call was suggested by Malte Clasen. While the API
\r
3519 // documentation indicates it should not be required, some device
\r
3520 // drivers apparently do not function correctly without it.
\r
3521 ASIOOutputReady();
\r
3523 RtApi::tickStreamTime();
\r
3527 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3529 // The ASIO documentation says that this usually only happens during
\r
3530 // external sync. Audio processing is not stopped by the driver,
\r
3531 // actual sample rate might not have even changed, maybe only the
\r
3532 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3535 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3537 object->stopStream();
\r
3539 catch ( RtAudioError &exception ) {
\r
3540 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3544 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3547 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3551 switch( selector ) {
\r
3552 case kAsioSelectorSupported:
\r
3553 if ( value == kAsioResetRequest
\r
3554 || value == kAsioEngineVersion
\r
3555 || value == kAsioResyncRequest
\r
3556 || value == kAsioLatenciesChanged
\r
3557 // The following three were added for ASIO 2.0, you don't
\r
3558 // necessarily have to support them.
\r
3559 || value == kAsioSupportsTimeInfo
\r
3560 || value == kAsioSupportsTimeCode
\r
3561 || value == kAsioSupportsInputMonitor)
\r
3564 case kAsioResetRequest:
\r
3565 // Defer the task and perform the reset of the driver during the
\r
3566 // next "safe" situation. You cannot reset the driver right now,
\r
3567 // as this code is called from the driver. Reset the driver is
\r
3568 // done by completely destruct is. I.e. ASIOStop(),
\r
3569 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3571 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3574 case kAsioResyncRequest:
\r
3575 // This informs the application that the driver encountered some
\r
3576 // non-fatal data loss. It is used for synchronization purposes
\r
3577 // of different media. Added mainly to work around the Win16Mutex
\r
3578 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3579 // which could lose data because the Mutex was held too long by
\r
3580 // another thread. However a driver can issue it in other
\r
3581 // situations, too.
\r
3582 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3586 case kAsioLatenciesChanged:
\r
3587 // This will inform the host application that the drivers were
\r
3588 // latencies changed. Beware, it this does not mean that the
\r
3589 // buffer sizes have changed! You might need to update internal
\r
3591 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3594 case kAsioEngineVersion:
\r
3595 // Return the supported ASIO version of the host application. If
\r
3596 // a host application does not implement this selector, ASIO 1.0
\r
3597 // is assumed by the driver.
\r
3600 case kAsioSupportsTimeInfo:
\r
3601 // Informs the driver whether the
\r
3602 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3603 // For compatibility with ASIO 1.0 drivers the host application
\r
3604 // should always support the "old" bufferSwitch method, too.
\r
3607 case kAsioSupportsTimeCode:
\r
3608 // Informs the driver whether application is interested in time
\r
3609 // code info. If an application does not need to know about time
\r
3610 // code, the driver has less work to do.
\r
3617 static const char* getAsioErrorString( ASIOError result )
\r
3622 const char*message;
\r
3625 static const Messages m[] =
\r
3627 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3628 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3629 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3630 { ASE_InvalidMode, "Invalid mode." },
\r
3631 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3632 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3633 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3636 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3637 if ( m[i].value == result ) return m[i].message;
\r
3639 return "Unknown error.";
\r
3642 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3646 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3648 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3649 // - Introduces support for the Windows WASAPI API
\r
3650 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3651 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3652 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3657 #include <audioclient.h>
\r
3659 #include <mmdeviceapi.h>
\r
3660 #include <functiondiscoverykeys_devpkey.h>
\r
3662 //=============================================================================
\r
3664 #define SAFE_RELEASE( objectPtr )\
\r
3667 objectPtr->Release();\
\r
3668 objectPtr = NULL;\
\r
3671 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3673 //-----------------------------------------------------------------------------
\r
3675 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3676 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3677 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3678 // provide intermediate storage for read / write synchronization.
\r
3679 class WasapiBuffer
\r
3683 : buffer_( NULL ),
\r
3692 // sets the length of the internal ring buffer
\r
3693 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3696 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3698 bufferSize_ = bufferSize;
\r
3703 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3704 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3706 if ( !buffer || // incoming buffer is NULL
\r
3707 bufferSize == 0 || // incoming buffer has no data
\r
3708 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3713 unsigned int relOutIndex = outIndex_;
\r
3714 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3715 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3716 relOutIndex += bufferSize_;
\r
3719 // "in" index can end on the "out" index but cannot begin at it
\r
3720 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3721 return false; // not enough space between "in" index and "out" index
\r
3724 // copy buffer from external to internal
\r
3725 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3726 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3727 int fromInSize = bufferSize - fromZeroSize;
\r
3731 case RTAUDIO_SINT8:
\r
3732 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3733 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3735 case RTAUDIO_SINT16:
\r
3736 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3737 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3739 case RTAUDIO_SINT24:
\r
3740 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3741 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3743 case RTAUDIO_SINT32:
\r
3744 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3745 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3747 case RTAUDIO_FLOAT32:
\r
3748 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3749 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3751 case RTAUDIO_FLOAT64:
\r
3752 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3753 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3757 // update "in" index
\r
3758 inIndex_ += bufferSize;
\r
3759 inIndex_ %= bufferSize_;
\r
3764 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3765 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3767 if ( !buffer || // incoming buffer is NULL
\r
3768 bufferSize == 0 || // incoming buffer has no data
\r
3769 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3774 unsigned int relInIndex = inIndex_;
\r
3775 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3776 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3777 relInIndex += bufferSize_;
\r
3780 // "out" index can begin at and end on the "in" index
\r
3781 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3782 return false; // not enough space between "out" index and "in" index
\r
3785 // copy buffer from internal to external
\r
3786 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3787 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3788 int fromOutSize = bufferSize - fromZeroSize;
\r
3792 case RTAUDIO_SINT8:
\r
3793 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3794 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3796 case RTAUDIO_SINT16:
\r
3797 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3798 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3800 case RTAUDIO_SINT24:
\r
3801 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3802 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3804 case RTAUDIO_SINT32:
\r
3805 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3806 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3808 case RTAUDIO_FLOAT32:
\r
3809 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3810 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3812 case RTAUDIO_FLOAT64:
\r
3813 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3814 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3818 // update "out" index
\r
3819 outIndex_ += bufferSize;
\r
3820 outIndex_ %= bufferSize_;
\r
3827 unsigned int bufferSize_;
\r
3828 unsigned int inIndex_;
\r
3829 unsigned int outIndex_;
\r
3832 //-----------------------------------------------------------------------------
\r
3834 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3835 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3836 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3837 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3838 // one rate and its multiple.
\r
3839 void convertBufferWasapi( char* outBuffer,
\r
3840 const char* inBuffer,
\r
3841 const unsigned int& channelCount,
\r
3842 const unsigned int& inSampleRate,
\r
3843 const unsigned int& outSampleRate,
\r
3844 const unsigned int& inSampleCount,
\r
3845 unsigned int& outSampleCount,
\r
3846 const RtAudioFormat& format )
\r
3848 // calculate the new outSampleCount and relative sampleStep
\r
3849 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3850 float sampleStep = 1.0f / sampleRatio;
\r
3851 float inSampleFraction = 0.0f;
\r
3853 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3855 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3856 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3858 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3862 case RTAUDIO_SINT8:
\r
3863 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3865 case RTAUDIO_SINT16:
\r
3866 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3868 case RTAUDIO_SINT24:
\r
3869 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3871 case RTAUDIO_SINT32:
\r
3872 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3874 case RTAUDIO_FLOAT32:
\r
3875 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3877 case RTAUDIO_FLOAT64:
\r
3878 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3882 // jump to next in sample
\r
3883 inSampleFraction += sampleStep;
\r
3887 //-----------------------------------------------------------------------------
\r
3889 // A structure to hold various information related to the WASAPI implementation.
\r
3890 struct WasapiHandle
\r
3892 IAudioClient* captureAudioClient;
\r
3893 IAudioClient* renderAudioClient;
\r
3894 IAudioCaptureClient* captureClient;
\r
3895 IAudioRenderClient* renderClient;
\r
3896 HANDLE captureEvent;
\r
3897 HANDLE renderEvent;
\r
3900 : captureAudioClient( NULL ),
\r
3901 renderAudioClient( NULL ),
\r
3902 captureClient( NULL ),
\r
3903 renderClient( NULL ),
\r
3904 captureEvent( NULL ),
\r
3905 renderEvent( NULL ) {}
\r
3908 //=============================================================================
\r
3910 RtApiWasapi::RtApiWasapi()
\r
3911 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3913 // WASAPI can run either apartment or multi-threaded
\r
3914 HRESULT hr = CoInitialize( NULL );
\r
3915 if ( !FAILED( hr ) )
\r
3916 coInitialized_ = true;
\r
3918 // Instantiate device enumerator
\r
3919 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3920 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3921 ( void** ) &deviceEnumerator_ );
\r
3923 if ( FAILED( hr ) ) {
\r
3924 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3925 error( RtAudioError::DRIVER_ERROR );
\r
3929 //-----------------------------------------------------------------------------
\r
3931 RtApiWasapi::~RtApiWasapi()
\r
3933 if ( stream_.state != STREAM_CLOSED )
\r
3936 SAFE_RELEASE( deviceEnumerator_ );
\r
3938 // If this object previously called CoInitialize()
\r
3939 if ( coInitialized_ )
\r
3943 //=============================================================================
\r
3945 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3947 unsigned int captureDeviceCount = 0;
\r
3948 unsigned int renderDeviceCount = 0;
\r
3950 IMMDeviceCollection* captureDevices = NULL;
\r
3951 IMMDeviceCollection* renderDevices = NULL;
\r
3953 // Count capture devices
\r
3954 errorText_.clear();
\r
3955 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3956 if ( FAILED( hr ) ) {
\r
3957 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3961 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3962 if ( FAILED( hr ) ) {
\r
3963 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3967 // Count render devices
\r
3968 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3974 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3981 // release all references
\r
3982 SAFE_RELEASE( captureDevices );
\r
3983 SAFE_RELEASE( renderDevices );
\r
3985 if ( errorText_.empty() )
\r
3986 return captureDeviceCount + renderDeviceCount;
\r
3988 error( RtAudioError::DRIVER_ERROR );
\r
3992 //-----------------------------------------------------------------------------
\r
3994 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3996 RtAudio::DeviceInfo info;
\r
3997 unsigned int captureDeviceCount = 0;
\r
3998 unsigned int renderDeviceCount = 0;
\r
3999 std::string defaultDeviceName;
\r
4000 bool isCaptureDevice = false;
\r
4002 PROPVARIANT deviceNameProp;
\r
4003 PROPVARIANT defaultDeviceNameProp;
\r
4005 IMMDeviceCollection* captureDevices = NULL;
\r
4006 IMMDeviceCollection* renderDevices = NULL;
\r
4007 IMMDevice* devicePtr = NULL;
\r
4008 IMMDevice* defaultDevicePtr = NULL;
\r
4009 IAudioClient* audioClient = NULL;
\r
4010 IPropertyStore* devicePropStore = NULL;
\r
4011 IPropertyStore* defaultDevicePropStore = NULL;
\r
4013 WAVEFORMATEX* deviceFormat = NULL;
\r
4014 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4017 info.probed = false;
\r
4019 // Count capture devices
\r
4020 errorText_.clear();
\r
4021 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4022 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4023 if ( FAILED( hr ) ) {
\r
4024 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4028 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4029 if ( FAILED( hr ) ) {
\r
4030 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4034 // Count render devices
\r
4035 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4036 if ( FAILED( hr ) ) {
\r
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4041 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4047 // validate device index
\r
4048 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4050 errorType = RtAudioError::INVALID_USE;
\r
4054 // determine whether index falls within capture or render devices
\r
4055 if ( device >= renderDeviceCount ) {
\r
4056 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4057 if ( FAILED( hr ) ) {
\r
4058 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4061 isCaptureDevice = true;
\r
4064 hr = renderDevices->Item( device, &devicePtr );
\r
4065 if ( FAILED( hr ) ) {
\r
4066 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4069 isCaptureDevice = false;
\r
4072 // get default device name
\r
4073 if ( isCaptureDevice ) {
\r
4074 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4081 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4088 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4089 if ( FAILED( hr ) ) {
\r
4090 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4093 PropVariantInit( &defaultDeviceNameProp );
\r
4095 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4096 if ( FAILED( hr ) ) {
\r
4097 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4101 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4104 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4105 if ( FAILED( hr ) ) {
\r
4106 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4110 PropVariantInit( &deviceNameProp );
\r
4112 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4113 if ( FAILED( hr ) ) {
\r
4114 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4118 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4121 if ( isCaptureDevice ) {
\r
4122 info.isDefaultInput = info.name == defaultDeviceName;
\r
4123 info.isDefaultOutput = false;
\r
4126 info.isDefaultInput = false;
\r
4127 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4131 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4132 if ( FAILED( hr ) ) {
\r
4133 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4137 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4138 if ( FAILED( hr ) ) {
\r
4139 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4143 if ( isCaptureDevice ) {
\r
4144 info.inputChannels = deviceFormat->nChannels;
\r
4145 info.outputChannels = 0;
\r
4146 info.duplexChannels = 0;
\r
4149 info.inputChannels = 0;
\r
4150 info.outputChannels = deviceFormat->nChannels;
\r
4151 info.duplexChannels = 0;
\r
4155 info.sampleRates.clear();
\r
4157 // allow support for all sample rates as we have a built-in sample rate converter
\r
4158 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4159 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4161 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4164 info.nativeFormats = 0;
\r
4166 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4167 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4168 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4170 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4171 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4173 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4174 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4177 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4178 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4179 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4181 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4182 info.nativeFormats |= RTAUDIO_SINT8;
\r
4184 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4185 info.nativeFormats |= RTAUDIO_SINT16;
\r
4187 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4188 info.nativeFormats |= RTAUDIO_SINT24;
\r
4190 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4191 info.nativeFormats |= RTAUDIO_SINT32;
\r
4196 info.probed = true;
\r
4199 // release all references
\r
4200 PropVariantClear( &deviceNameProp );
\r
4201 PropVariantClear( &defaultDeviceNameProp );
\r
4203 SAFE_RELEASE( captureDevices );
\r
4204 SAFE_RELEASE( renderDevices );
\r
4205 SAFE_RELEASE( devicePtr );
\r
4206 SAFE_RELEASE( defaultDevicePtr );
\r
4207 SAFE_RELEASE( audioClient );
\r
4208 SAFE_RELEASE( devicePropStore );
\r
4209 SAFE_RELEASE( defaultDevicePropStore );
\r
4211 CoTaskMemFree( deviceFormat );
\r
4212 CoTaskMemFree( closestMatchFormat );
\r
4214 if ( !errorText_.empty() )
\r
4215 error( errorType );
\r
4219 //-----------------------------------------------------------------------------
\r
4221 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4223 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4224 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4232 //-----------------------------------------------------------------------------
\r
4234 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4236 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4237 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4245 //-----------------------------------------------------------------------------
\r
4247 void RtApiWasapi::closeStream( void )
\r
4249 if ( stream_.state == STREAM_CLOSED ) {
\r
4250 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4251 error( RtAudioError::WARNING );
\r
4255 if ( stream_.state != STREAM_STOPPED )
\r
4258 // clean up stream memory
\r
4259 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4260 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4262 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4263 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4265 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4266 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4268 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4269 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4271 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4272 stream_.apiHandle = NULL;
\r
4274 for ( int i = 0; i < 2; i++ ) {
\r
4275 if ( stream_.userBuffer[i] ) {
\r
4276 free( stream_.userBuffer[i] );
\r
4277 stream_.userBuffer[i] = 0;
\r
4281 if ( stream_.deviceBuffer ) {
\r
4282 free( stream_.deviceBuffer );
\r
4283 stream_.deviceBuffer = 0;
\r
4286 // update stream state
\r
4287 stream_.state = STREAM_CLOSED;
\r
4290 //-----------------------------------------------------------------------------
\r
4292 void RtApiWasapi::startStream( void )
\r
4296 if ( stream_.state == STREAM_RUNNING ) {
\r
4297 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4298 error( RtAudioError::WARNING );
\r
4302 // update stream state
\r
4303 stream_.state = STREAM_RUNNING;
\r
4305 // create WASAPI stream thread
\r
4306 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4308 if ( !stream_.callbackInfo.thread ) {
\r
4309 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4310 error( RtAudioError::THREAD_ERROR );
\r
4313 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4314 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4318 //-----------------------------------------------------------------------------
\r
4320 void RtApiWasapi::stopStream( void )
\r
4324 if ( stream_.state == STREAM_STOPPED ) {
\r
4325 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4326 error( RtAudioError::WARNING );
\r
4330 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4331 stream_.state = STREAM_STOPPING;
\r
4333 // wait until stream thread is stopped
\r
4334 while( stream_.state != STREAM_STOPPED ) {
\r
4338 // Wait for the last buffer to play before stopping.
\r
4339 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4341 // stop capture client if applicable
\r
4342 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4343 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4344 if ( FAILED( hr ) ) {
\r
4345 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4346 error( RtAudioError::DRIVER_ERROR );
\r
4351 // stop render client if applicable
\r
4352 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4353 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4354 if ( FAILED( hr ) ) {
\r
4355 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4356 error( RtAudioError::DRIVER_ERROR );
\r
4361 // close thread handle
\r
4362 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4363 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4364 error( RtAudioError::THREAD_ERROR );
\r
4368 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4371 //-----------------------------------------------------------------------------
\r
4373 void RtApiWasapi::abortStream( void )
\r
4377 if ( stream_.state == STREAM_STOPPED ) {
\r
4378 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4379 error( RtAudioError::WARNING );
\r
4383 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4384 stream_.state = STREAM_STOPPING;
\r
4386 // wait until stream thread is stopped
\r
4387 while ( stream_.state != STREAM_STOPPED ) {
\r
4391 // stop capture client if applicable
\r
4392 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4393 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4396 error( RtAudioError::DRIVER_ERROR );
\r
4401 // stop render client if applicable
\r
4402 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4403 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4404 if ( FAILED( hr ) ) {
\r
4405 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4406 error( RtAudioError::DRIVER_ERROR );
\r
4411 // close thread handle
\r
4412 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4413 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4414 error( RtAudioError::THREAD_ERROR );
\r
4418 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4421 //-----------------------------------------------------------------------------
\r
4423 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4424 unsigned int firstChannel, unsigned int sampleRate,
\r
4425 RtAudioFormat format, unsigned int* bufferSize,
\r
4426 RtAudio::StreamOptions* options )
\r
4428 bool methodResult = FAILURE;
\r
4429 unsigned int captureDeviceCount = 0;
\r
4430 unsigned int renderDeviceCount = 0;
\r
4432 IMMDeviceCollection* captureDevices = NULL;
\r
4433 IMMDeviceCollection* renderDevices = NULL;
\r
4434 IMMDevice* devicePtr = NULL;
\r
4435 WAVEFORMATEX* deviceFormat = NULL;
\r
4436 unsigned int bufferBytes;
\r
4437 stream_.state = STREAM_STOPPED;
\r
4439 // create API Handle if not already created
\r
4440 if ( !stream_.apiHandle )
\r
4441 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4443 // Count capture devices
\r
4444 errorText_.clear();
\r
4445 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4446 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4447 if ( FAILED( hr ) ) {
\r
4448 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4452 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4453 if ( FAILED( hr ) ) {
\r
4454 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4458 // Count render devices
\r
4459 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4460 if ( FAILED( hr ) ) {
\r
4461 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4465 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4466 if ( FAILED( hr ) ) {
\r
4467 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4471 // validate device index
\r
4472 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4473 errorType = RtAudioError::INVALID_USE;
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4478 // determine whether index falls within capture or render devices
\r
4479 if ( device >= renderDeviceCount ) {
\r
4480 if ( mode != INPUT ) {
\r
4481 errorType = RtAudioError::INVALID_USE;
\r
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4486 // retrieve captureAudioClient from devicePtr
\r
4487 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4489 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4490 if ( FAILED( hr ) ) {
\r
4491 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4495 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4496 NULL, ( void** ) &captureAudioClient );
\r
4497 if ( FAILED( hr ) ) {
\r
4498 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4502 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4503 if ( FAILED( hr ) ) {
\r
4504 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4508 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4509 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4512 if ( mode != OUTPUT ) {
\r
4513 errorType = RtAudioError::INVALID_USE;
\r
4514 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4518 // retrieve renderAudioClient from devicePtr
\r
4519 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4521 hr = renderDevices->Item( device, &devicePtr );
\r
4522 if ( FAILED( hr ) ) {
\r
4523 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4527 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4528 NULL, ( void** ) &renderAudioClient );
\r
4529 if ( FAILED( hr ) ) {
\r
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4534 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4535 if ( FAILED( hr ) ) {
\r
4536 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4540 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4541 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4544 // fill stream data
\r
4545 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4546 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4547 stream_.mode = DUPLEX;
\r
4550 stream_.mode = mode;
\r
4553 stream_.device[mode] = device;
\r
4554 stream_.doByteSwap[mode] = false;
\r
4555 stream_.sampleRate = sampleRate;
\r
4556 stream_.bufferSize = *bufferSize;
\r
4557 stream_.nBuffers = 1;
\r
4558 stream_.nUserChannels[mode] = channels;
\r
4559 stream_.channelOffset[mode] = firstChannel;
\r
4560 stream_.userFormat = format;
\r
4561 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4563 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4564 stream_.userInterleaved = false;
\r
4566 stream_.userInterleaved = true;
\r
4567 stream_.deviceInterleaved[mode] = true;
\r
4569 // Set flags for buffer conversion.
\r
4570 stream_.doConvertBuffer[mode] = false;
\r
4571 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4572 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4573 stream_.doConvertBuffer[mode] = true;
\r
4574 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4575 stream_.nUserChannels[mode] > 1 )
\r
4576 stream_.doConvertBuffer[mode] = true;
\r
4578 if ( stream_.doConvertBuffer[mode] )
\r
4579 setConvertInfo( mode, 0 );
\r
4581 // Allocate necessary internal buffers
\r
4582 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4584 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4585 if ( !stream_.userBuffer[mode] ) {
\r
4586 errorType = RtAudioError::MEMORY_ERROR;
\r
4587 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4591 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4592 stream_.callbackInfo.priority = 15;
\r
4594 stream_.callbackInfo.priority = 0;
\r
4596 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4597 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4599 methodResult = SUCCESS;
\r
4603 SAFE_RELEASE( captureDevices );
\r
4604 SAFE_RELEASE( renderDevices );
\r
4605 SAFE_RELEASE( devicePtr );
\r
4606 CoTaskMemFree( deviceFormat );
\r
4608 // if method failed, close the stream
\r
4609 if ( methodResult == FAILURE )
\r
4612 if ( !errorText_.empty() )
\r
4613 error( errorType );
\r
4614 return methodResult;
\r
4617 //=============================================================================
\r
4619 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )

// Win32 thread entry point: recover the RtApiWasapi instance from the
// opaque pointer and run its stream-processing loop on this thread.
4622 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4627 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )

// Helper thread entry point spawned by wasapiThread when the user callback
// returns 1: calls stopStream() from a separate thread so the audio thread
// itself can wind down cleanly.
4630 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4635 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )

// Helper thread entry point spawned by wasapiThread when the user callback
// returns 2: calls abortStream() from a separate thread so the audio thread
// itself can wind down cleanly.
4638 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4643 //-----------------------------------------------------------------------------
\r
// Stream-processing thread body for the WASAPI backend.  Runs until
// stream_.state becomes STREAM_STOPPING.  Each pass of the loop pulls
// captured audio from the capture ring buffer, invokes the user callback,
// and pushes rendered audio to the render ring buffer, converting between
// the device mix format/rate and the user format/rate as needed.  On any
// failure errorText_/errorType are set and reported before the thread exits.
4645 void RtApiWasapi::wasapiThread()

4647 // as this is a new thread, we must CoInitialize it

4648 CoInitialize( NULL );

// Per-stream WASAPI interfaces and events were stashed in stream_.apiHandle
// by probeDeviceOpen; fetch local copies here.
4652 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4653 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4654 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4655 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4656 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4657 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4659 WAVEFORMATEX* captureFormat = NULL;

4660 WAVEFORMATEX* renderFormat = NULL;

// Ratio of device mix sample rate to user-requested rate, per direction.
4661 float captureSrRatio = 0.0f;

4662 float renderSrRatio = 0.0f;

4663 WasapiBuffer captureBuffer;

4664 WasapiBuffer renderBuffer;

4666 // declare local stream variables

4667 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4668 BYTE* streamBuffer = NULL;

4669 unsigned long captureFlags = 0;

4670 unsigned int bufferFrameCount = 0;

4671 unsigned int numFramesPadding = 0;

// NOTE(review): convBufferSize appears to be written through an argument of
// the convertBufferWasapi calls on lines elided from this chunk -- confirm.
4672 unsigned int convBufferSize = 0;

4673 bool callbackPushed = false;

4674 bool callbackPulled = false;

4675 bool callbackStopped = false;

4676 int callbackResult = 0;

4678 // convBuffer is used to store converted buffers between WASAPI and the user

4679 char* convBuffer = NULL;

4680 unsigned int convBuffSize = 0;

4681 unsigned int deviceBuffSize = 0;

4683 errorText_.clear();

4684 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4686 // Attempt to assign "Pro Audio" characteristic to thread

// NOTE(review): neither the LoadLibrary result nor the GetProcAddress result
// is null-checked before the call below -- a missing AVRT.dll would crash
// here; confirm whether upstream guards this.
4687 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4689 DWORD taskIndex = 0;

4690 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4691 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4692 FreeLibrary( AvrtDll );

4695 // start capture stream if applicable

4696 if ( captureAudioClient ) {

4697 hr = captureAudioClient->GetMixFormat( &captureFormat );

4698 if ( FAILED( hr ) ) {

4699 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4703 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4705 // initialize capture stream according to desired buffer size

4706 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// 10,000,000 converts seconds to REFERENCE_TIME units (100 ns).
4707 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

// First pass only: the capture client service has not been created yet.
4709 if ( !captureClient ) {

4710 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4711 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4712 desiredBufferPeriod,

4713 desiredBufferPeriod,

4716 if ( FAILED( hr ) ) {

4717 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4721 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4722 ( void** ) &captureClient );

4723 if ( FAILED( hr ) ) {

4724 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4728 // configure captureEvent to trigger on every available capture buffer

4729 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4730 if ( !captureEvent ) {

4731 errorType = RtAudioError::SYSTEM_ERROR;

4732 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4736 hr = captureAudioClient->SetEventHandle( captureEvent );

4737 if ( FAILED( hr ) ) {

4738 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Persist the newly created client/event back into the shared handle so
// other methods (e.g. stream teardown) can release them.
4742 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4743 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4746 unsigned int inBufferSize = 0;

4747 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4748 if ( FAILED( hr ) ) {

4749 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4753 // scale outBufferSize according to stream->user sample rate ratio

4754 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4755 inBufferSize *= stream_.nDeviceChannels[INPUT];

4757 // set captureBuffer size

4758 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4760 // reset the capture stream

4761 hr = captureAudioClient->Reset();

4762 if ( FAILED( hr ) ) {

4763 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4767 // start the capture stream

4768 hr = captureAudioClient->Start();

4769 if ( FAILED( hr ) ) {

4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4775 // start render stream if applicable

4776 if ( renderAudioClient ) {

4777 hr = renderAudioClient->GetMixFormat( &renderFormat );

4778 if ( FAILED( hr ) ) {

4779 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4783 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4785 // initialize render stream according to desired buffer size

4786 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

// 10,000,000 converts seconds to REFERENCE_TIME units (100 ns).
4787 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

// First pass only: the render client service has not been created yet.
4789 if ( !renderClient ) {

4790 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4791 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4792 desiredBufferPeriod,

4793 desiredBufferPeriod,

4796 if ( FAILED( hr ) ) {

4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4801 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4802 ( void** ) &renderClient );

4803 if ( FAILED( hr ) ) {

4804 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4808 // configure renderEvent to trigger on every available render buffer

4809 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4810 if ( !renderEvent ) {

4811 errorType = RtAudioError::SYSTEM_ERROR;

4812 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4816 hr = renderAudioClient->SetEventHandle( renderEvent );

4817 if ( FAILED( hr ) ) {

4818 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

// Persist the newly created client/event back into the shared handle so
// other methods (e.g. stream teardown) can release them.
4822 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4823 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4826 unsigned int outBufferSize = 0;

4827 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4828 if ( FAILED( hr ) ) {

4829 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4833 // scale inBufferSize according to user->stream sample rate ratio

4834 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4835 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4837 // set renderBuffer size

4838 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4840 // reset the render stream

4841 hr = renderAudioClient->Reset();

4842 if ( FAILED( hr ) ) {

4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4847 // start the render stream

4848 hr = renderAudioClient->Start();

4849 if ( FAILED( hr ) ) {

4850 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the larger of the two requirements so one buffer serves both.
4855 if ( stream_.mode == INPUT ) {

4856 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4857 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4859 else if ( stream_.mode == OUTPUT ) {

4860 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4861 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4863 else if ( stream_.mode == DUPLEX ) {

4864 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4865 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4866 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4867 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4870 convBuffer = ( char* ) malloc( convBuffSize );

4871 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4872 if ( !convBuffer || !stream_.deviceBuffer ) {

4873 errorType = RtAudioError::MEMORY_ERROR;

4874 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4878 // stream process loop

// Each iteration: (1) pull + convert captured audio, (2) run the user
// callback, (3) convert + push output audio, then service the WASAPI
// capture and render clients and advance stream time.
4879 while ( stream_.state != STREAM_STOPPING ) {

4880 if ( !callbackPulled ) {

4883 // 1. Pull callback buffer from inputBuffer

4884 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4885 // Convert callback buffer to user format

4887 if ( captureAudioClient ) {

4888 // Pull callback buffer from inputBuffer

4889 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4890 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4891 stream_.deviceFormat[INPUT] );

4893 if ( callbackPulled ) {

4894 // Convert callback buffer to user sample rate

4895 convertBufferWasapi( stream_.deviceBuffer,

4897 stream_.nDeviceChannels[INPUT],

4898 captureFormat->nSamplesPerSec,

4899 stream_.sampleRate,

4900 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4902 stream_.deviceFormat[INPUT] );

4904 if ( stream_.doConvertBuffer[INPUT] ) {

4905 // Convert callback buffer to user format

4906 convertBuffer( stream_.userBuffer[INPUT],

4907 stream_.deviceBuffer,

4908 stream_.convertInfo[INPUT] );

4911 // no further conversion, simple copy deviceBuffer to userBuffer

4912 memcpy( stream_.userBuffer[INPUT],

4913 stream_.deviceBuffer,

4914 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4919 // if there is no capture stream, set callbackPulled flag

4920 callbackPulled = true;

4923 // Execute Callback

4924 // ================

4925 // 1. Execute user callback method

4926 // 2. Handle return value from callback

4928 // if callback has not requested the stream to stop

4929 if ( callbackPulled && !callbackStopped ) {

4930 // Execute user callback method

4931 callbackResult = callback( stream_.userBuffer[OUTPUT],

4932 stream_.userBuffer[INPUT],

4933 stream_.bufferSize,

4935 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4936 stream_.callbackInfo.userData );

4938 // Handle return value from callback

// Return value 1 => drain and stop; a separate thread runs stopStream()
// because this thread cannot stop itself.
4939 if ( callbackResult == 1 ) {

4940 // instantiate a thread to stop this thread

4941 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4942 if ( !threadHandle ) {

4943 errorType = RtAudioError::THREAD_ERROR;

4944 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4947 else if ( !CloseHandle( threadHandle ) ) {

4948 errorType = RtAudioError::THREAD_ERROR;

4949 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4953 callbackStopped = true;

// Return value 2 => abort immediately via abortStream() on a helper thread.
4955 else if ( callbackResult == 2 ) {

4956 // instantiate a thread to stop this thread

4957 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4958 if ( !threadHandle ) {

4959 errorType = RtAudioError::THREAD_ERROR;

4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4963 else if ( !CloseHandle( threadHandle ) ) {

4964 errorType = RtAudioError::THREAD_ERROR;

4965 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4969 callbackStopped = true;

4974 // Callback Output

4975 // ===============

4976 // 1. Convert callback buffer to stream format

4977 // 2. Convert callback buffer to stream sample rate and channel count

4978 // 3. Push callback buffer into outputBuffer

4980 if ( renderAudioClient && callbackPulled ) {

4981 if ( stream_.doConvertBuffer[OUTPUT] ) {

4982 // Convert callback buffer to stream format

4983 convertBuffer( stream_.deviceBuffer,

4984 stream_.userBuffer[OUTPUT],

4985 stream_.convertInfo[OUTPUT] );

4989 // Convert callback buffer to stream sample rate

4990 convertBufferWasapi( convBuffer,

4991 stream_.deviceBuffer,

4992 stream_.nDeviceChannels[OUTPUT],

4993 stream_.sampleRate,

4994 renderFormat->nSamplesPerSec,

4995 stream_.bufferSize,

4997 stream_.deviceFormat[OUTPUT] );

4999 // Push callback buffer into outputBuffer

5000 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5001 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5002 stream_.deviceFormat[OUTPUT] );

5005 // if there is no render stream, set callbackPushed flag

5006 callbackPushed = true;

5011 // 1. Get capture buffer from stream

5012 // 2. Push capture buffer into inputBuffer

5013 // 3. If 2. was successful: Release capture buffer

5015 if ( captureAudioClient ) {

5016 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5017 if ( !callbackPulled ) {

5018 WaitForSingleObject( captureEvent, INFINITE );

5021 // Get capture buffer from stream

5022 hr = captureClient->GetBuffer( &streamBuffer,

5023 &bufferFrameCount,

5024 &captureFlags, NULL, NULL );

5025 if ( FAILED( hr ) ) {

5026 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5030 if ( bufferFrameCount != 0 ) {

5031 // Push capture buffer into inputBuffer

5032 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5033 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5034 stream_.deviceFormat[INPUT] ) )

5036 // Release capture buffer

5037 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5038 if ( FAILED( hr ) ) {

5039 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5045 // Inform WASAPI that capture was unsuccessful

5046 hr = captureClient->ReleaseBuffer( 0 );

5047 if ( FAILED( hr ) ) {

5048 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5055 // Inform WASAPI that capture was unsuccessful

5056 hr = captureClient->ReleaseBuffer( 0 );

5057 if ( FAILED( hr ) ) {

5058 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5066 // 1. Get render buffer from stream

5067 // 2. Pull next buffer from outputBuffer

5068 // 3. If 2. was successful: Fill render buffer with next buffer

5069 // Release render buffer

5071 if ( renderAudioClient ) {

5072 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5073 if ( callbackPulled && !callbackPushed ) {

5074 WaitForSingleObject( renderEvent, INFINITE );

5077 // Get render buffer from stream

5078 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5079 if ( FAILED( hr ) ) {

5080 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5084 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5085 if ( FAILED( hr ) ) {

5086 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Writable space = total buffer size minus frames still queued for playback.
5090 bufferFrameCount -= numFramesPadding;

5092 if ( bufferFrameCount != 0 ) {

5093 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5094 if ( FAILED( hr ) ) {

5095 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5099 // Pull next buffer from outputBuffer

5100 // Fill render buffer with next buffer

5101 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5102 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5103 stream_.deviceFormat[OUTPUT] ) )

5105 // Release render buffer

5106 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5107 if ( FAILED( hr ) ) {

5108 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5114 // Inform WASAPI that render was unsuccessful

5115 hr = renderClient->ReleaseBuffer( 0, 0 );

5116 if ( FAILED( hr ) ) {

5117 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5124 // Inform WASAPI that render was unsuccessful

5125 hr = renderClient->ReleaseBuffer( 0, 0 );

5126 if ( FAILED( hr ) ) {

5127 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5133 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag

5134 if ( callbackPushed ) {

5135 callbackPulled = false;

5138 // tick stream time

5139 RtApi::tickStreamTime();

// Cleanup: release the COM-allocated mix formats and the conversion buffer,
// publish the stopped state, then report any error recorded above.
5144 CoTaskMemFree( captureFormat );

5145 CoTaskMemFree( renderFormat );

5147 free ( convBuffer );

5151 // update stream state

5152 stream_.state = STREAM_STOPPED;

5154 if ( errorText_.empty() )

5157 error( errorType );
\r
5160 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5164 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5166 // Modified by Robin Davies, October 2005
\r
5167 // - Improvements to DirectX pointer chasing.
\r
5168 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5169 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5170 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5171 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5173 #include <dsound.h>
\r
5174 #include <assert.h>
\r
5175 #include <algorithm>
\r
5177 #if defined(__MINGW32__)
\r
5178 // missing from latest mingw winapi
\r
5179 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5180 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5181 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5182 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5185 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5187 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5188 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5191 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5193 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5194 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5195 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5196 return pointer >= earlierPointer && pointer < laterPointer;
\r
5199 // A structure to hold various information related to the DirectSound

5200 // API implementation.

5202 unsigned int drainCounter; // Tracks callback counts when draining

5203 bool internalDrain; // Indicates if stop is initiated from callback or not.

// NOTE(review): the paired [2] arrays below appear to be indexed by stream
// direction (one slot for playback, one for capture) -- confirm against the
// OUTPUT/INPUT enum values used elsewhere in this file.
5207 UINT bufferPointer[2];

5208 DWORD dsBufferSize[2];

5209 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default constructor: clears the drain state and zeroes the per-direction
// members (id, buffer, xrun declarations are elided from this chunk).
5213 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5216 // Declarations for utility functions, callbacks, and structures

5217 // specific to the DirectSound implementation.

// Enumeration callback handed to DirectSound(Capture)Enumerate: receives each
// device's GUID and description plus a DsProbeData context pointer.
5218 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

5219 LPCTSTR description,

5221 LPVOID lpContext );

// Maps a DirectSound error code to a human-readable string (used when
// building the errorStream_ messages below).
5223 static const char* getErrorString( int code );

// Thread entry point for the DirectSound service thread (signature matches
// the _beginthreadex convention).
5225 static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice default state: not yet seen by enumeration, no valid output/input ids.
5234 : found(false) { validId[0] = false; validId[1] = false; }

// Context passed through the device-enumeration callbacks: direction flag
// plus a pointer to the device list being (re)built.
5237 struct DsProbeData {

5239 std::vector<struct DsDevice>* dsDevices;
\r
5242 RtApiDs :: RtApiDs()
\r
5244 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5245 // accept whatever the mainline chose for a threading model.
\r
5246 coInitialized_ = false;
\r
5247 HRESULT hr = CoInitialize( NULL );
\r
5248 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5251 RtApiDs :: ~RtApiDs()
\r
5253 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5254 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5257 // The DirectSound default output is always the first device.

5258 unsigned int RtApiDs :: getDefaultOutputDevice( void )

// NOTE(review): the function body is not visible in this chunk; per the
// comment above it should simply return device index 0 -- confirm.
\r
5263 // The DirectSound default input is always the first input device,

5264 // which is the first capture device enumerated.

5265 unsigned int RtApiDs :: getDefaultInputDevice( void )

// NOTE(review): the function body is not visible in this chunk; per the
// comment above it should simply return device index 0 -- confirm.
\r
5270 unsigned int RtApiDs :: getDeviceCount( void )
\r
5272 // Set query flag for previously found devices to false, so that we
\r
5273 // can check for any devices that have disappeared.
\r
5274 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5275 dsDevices[i].found = false;
\r
5277 // Query DirectSound devices.
\r
5278 struct DsProbeData probeInfo;
\r
5279 probeInfo.isInput = false;
\r
5280 probeInfo.dsDevices = &dsDevices;
\r
5281 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5282 if ( FAILED( result ) ) {
\r
5283 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5284 errorText_ = errorStream_.str();
\r
5285 error( RtAudioError::WARNING );
\r
5288 // Query DirectSoundCapture devices.
\r
5289 probeInfo.isInput = true;
\r
5290 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5291 if ( FAILED( result ) ) {
\r
5292 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5293 errorText_ = errorStream_.str();
\r
5294 error( RtAudioError::WARNING );
\r
5297 // Clean out any devices that may have disappeared.
\r
5298 std::vector< int > indices;
\r
5299 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5300 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5301 //unsigned int nErased = 0;
\r
5302 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5303 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5304 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5306 return static_cast<unsigned int>(dsDevices.size());
\r
5309 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5311 RtAudio::DeviceInfo info;
\r
5312 info.probed = false;
\r
5314 if ( dsDevices.size() == 0 ) {
\r
5315 // Force a query of all devices
\r
5317 if ( dsDevices.size() == 0 ) {
\r
5318 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5319 error( RtAudioError::INVALID_USE );
\r
5324 if ( device >= dsDevices.size() ) {
\r
5325 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5326 error( RtAudioError::INVALID_USE );
\r
5331 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5333 LPDIRECTSOUND output;
\r
5335 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5336 if ( FAILED( result ) ) {
\r
5337 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5338 errorText_ = errorStream_.str();
\r
5339 error( RtAudioError::WARNING );
\r
5343 outCaps.dwSize = sizeof( outCaps );
\r
5344 result = output->GetCaps( &outCaps );
\r
5345 if ( FAILED( result ) ) {
\r
5346 output->Release();
\r
5347 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5348 errorText_ = errorStream_.str();
\r
5349 error( RtAudioError::WARNING );
\r
5353 // Get output channel information.
\r
5354 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5356 // Get sample rate information.
\r
5357 info.sampleRates.clear();
\r
5358 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5359 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5360 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5361 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5363 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5364 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5368 // Get format information.
\r
5369 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5370 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5372 output->Release();
\r
5374 if ( getDefaultOutputDevice() == device )
\r
5375 info.isDefaultOutput = true;
\r
5377 if ( dsDevices[ device ].validId[1] == false ) {
\r
5378 info.name = dsDevices[ device ].name;
\r
5379 info.probed = true;
\r
5385 LPDIRECTSOUNDCAPTURE input;
\r
5386 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5387 if ( FAILED( result ) ) {
\r
5388 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5389 errorText_ = errorStream_.str();
\r
5390 error( RtAudioError::WARNING );
\r
5395 inCaps.dwSize = sizeof( inCaps );
\r
5396 result = input->GetCaps( &inCaps );
\r
5397 if ( FAILED( result ) ) {
\r
5399 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5400 errorText_ = errorStream_.str();
\r
5401 error( RtAudioError::WARNING );
\r
5405 // Get input channel information.
\r
5406 info.inputChannels = inCaps.dwChannels;
\r
5408 // Get sample rate and format information.
\r
5409 std::vector<unsigned int> rates;
\r
5410 if ( inCaps.dwChannels >= 2 ) {
\r
5411 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5414 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5417 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5418 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5420 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5421 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5422 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5423 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5424 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5426 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5427 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5428 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5429 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5430 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5433 else if ( inCaps.dwChannels == 1 ) {
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5443 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5449 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5456 else info.inputChannels = 0; // technically, this would be an error
\r
5460 if ( info.inputChannels == 0 ) return info;
\r
5462 // Copy the supported rates to the info structure but avoid duplication.
\r
5464 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5466 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5467 if ( rates[i] == info.sampleRates[j] ) {
\r
5472 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5474 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5476 // If device opens for both playback and capture, we determine the channels.
\r
5477 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5478 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5480 if ( device == 0 ) info.isDefaultInput = true;
\r
5482 // Copy name and return.
\r
5483 info.name = dsDevices[ device ].name;
\r
5484 info.probed = true;
\r
// Open one direction (playback OUTPUT or capture INPUT) of a DirectSound
// stream on the given device: validate the request, create the DS object and
// its (primary/secondary or capture) buffer, fill in the stream_ structure,
// allocate user/device conversion buffers, and on first open create the
// per-stream DsHandle plus the callback thread.
// NOTE(review): this extracted view is missing several lines (error returns,
// closing braces, and local declarations such as HRESULT result, DSCAPS
// outCaps, DSCBCAPS dsbcaps, LPVOID audioPtr / DWORD dataLen, int nBuffers) —
// comments below describe only what the visible code establishes.
5488 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5489 unsigned int firstChannel, unsigned int sampleRate,

5490 RtAudioFormat format, unsigned int *bufferSize,

5491 RtAudio::StreamOptions *options )

// DirectSound supports at most 2 channels per device, counting the channel offset.
5493 if ( channels + firstChannel > 2 ) {

5494 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5498 size_t nDevices = dsDevices.size();

5499 if ( nDevices == 0 ) {

5500 // This should not happen because a check is made before this function is called.

5501 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5505 if ( device >= nDevices ) {

5506 // This should not happen because a check is made before this function is called.

5507 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// validId[0] marks output capability, validId[1] marks input capability.
5511 if ( mode == OUTPUT ) {

5512 if ( dsDevices[ device ].validId[0] == false ) {

5513 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5514 errorText_ = errorStream_.str();

5518 else { // mode == INPUT

5519 if ( dsDevices[ device ].validId[1] == false ) {

5520 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5521 errorText_ = errorStream_.str();

5526 // According to a note in PortAudio, using GetDesktopWindow()

5527 // instead of GetForegroundWindow() is supposed to avoid problems

5528 // that occur when the application's window is not the foreground

5529 // window. Also, if the application window closes before the

5530 // DirectSound buffer, DirectSound can crash. In the past, I had

5531 // problems when using GetDesktopWindow() but it seems fine now

5532 // (January 2010). I'll leave it commented here.

5533 // HWND hWnd = GetForegroundWindow();

5534 HWND hWnd = GetDesktopWindow();

5536 // Check the numberOfBuffers parameter and limit the lowest value to

5537 // two. This is a judgement call and a value of two is probably too

5538 // low for capture, but it should work for playback.

5540 if ( options ) nBuffers = options->numberOfBuffers;

5541 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5542 if ( nBuffers < 2 ) nBuffers = 3;

5544 // Check the lower range of the user-specified buffer size and set

5545 // (arbitrarily) to a lower bound of 32.

5546 if ( *bufferSize < 32 ) *bufferSize = 32;

5548 // Create the wave format structure. The data format setting will

5549 // be determined later.

5550 WAVEFORMATEX waveFormat;

5551 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5552 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5553 waveFormat.nChannels = channels + firstChannel;

5554 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5556 // Determine the device buffer size. By default, we'll use the value

5557 // defined above (32K), but we will grow it to make allowances for

5558 // very large software buffer sizes.

5559 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5560 DWORD dsPointerLeadTime = 0;

// ohandle/bhandle will carry the DS object and DS buffer pointers into the
// DsHandle structure after the mode-specific setup below.
5562 void *ohandle = 0, *bhandle = 0;

5564 if ( mode == OUTPUT ) {

5566 LPDIRECTSOUND output;

5567 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5568 if ( FAILED( result ) ) {

5569 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5570 errorText_ = errorStream_.str();

5575 outCaps.dwSize = sizeof( outCaps );

5576 result = output->GetCaps( &outCaps );

5577 if ( FAILED( result ) ) {

5578 output->Release();

5579 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5580 errorText_ = errorStream_.str();

5584 // Check channel information.

5585 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5586 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5587 errorText_ = errorStream_.str();

5591 // Check format information. Use 16-bit format unless not

5592 // supported or user requests 8-bit.

5593 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5594 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5595 waveFormat.wBitsPerSample = 16;

5596 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5599 waveFormat.wBitsPerSample = 8;

5600 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

// Remember the caller's requested format; conversion to the device format
// (if different) is set up via doConvertBuffer/setConvertInfo further below.
5602 stream_.userFormat = format;

5604 // Update wave format structure and buffer information.

5605 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5606 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time in bytes: how far ahead of the play cursor we will write.
5607 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5609 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5610 while ( dsPointerLeadTime * 2U > dsBufferSize )

5611 dsBufferSize *= 2;

5613 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5614 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5615 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5616 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5617 if ( FAILED( result ) ) {

5618 output->Release();

5619 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5620 errorText_ = errorStream_.str();

5624 // Even though we will write to the secondary buffer, we need to

5625 // access the primary buffer to set the correct output format

5626 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5627 // buffer description.

5628 DSBUFFERDESC bufferDescription;

5629 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5630 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5631 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5633 // Obtain the primary buffer

5634 LPDIRECTSOUNDBUFFER buffer;

5635 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5636 if ( FAILED( result ) ) {

5637 output->Release();

5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5639 errorText_ = errorStream_.str();

5643 // Set the primary DS buffer sound format.

5644 result = buffer->SetFormat( &waveFormat );

5645 if ( FAILED( result ) ) {

5646 output->Release();

5647 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5648 errorText_ = errorStream_.str();

5652 // Setup the secondary DS buffer description.

5653 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5654 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5655 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5656 DSBCAPS_GLOBALFOCUS |

5657 DSBCAPS_GETCURRENTPOSITION2 |

5658 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5659 bufferDescription.dwBufferBytes = dsBufferSize;

5660 bufferDescription.lpwfxFormat = &waveFormat;

5662 // Try to create the secondary DS buffer. If that doesn't work,

5663 // try to use software mixing. Otherwise, there's a problem.

5664 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5665 if ( FAILED( result ) ) {

5666 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5667 DSBCAPS_GLOBALFOCUS |

5668 DSBCAPS_GETCURRENTPOSITION2 |

5669 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5670 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5671 if ( FAILED( result ) ) {

5672 output->Release();

5673 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5674 errorText_ = errorStream_.str();

5679 // Get the buffer size ... might be different from what we specified.

5681 dsbcaps.dwSize = sizeof( DSBCAPS );

5682 result = buffer->GetCaps( &dsbcaps );

5683 if ( FAILED( result ) ) {

5684 output->Release();

5685 buffer->Release();

5686 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5687 errorText_ = errorStream_.str();

5691 dsBufferSize = dsbcaps.dwBufferBytes;

5693 // Lock the DS buffer

5696 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5697 if ( FAILED( result ) ) {

5698 output->Release();

5699 buffer->Release();

5700 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5701 errorText_ = errorStream_.str();

5705 // Zero the DS buffer

5706 ZeroMemory( audioPtr, dataLen );

5708 // Unlock the DS buffer

5709 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5710 if ( FAILED( result ) ) {

5711 output->Release();

5712 buffer->Release();

5713 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5714 errorText_ = errorStream_.str();

5718 ohandle = (void *) output;

5719 bhandle = (void *) buffer;

5722 if ( mode == INPUT ) {

5724 LPDIRECTSOUNDCAPTURE input;

5725 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5726 if ( FAILED( result ) ) {

5727 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5728 errorText_ = errorStream_.str();

5733 inCaps.dwSize = sizeof( inCaps );

5734 result = input->GetCaps( &inCaps );

5735 if ( FAILED( result ) ) {

5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5738 errorText_ = errorStream_.str();

5742 // Check channel information.

5743 if ( inCaps.dwChannels < channels + firstChannel ) {

5744 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5748 // Check format information. Use 16-bit format unless user

5749 // requests 8-bit.

5750 DWORD deviceFormats;

5751 if ( channels + firstChannel == 2 ) {

// Stereo 8-bit capture masks at the standard rates (11.025/22.05/44.1/96 kHz).
5752 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5753 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5754 waveFormat.wBitsPerSample = 8;

5755 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5757 else { // assume 16-bit is supported

5758 waveFormat.wBitsPerSample = 16;

5759 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5762 else { // channel == 1

// Mono 8-bit capture masks at the standard rates.
5763 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5764 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5765 waveFormat.wBitsPerSample = 8;

5766 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5768 else { // assume 16-bit is supported

5769 waveFormat.wBitsPerSample = 16;

5770 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5773 stream_.userFormat = format;

5775 // Update wave format structure and buffer information.

5776 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5777 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5778 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5780 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5781 while ( dsPointerLeadTime * 2U > dsBufferSize )

5782 dsBufferSize *= 2;

5784 // Setup the secondary DS buffer description.

5785 DSCBUFFERDESC bufferDescription;

5786 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5787 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5788 bufferDescription.dwFlags = 0;

5789 bufferDescription.dwReserved = 0;

5790 bufferDescription.dwBufferBytes = dsBufferSize;

5791 bufferDescription.lpwfxFormat = &waveFormat;

5793 // Create the capture buffer.

5794 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5795 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5796 if ( FAILED( result ) ) {

5798 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5799 errorText_ = errorStream_.str();

5803 // Get the buffer size ... might be different from what we specified.

5804 DSCBCAPS dscbcaps;

5805 dscbcaps.dwSize = sizeof( DSCBCAPS );

5806 result = buffer->GetCaps( &dscbcaps );

5807 if ( FAILED( result ) ) {

5809 buffer->Release();

5810 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5811 errorText_ = errorStream_.str();

5815 dsBufferSize = dscbcaps.dwBufferBytes;

5817 // NOTE: We could have a problem here if this is a duplex stream

5818 // and the play and capture hardware buffer sizes are different

5819 // (I'm actually not sure if that is a problem or not).

5820 // Currently, we are not verifying that.

5822 // Lock the capture buffer

5825 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5826 if ( FAILED( result ) ) {

5828 buffer->Release();

5829 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5830 errorText_ = errorStream_.str();

5834 // Zero the buffer

5835 ZeroMemory( audioPtr, dataLen );

5837 // Unlock the buffer

5838 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5839 if ( FAILED( result ) ) {

5841 buffer->Release();

5842 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5843 errorText_ = errorStream_.str();

5847 ohandle = (void *) input;

5848 bhandle = (void *) buffer;

5851 // Set various stream parameters

5852 DsHandle *handle = 0;

5853 stream_.nDeviceChannels[mode] = channels + firstChannel;

5854 stream_.nUserChannels[mode] = channels;

5855 stream_.bufferSize = *bufferSize;

5856 stream_.channelOffset[mode] = firstChannel;

5857 stream_.deviceInterleaved[mode] = true;

5858 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5859 else stream_.userInterleaved = true;

5861 // Set flag for buffer conversion

// Conversion is needed when the channel count, sample format, or
// interleaving differs between the user side and the device side.
5862 stream_.doConvertBuffer[mode] = false;

5863 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5864 stream_.doConvertBuffer[mode] = true;

5865 if (stream_.userFormat != stream_.deviceFormat[mode])

5866 stream_.doConvertBuffer[mode] = true;

5867 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5868 stream_.nUserChannels[mode] > 1 )

5869 stream_.doConvertBuffer[mode] = true;

5871 // Allocate necessary internal buffers

5872 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5873 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5874 if ( stream_.userBuffer[mode] == NULL ) {

5875 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5879 if ( stream_.doConvertBuffer[mode] ) {

5881 bool makeBuffer = true;

5882 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5883 if ( mode == INPUT ) {

// In duplex, reuse the output-mode device buffer if it is already big enough.
5884 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5885 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5886 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5890 if ( makeBuffer ) {

5891 bufferBytes *= *bufferSize;

5892 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5893 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5894 if ( stream_.deviceBuffer == NULL ) {

5895 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5901 // Allocate our DsHandle structures for the stream.

5902 if ( stream_.apiHandle == 0 ) {

5904 handle = new DsHandle;

5906 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste slip from the ASIO backend; confirm before fixing.
5907 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5911 // Create a manual-reset event.

5912 handle->condition = CreateEvent( NULL, // no security

5913 TRUE, // manual-reset

5914 FALSE, // non-signaled initially

5915 NULL ); // unnamed

5916 stream_.apiHandle = (void *) handle;

5919 handle = (DsHandle *) stream_.apiHandle;

5920 handle->id[mode] = ohandle;

5921 handle->buffer[mode] = bhandle;

5922 handle->dsBufferSize[mode] = dsBufferSize;

5923 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5925 stream_.device[mode] = device;

5926 stream_.state = STREAM_STOPPED;

5927 if ( stream_.mode == OUTPUT && mode == INPUT )

5928 // We had already set up an output stream.

5929 stream_.mode = DUPLEX;

5931 stream_.mode = mode;

5932 stream_.nBuffers = nBuffers;

5933 stream_.sampleRate = sampleRate;

5935 // Setup the buffer conversion information structure.

5936 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5938 // Setup the callback thread.

5939 if ( stream_.callbackInfo.isRunning == false ) {

5940 unsigned threadId;

5941 stream_.callbackInfo.isRunning = true;

5942 stream_.callbackInfo.object = (void *) this;

5943 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5944 &stream_.callbackInfo, 0, &threadId );

5945 if ( stream_.callbackInfo.thread == 0 ) {

5946 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5950 // Boost DS thread priority

5951 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error/cleanup path: release any DirectSound objects and buffers created
// above, destroy the event handle, and free the stream's internal buffers.
5957 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5958 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5959 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5960 if ( buffer ) buffer->Release();

5961 object->Release();

5963 if ( handle->buffer[1] ) {

5964 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5965 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5966 if ( buffer ) buffer->Release();

5967 object->Release();

5969 CloseHandle( handle->condition );

5971 stream_.apiHandle = 0;

5974 for ( int i=0; i<2; i++ ) {

5975 if ( stream_.userBuffer[i] ) {

5976 free( stream_.userBuffer[i] );

5977 stream_.userBuffer[i] = 0;

5981 if ( stream_.deviceBuffer ) {

5982 free( stream_.deviceBuffer );

5983 stream_.deviceBuffer = 0;

5986 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, destroy the condition
// event, free the internal user/device buffers, and reset the stream state.
// NOTE(review): some lines (closing braces, the handle null-check) are
// missing from this extracted view.
5990 void RtApiDs :: closeStream()

5992 if ( stream_.state == STREAM_CLOSED ) {

5993 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5994 error( RtAudioError::WARNING );

5998 // Stop the callback thread.

// Clearing isRunning signals the thread loop to exit; then wait for it and
// release the thread handle.
5999 stream_.callbackInfo.isRunning = false;

6000 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

6001 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6003 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release output-side DS objects (index 0 = playback).
6005 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6006 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6007 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6010 buffer->Release();

6012 object->Release();

// Release input-side DS objects (index 1 = capture).
6014 if ( handle->buffer[1] ) {

6015 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6016 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6019 buffer->Release();

6021 object->Release();

6023 CloseHandle( handle->condition );

6025 stream_.apiHandle = 0;

// Free the per-direction user buffers and the shared device buffer.
6028 for ( int i=0; i<2; i++ ) {

6029 if ( stream_.userBuffer[i] ) {

6030 free( stream_.userBuffer[i] );

6031 stream_.userBuffer[i] = 0;

6035 if ( stream_.deviceBuffer ) {

6036 free( stream_.deviceBuffer );

6037 stream_.deviceBuffer = 0;

6040 stream_.mode = UNINITIALIZED;

6041 stream_.state = STREAM_CLOSED;
\r
// Start the stream: begin looping playback and/or capture on the DS buffers,
// reset the drain bookkeeping, and mark the stream running.
// NOTE(review): the verifyStream() call and the error-exit lines are missing
// from this extracted view.
6044 void RtApiDs :: startStream()

6047 if ( stream_.state == STREAM_RUNNING ) {

6048 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6049 error( RtAudioError::WARNING );

6053 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6055 // Increase scheduler frequency on lesser windows (a side-effect of

6056 // increasing timer accuracy). On greater windows (Win2K or later),

6057 // this is already in effect.

6058 timeBeginPeriod( 1 );

6060 buffersRolling = false;

6061 duplexPrerollBytes = 0;

6063 if ( stream_.mode == DUPLEX ) {

6064 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6065 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6068 HRESULT result = 0;

// Start looping playback on the output buffer (OUTPUT or DUPLEX).
6069 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6071 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6072 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6073 if ( FAILED( result ) ) {

6074 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6075 errorText_ = errorStream_.str();

// Start looping capture on the input buffer (INPUT or DUPLEX).
6080 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6082 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6083 result = buffer->Start( DSCBSTART_LOOPING );

6084 if ( FAILED( result ) ) {

6085 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6086 errorText_ = errorStream_.str();

// Reset drain state and the manual-reset condition event before running.
6091 handle->drainCounter = 0;

6092 handle->internalDrain = false;

6093 ResetEvent( handle->condition );

6094 stream_.state = STREAM_RUNNING;

6097 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream: optionally wait for the output to drain, then stop the
// playback/capture buffers, zero them so a restart does not replay stale
// data, and rewind the internal buffer pointers.
// NOTE(review): local declarations (audioPtr/dataLen), the verifyStream()
// call, and some braces/goto-unlock lines are missing from this extracted
// view; the mutex lock/unlock pairing cannot be fully verified here.
6100 void RtApiDs :: stopStream()

6103 if ( stream_.state == STREAM_STOPPED ) {

6104 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6105 error( RtAudioError::WARNING );

6109 HRESULT result = 0;

6112 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6113 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// If no drain was already requested, request one (drainCounter = 2) and
// block until the callback signals the condition event.
6114 if ( handle->drainCounter == 0 ) {

6115 handle->drainCounter = 2;

6116 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6119 stream_.state = STREAM_STOPPED;

6121 MUTEX_LOCK( &stream_.mutex );

6123 // Stop the buffer and clear memory

6124 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6125 result = buffer->Stop();

6126 if ( FAILED( result ) ) {

6127 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6128 errorText_ = errorStream_.str();

6132 // Lock the buffer and clear it so that if we start to play again,

6133 // we won't have old data playing.

6134 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6135 if ( FAILED( result ) ) {

6136 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6137 errorText_ = errorStream_.str();

6141 // Zero the DS buffer

6142 ZeroMemory( audioPtr, dataLen );

6144 // Unlock the DS buffer

6145 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6146 if ( FAILED( result ) ) {

6147 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6148 errorText_ = errorStream_.str();

6152 // If we start playing again, we must begin at beginning of buffer.

6153 handle->bufferPointer[0] = 0;

6156 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6157 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6161 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6163 if ( stream_.mode != DUPLEX )

6164 MUTEX_LOCK( &stream_.mutex );

6166 result = buffer->Stop();

6167 if ( FAILED( result ) ) {

6168 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6169 errorText_ = errorStream_.str();

6173 // Lock the buffer and clear it so that if we start to play again,

6174 // we won't have old data playing.

6175 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6176 if ( FAILED( result ) ) {

6177 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6178 errorText_ = errorStream_.str();

6182 // Zero the DS buffer

6183 ZeroMemory( audioPtr, dataLen );

6185 // Unlock the DS buffer

6186 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6187 if ( FAILED( result ) ) {

6188 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6189 errorText_ = errorStream_.str();

6193 // If we start recording again, we must begin at beginning of buffer.

6194 handle->bufferPointer[1] = 0;

6198 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6199 MUTEX_UNLOCK( &stream_.mutex );

6201 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining: setting drainCounter to 2 in the
// visible portion marks a stop request; the function's tail (presumably the
// stopStream() call and closing brace) is missing from this extracted view —
// confirm against the canonical source.
6204 void RtApiDs :: abortStream()

6207 if ( stream_.state == STREAM_STOPPED ) {

6208 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6209 error( RtAudioError::WARNING );

6213 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6214 handle->drainCounter = 2;
\r
6219 void RtApiDs :: callbackEvent()
\r
6221 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6222 Sleep( 50 ); // sleep 50 milliseconds
\r
6226 if ( stream_.state == STREAM_CLOSED ) {
\r
6227 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6228 error( RtAudioError::WARNING );
\r
6232 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6233 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6235 // Check if we were draining the stream and signal is finished.
\r
6236 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6238 stream_.state = STREAM_STOPPING;
\r
6239 if ( handle->internalDrain == false )
\r
6240 SetEvent( handle->condition );
\r
6246 // Invoke user callback to get fresh output data UNLESS we are
\r
6247 // draining stream.
\r
6248 if ( handle->drainCounter == 0 ) {
\r
6249 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6250 double streamTime = getStreamTime();
\r
6251 RtAudioStreamStatus status = 0;
\r
6252 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6253 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6254 handle->xrun[0] = false;
\r
6256 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6257 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6258 handle->xrun[1] = false;
\r
6260 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6261 stream_.bufferSize, streamTime, status, info->userData );
\r
6262 if ( cbReturnValue == 2 ) {
\r
6263 stream_.state = STREAM_STOPPING;
\r
6264 handle->drainCounter = 2;
\r
6268 else if ( cbReturnValue == 1 ) {
\r
6269 handle->drainCounter = 1;
\r
6270 handle->internalDrain = true;
\r
6275 DWORD currentWritePointer, safeWritePointer;
\r
6276 DWORD currentReadPointer, safeReadPointer;
\r
6277 UINT nextWritePointer;
\r
6279 LPVOID buffer1 = NULL;
\r
6280 LPVOID buffer2 = NULL;
\r
6281 DWORD bufferSize1 = 0;
\r
6282 DWORD bufferSize2 = 0;
\r
6287 MUTEX_LOCK( &stream_.mutex );
\r
6288 if ( stream_.state == STREAM_STOPPED ) {
\r
6289 MUTEX_UNLOCK( &stream_.mutex );
\r
6293 if ( buffersRolling == false ) {
\r
6294 if ( stream_.mode == DUPLEX ) {
\r
6295 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6297 // It takes a while for the devices to get rolling. As a result,
\r
6298 // there's no guarantee that the capture and write device pointers
\r
6299 // will move in lockstep. Wait here for both devices to start
\r
6300 // rolling, and then set our buffer pointers accordingly.
\r
6301 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6302 // bytes later than the write buffer.
\r
6304 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6305 // take place between the two GetCurrentPosition calls... but I'm
\r
6306 // really not sure how to solve the problem. Temporarily boost to
\r
6307 // Realtime priority, maybe; but I'm not sure what priority the
\r
6308 // DirectSound service threads run at. We *should* be roughly
\r
6309 // within a ms or so of correct.
\r
6311 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6312 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6314 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6316 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6317 if ( FAILED( result ) ) {
\r
6318 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6319 errorText_ = errorStream_.str();
\r
6320 error( RtAudioError::SYSTEM_ERROR );
\r
6323 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6324 if ( FAILED( result ) ) {
\r
6325 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6326 errorText_ = errorStream_.str();
\r
6327 error( RtAudioError::SYSTEM_ERROR );
\r
6331 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6332 if ( FAILED( result ) ) {
\r
6333 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6334 errorText_ = errorStream_.str();
\r
6335 error( RtAudioError::SYSTEM_ERROR );
\r
6338 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6339 if ( FAILED( result ) ) {
\r
6340 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6341 errorText_ = errorStream_.str();
\r
6342 error( RtAudioError::SYSTEM_ERROR );
\r
6345 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6349 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6351 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6352 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6353 handle->bufferPointer[1] = safeReadPointer;
\r
6355 else if ( stream_.mode == OUTPUT ) {
\r
6357 // Set the proper nextWritePosition after initial startup.
\r
6358 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6359 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6360 if ( FAILED( result ) ) {
\r
6361 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6362 errorText_ = errorStream_.str();
\r
6363 error( RtAudioError::SYSTEM_ERROR );
\r
6366 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6367 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6370 buffersRolling = true;
\r
6373 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6375 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6377 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6378 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6379 bufferBytes *= formatBytes( stream_.userFormat );
\r
6380 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6383 // Setup parameters and do buffer conversion if necessary.
\r
6384 if ( stream_.doConvertBuffer[0] ) {
\r
6385 buffer = stream_.deviceBuffer;
\r
6386 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6387 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6388 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6391 buffer = stream_.userBuffer[0];
\r
6392 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6393 bufferBytes *= formatBytes( stream_.userFormat );
\r
6396 // No byte swapping necessary in DirectSound implementation.
\r
6398 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6399 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6401 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6402 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6404 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6405 nextWritePointer = handle->bufferPointer[0];
\r
6407 DWORD endWrite, leadPointer;
\r
6409 // Find out where the read and "safe write" pointers are.
\r
6410 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6411 if ( FAILED( result ) ) {
\r
6412 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6413 errorText_ = errorStream_.str();
\r
6414 error( RtAudioError::SYSTEM_ERROR );
\r
6418 // We will copy our output buffer into the region between
\r
6419 // safeWritePointer and leadPointer. If leadPointer is not
\r
6420 // beyond the next endWrite position, wait until it is.
\r
6421 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6422 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6423 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6424 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6425 endWrite = nextWritePointer + bufferBytes;
\r
6427 // Check whether the entire write region is behind the play pointer.
\r
6428 if ( leadPointer >= endWrite ) break;
\r
6430 // If we are here, then we must wait until the leadPointer advances
\r
6431 // beyond the end of our next write region. We use the
\r
6432 // Sleep() function to suspend operation until that happens.
\r
6433 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6434 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6435 if ( millis < 1.0 ) millis = 1.0;
\r
6436 Sleep( (DWORD) millis );
\r
6439 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6440 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6441 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6442 handle->xrun[0] = true;
\r
6443 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6444 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6445 handle->bufferPointer[0] = nextWritePointer;
\r
6446 endWrite = nextWritePointer + bufferBytes;
\r
6449 // Lock free space in the buffer
\r
6450 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6451 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6452 if ( FAILED( result ) ) {
\r
6453 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6454 errorText_ = errorStream_.str();
\r
6455 error( RtAudioError::SYSTEM_ERROR );
\r
6459 // Copy our buffer into the DS buffer
\r
6460 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6461 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6463 // Update our buffer offset and unlock sound buffer
\r
6464 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6465 if ( FAILED( result ) ) {
\r
6466 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6467 errorText_ = errorStream_.str();
\r
6468 error( RtAudioError::SYSTEM_ERROR );
\r
6471 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6472 handle->bufferPointer[0] = nextWritePointer;
\r
6475 // Don't bother draining input
\r
6476 if ( handle->drainCounter ) {
\r
6477 handle->drainCounter++;
\r
6481 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6483 // Setup parameters.
\r
6484 if ( stream_.doConvertBuffer[1] ) {
\r
6485 buffer = stream_.deviceBuffer;
\r
6486 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6487 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6490 buffer = stream_.userBuffer[1];
\r
6491 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6492 bufferBytes *= formatBytes( stream_.userFormat );
\r
6495 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6496 long nextReadPointer = handle->bufferPointer[1];
\r
6497 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6499 // Find out where the write and "safe read" pointers are.
\r
6500 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6501 if ( FAILED( result ) ) {
\r
6502 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6503 errorText_ = errorStream_.str();
\r
6504 error( RtAudioError::SYSTEM_ERROR );
\r
6508 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6509 DWORD endRead = nextReadPointer + bufferBytes;
\r
6511 // Handling depends on whether we are INPUT or DUPLEX.
\r
6512 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6513 // then a wait here will drag the write pointers into the forbidden zone.
\r
6515 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6516 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6517 // practical way to sync up the read and write pointers reliably, given the
\r
6518 // the very complex relationship between phase and increment of the read and write
\r
6521 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6522 // provide a pre-roll period of 0.5 seconds in which we return
\r
6523 // zeros from the read buffer while the pointers sync up.
\r
6525 if ( stream_.mode == DUPLEX ) {
\r
6526 if ( safeReadPointer < endRead ) {
\r
6527 if ( duplexPrerollBytes <= 0 ) {
\r
6528 // Pre-roll time over. Be more aggressive.
\r
6529 int adjustment = endRead-safeReadPointer;
\r
6531 handle->xrun[1] = true;
\r
6533 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6534 // and perform fine adjustments later.
\r
6535 // - small adjustments: back off by twice as much.
\r
6536 if ( adjustment >= 2*bufferBytes )
\r
6537 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6539 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6541 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6545 // In pre-roll time. Just do it.
\r
6546 nextReadPointer = safeReadPointer - bufferBytes;
\r
6547 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6549 endRead = nextReadPointer + bufferBytes;
\r
6552 else { // mode == INPUT
\r
6553 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6554 // See comments for playback.
\r
6555 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6556 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6557 if ( millis < 1.0 ) millis = 1.0;
\r
6558 Sleep( (DWORD) millis );
\r
6560 // Wake up and find out where we are now.
\r
6561 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6562 if ( FAILED( result ) ) {
\r
6563 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6564 errorText_ = errorStream_.str();
\r
6565 error( RtAudioError::SYSTEM_ERROR );
\r
6569 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6573 // Lock free space in the buffer
\r
6574 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6575 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6576 if ( FAILED( result ) ) {
\r
6577 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6578 errorText_ = errorStream_.str();
\r
6579 error( RtAudioError::SYSTEM_ERROR );
\r
6583 if ( duplexPrerollBytes <= 0 ) {
\r
6584 // Copy our buffer into the DS buffer
\r
6585 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6586 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6589 memset( buffer, 0, bufferSize1 );
\r
6590 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6591 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6594 // Update our buffer offset and unlock sound buffer
\r
6595 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6596 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6597 if ( FAILED( result ) ) {
\r
6598 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6599 errorText_ = errorStream_.str();
\r
6600 error( RtAudioError::SYSTEM_ERROR );
\r
6603 handle->bufferPointer[1] = nextReadPointer;
\r
6605 // No byte swapping necessary in DirectSound implementation.
\r
6607 // If necessary, convert 8-bit data from unsigned to signed.
\r
6608 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6609 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6611 // Do buffer conversion if necessary.
\r
6612 if ( stream_.doConvertBuffer[1] )
\r
6613 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6617 MUTEX_UNLOCK( &stream_.mutex );
\r
6618 RtApi::tickStreamTime();
\r
6621 // Definitions for utility functions and callbacks
\r
6622 // specific to the DirectSound implementation.
\r
6624 static unsigned __stdcall callbackHandler( void *ptr )
\r
6626 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6627 RtApiDs *object = (RtApiDs *) info->object;
\r
6628 bool* isRunning = &info->isRunning;
\r
6630 while ( *isRunning == true ) {
\r
6631 object->callbackEvent();
\r
6634 _endthreadex( 0 );
\r
6638 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6639 LPCTSTR description,
\r
6640 LPCTSTR /*module*/,
\r
6641 LPVOID lpContext )
\r
6643 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6644 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6647 bool validDevice = false;
\r
6648 if ( probeInfo.isInput == true ) {
\r
6650 LPDIRECTSOUNDCAPTURE object;
\r
6652 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6653 if ( hr != DS_OK ) return TRUE;
\r
6655 caps.dwSize = sizeof(caps);
\r
6656 hr = object->GetCaps( &caps );
\r
6657 if ( hr == DS_OK ) {
\r
6658 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6659 validDevice = true;
\r
6661 object->Release();
\r
6665 LPDIRECTSOUND object;
\r
6666 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6667 if ( hr != DS_OK ) return TRUE;
\r
6669 caps.dwSize = sizeof(caps);
\r
6670 hr = object->GetCaps( &caps );
\r
6671 if ( hr == DS_OK ) {
\r
6672 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6673 validDevice = true;
\r
6675 object->Release();
\r
6678 // If good device, then save its name and guid.
\r
6679 std::string name = convertCharPointerToStdString( description );
\r
6680 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6681 if ( lpguid == NULL )
\r
6682 name = "Default Device";
\r
6683 if ( validDevice ) {
\r
6684 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6685 if ( dsDevices[i].name == name ) {
\r
6686 dsDevices[i].found = true;
\r
6687 if ( probeInfo.isInput ) {
\r
6688 dsDevices[i].id[1] = lpguid;
\r
6689 dsDevices[i].validId[1] = true;
\r
6692 dsDevices[i].id[0] = lpguid;
\r
6693 dsDevices[i].validId[0] = true;
\r
6700 device.name = name;
\r
6701 device.found = true;
\r
6702 if ( probeInfo.isInput ) {
\r
6703 device.id[1] = lpguid;
\r
6704 device.validId[1] = true;
\r
6707 device.id[0] = lpguid;
\r
6708 device.validId[0] = true;
\r
6710 dsDevices.push_back( device );
\r
6716 static const char* getErrorString( int code )
\r
6720 case DSERR_ALLOCATED:
\r
6721 return "Already allocated";
\r
6723 case DSERR_CONTROLUNAVAIL:
\r
6724 return "Control unavailable";
\r
6726 case DSERR_INVALIDPARAM:
\r
6727 return "Invalid parameter";
\r
6729 case DSERR_INVALIDCALL:
\r
6730 return "Invalid call";
\r
6732 case DSERR_GENERIC:
\r
6733 return "Generic error";
\r
6735 case DSERR_PRIOLEVELNEEDED:
\r
6736 return "Priority level needed";
\r
6738 case DSERR_OUTOFMEMORY:
\r
6739 return "Out of memory";
\r
6741 case DSERR_BADFORMAT:
\r
6742 return "The sample rate or the channel format is not supported";
\r
6744 case DSERR_UNSUPPORTED:
\r
6745 return "Not supported";
\r
6747 case DSERR_NODRIVER:
\r
6748 return "No driver";
\r
6750 case DSERR_ALREADYINITIALIZED:
\r
6751 return "Already initialized";
\r
6753 case DSERR_NOAGGREGATION:
\r
6754 return "No aggregation";
\r
6756 case DSERR_BUFFERLOST:
\r
6757 return "Buffer lost";
\r
6759 case DSERR_OTHERAPPHASPRIO:
\r
6760 return "Another application already has priority";
\r
6762 case DSERR_UNINITIALIZED:
\r
6763 return "Uninitialized";
\r
6766 return "DirectSound unknown error";
\r
6769 //******************** End of __WINDOWS_DS__ *********************//
\r
6773 #if defined(__LINUX_ALSA__)
\r
6775 #include <alsa/asoundlib.h>
\r
6776 #include <unistd.h>
\r
6778 // A structure to hold various information related to the ALSA API
\r
6779 // implementation.
\r
6780 struct AlsaHandle {
\r
6781 snd_pcm_t *handles[2];
\r
6782 bool synchronized;
\r
6784 pthread_cond_t runnable_cv;
\r
6788 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6791 static void *alsaCallbackHandler( void * ptr );
\r
6793 RtApiAlsa :: RtApiAlsa()
\r
6795 // Nothing to do here.
\r
6798 RtApiAlsa :: ~RtApiAlsa()
\r
6800 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6803 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6805 unsigned nDevices = 0;
\r
6806 int result, subdevice, card;
\r
6808 snd_ctl_t *handle;
\r
6810 // Count cards and devices
\r
6812 snd_card_next( &card );
\r
6813 while ( card >= 0 ) {
\r
6814 sprintf( name, "hw:%d", card );
\r
6815 result = snd_ctl_open( &handle, name, 0 );
\r
6816 if ( result < 0 ) {
\r
6817 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6818 errorText_ = errorStream_.str();
\r
6819 error( RtAudioError::WARNING );
\r
6824 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6825 if ( result < 0 ) {
\r
6826 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6827 errorText_ = errorStream_.str();
\r
6828 error( RtAudioError::WARNING );
\r
6831 if ( subdevice < 0 )
\r
6836 snd_ctl_close( handle );
\r
6837 snd_card_next( &card );
\r
6840 result = snd_ctl_open( &handle, "default", 0 );
\r
6841 if (result == 0) {
\r
6843 snd_ctl_close( handle );
\r
6849 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6851 RtAudio::DeviceInfo info;
\r
6852 info.probed = false;
\r
6854 unsigned nDevices = 0;
\r
6855 int result, subdevice, card;
\r
6857 snd_ctl_t *chandle;
\r
6859 // Count cards and devices
\r
6862 snd_card_next( &card );
\r
6863 while ( card >= 0 ) {
\r
6864 sprintf( name, "hw:%d", card );
\r
6865 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6866 if ( result < 0 ) {
\r
6867 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6868 errorText_ = errorStream_.str();
\r
6869 error( RtAudioError::WARNING );
\r
6874 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6875 if ( result < 0 ) {
\r
6876 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6877 errorText_ = errorStream_.str();
\r
6878 error( RtAudioError::WARNING );
\r
6881 if ( subdevice < 0 ) break;
\r
6882 if ( nDevices == device ) {
\r
6883 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6889 snd_ctl_close( chandle );
\r
6890 snd_card_next( &card );
\r
6893 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6894 if ( result == 0 ) {
\r
6895 if ( nDevices == device ) {
\r
6896 strcpy( name, "default" );
\r
6902 if ( nDevices == 0 ) {
\r
6903 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6904 error( RtAudioError::INVALID_USE );
\r
6908 if ( device >= nDevices ) {
\r
6909 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6910 error( RtAudioError::INVALID_USE );
\r
6916 // If a stream is already open, we cannot probe the stream devices.
\r
6917 // Thus, use the saved results.
\r
6918 if ( stream_.state != STREAM_CLOSED &&
\r
6919 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6920 snd_ctl_close( chandle );
\r
6921 if ( device >= devices_.size() ) {
\r
6922 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6923 error( RtAudioError::WARNING );
\r
6926 return devices_[ device ];
\r
6929 int openMode = SND_PCM_ASYNC;
\r
6930 snd_pcm_stream_t stream;
\r
6931 snd_pcm_info_t *pcminfo;
\r
6932 snd_pcm_info_alloca( &pcminfo );
\r
6933 snd_pcm_t *phandle;
\r
6934 snd_pcm_hw_params_t *params;
\r
6935 snd_pcm_hw_params_alloca( ¶ms );
\r
6937 // First try for playback unless default device (which has subdev -1)
\r
6938 stream = SND_PCM_STREAM_PLAYBACK;
\r
6939 snd_pcm_info_set_stream( pcminfo, stream );
\r
6940 if ( subdevice != -1 ) {
\r
6941 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6942 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6944 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6945 if ( result < 0 ) {
\r
6946 // Device probably doesn't support playback.
\r
6947 goto captureProbe;
\r
6951 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6952 if ( result < 0 ) {
\r
6953 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6954 errorText_ = errorStream_.str();
\r
6955 error( RtAudioError::WARNING );
\r
6956 goto captureProbe;
\r
6959 // The device is open ... fill the parameter structure.
\r
6960 result = snd_pcm_hw_params_any( phandle, params );
\r
6961 if ( result < 0 ) {
\r
6962 snd_pcm_close( phandle );
\r
6963 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6964 errorText_ = errorStream_.str();
\r
6965 error( RtAudioError::WARNING );
\r
6966 goto captureProbe;
\r
6969 // Get output channel information.
\r
6970 unsigned int value;
\r
6971 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6972 if ( result < 0 ) {
\r
6973 snd_pcm_close( phandle );
\r
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6975 errorText_ = errorStream_.str();
\r
6976 error( RtAudioError::WARNING );
\r
6977 goto captureProbe;
\r
6979 info.outputChannels = value;
\r
6980 snd_pcm_close( phandle );
\r
6983 stream = SND_PCM_STREAM_CAPTURE;
\r
6984 snd_pcm_info_set_stream( pcminfo, stream );
\r
6986 // Now try for capture unless default device (with subdev = -1)
\r
6987 if ( subdevice != -1 ) {
\r
6988 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6989 snd_ctl_close( chandle );
\r
6990 if ( result < 0 ) {
\r
6991 // Device probably doesn't support capture.
\r
6992 if ( info.outputChannels == 0 ) return info;
\r
6993 goto probeParameters;
\r
6997 snd_ctl_close( chandle );
\r
6999 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7000 if ( result < 0 ) {
\r
7001 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7002 errorText_ = errorStream_.str();
\r
7003 error( RtAudioError::WARNING );
\r
7004 if ( info.outputChannels == 0 ) return info;
\r
7005 goto probeParameters;
\r
7008 // The device is open ... fill the parameter structure.
\r
7009 result = snd_pcm_hw_params_any( phandle, params );
\r
7010 if ( result < 0 ) {
\r
7011 snd_pcm_close( phandle );
\r
7012 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7013 errorText_ = errorStream_.str();
\r
7014 error( RtAudioError::WARNING );
\r
7015 if ( info.outputChannels == 0 ) return info;
\r
7016 goto probeParameters;
\r
7019 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7020 if ( result < 0 ) {
\r
7021 snd_pcm_close( phandle );
\r
7022 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7023 errorText_ = errorStream_.str();
\r
7024 error( RtAudioError::WARNING );
\r
7025 if ( info.outputChannels == 0 ) return info;
\r
7026 goto probeParameters;
\r
7028 info.inputChannels = value;
\r
7029 snd_pcm_close( phandle );
\r
7031 // If device opens for both playback and capture, we determine the channels.
\r
7032 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7033 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7035 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7036 if ( device == 0 && info.outputChannels > 0 )
\r
7037 info.isDefaultOutput = true;
\r
7038 if ( device == 0 && info.inputChannels > 0 )
\r
7039 info.isDefaultInput = true;
\r
7042 // At this point, we just need to figure out the supported data
\r
7043 // formats and sample rates. We'll proceed by opening the device in
\r
7044 // the direction with the maximum number of channels, or playback if
\r
7045 // they are equal. This might limit our sample rate options, but so
\r
7048 if ( info.outputChannels >= info.inputChannels )
\r
7049 stream = SND_PCM_STREAM_PLAYBACK;
\r
7051 stream = SND_PCM_STREAM_CAPTURE;
\r
7052 snd_pcm_info_set_stream( pcminfo, stream );
\r
7054 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7055 if ( result < 0 ) {
\r
7056 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7057 errorText_ = errorStream_.str();
\r
7058 error( RtAudioError::WARNING );
\r
7062 // The device is open ... fill the parameter structure.
\r
7063 result = snd_pcm_hw_params_any( phandle, params );
\r
7064 if ( result < 0 ) {
\r
7065 snd_pcm_close( phandle );
\r
7066 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7067 errorText_ = errorStream_.str();
\r
7068 error( RtAudioError::WARNING );
\r
7072 // Test our discrete set of sample rate values.
\r
7073 info.sampleRates.clear();
\r
7074 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7075 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7076 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7078 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7079 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7082 if ( info.sampleRates.size() == 0 ) {
\r
7083 snd_pcm_close( phandle );
\r
7084 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7085 errorText_ = errorStream_.str();
\r
7086 error( RtAudioError::WARNING );
\r
7090 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7091 snd_pcm_format_t format;
\r
7092 info.nativeFormats = 0;
\r
7093 format = SND_PCM_FORMAT_S8;
\r
7094 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7095 info.nativeFormats |= RTAUDIO_SINT8;
\r
7096 format = SND_PCM_FORMAT_S16;
\r
7097 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7098 info.nativeFormats |= RTAUDIO_SINT16;
\r
7099 format = SND_PCM_FORMAT_S24;
\r
7100 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7101 info.nativeFormats |= RTAUDIO_SINT24;
\r
7102 format = SND_PCM_FORMAT_S32;
\r
7103 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7104 info.nativeFormats |= RTAUDIO_SINT32;
\r
7105 format = SND_PCM_FORMAT_FLOAT;
\r
7106 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7107 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7108 format = SND_PCM_FORMAT_FLOAT64;
\r
7109 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7110 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7112 // Check that we have at least one supported format
\r
7113 if ( info.nativeFormats == 0 ) {
\r
7114 snd_pcm_close( phandle );
\r
7115 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7116 errorText_ = errorStream_.str();
\r
7117 error( RtAudioError::WARNING );
\r
7121 // Get the device name
\r
7123 result = snd_card_get_name( card, &cardname );
\r
7124 if ( result >= 0 ) {
\r
7125 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7130 // That's all ... close the device and return
\r
7131 snd_pcm_close( phandle );
\r
7132 info.probed = true;
\r
7136 void RtApiAlsa :: saveDeviceInfo( void )
\r
7140 unsigned int nDevices = getDeviceCount();
\r
7141 devices_.resize( nDevices );
\r
7142 for ( unsigned int i=0; i<nDevices; i++ )
\r
7143 devices_[i] = getDeviceInfo( i );
\r
7146 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7147 unsigned int firstChannel, unsigned int sampleRate,
\r
7148 RtAudioFormat format, unsigned int *bufferSize,
\r
7149 RtAudio::StreamOptions *options )
\r
7152 #if defined(__RTAUDIO_DEBUG__)
\r
7153 snd_output_t *out;
\r
7154 snd_output_stdio_attach(&out, stderr, 0);
\r
7157 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7159 unsigned nDevices = 0;
\r
7160 int result, subdevice, card;
\r
7162 snd_ctl_t *chandle;
\r
7164 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7165 snprintf(name, sizeof(name), "%s", "default");
\r
7167 // Count cards and devices
\r
7169 snd_card_next( &card );
\r
7170 while ( card >= 0 ) {
\r
7171 sprintf( name, "hw:%d", card );
\r
7172 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7173 if ( result < 0 ) {
\r
7174 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7175 errorText_ = errorStream_.str();
\r
7180 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7181 if ( result < 0 ) break;
\r
7182 if ( subdevice < 0 ) break;
\r
7183 if ( nDevices == device ) {
\r
7184 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7185 snd_ctl_close( chandle );
\r
7190 snd_ctl_close( chandle );
\r
7191 snd_card_next( &card );
\r
7194 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7195 if ( result == 0 ) {
\r
7196 if ( nDevices == device ) {
\r
7197 strcpy( name, "default" );
\r
7203 if ( nDevices == 0 ) {
\r
7204 // This should not happen because a check is made before this function is called.
\r
7205 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7209 if ( device >= nDevices ) {
\r
7210 // This should not happen because a check is made before this function is called.
\r
7211 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7218 // The getDeviceInfo() function will not work for a device that is
\r
7219 // already open. Thus, we'll probe the system before opening a
\r
7220 // stream and save the results for use by getDeviceInfo().
\r
7221 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7222 this->saveDeviceInfo();
\r
7224 snd_pcm_stream_t stream;
\r
7225 if ( mode == OUTPUT )
\r
7226 stream = SND_PCM_STREAM_PLAYBACK;
\r
7228 stream = SND_PCM_STREAM_CAPTURE;
\r
7230 snd_pcm_t *phandle;
\r
7231 int openMode = SND_PCM_ASYNC;
\r
7232 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7233 if ( result < 0 ) {
\r
7234 if ( mode == OUTPUT )
\r
7235 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7237 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7238 errorText_ = errorStream_.str();
\r
7242 // Fill the parameter structure.
\r
7243 snd_pcm_hw_params_t *hw_params;
\r
7244 snd_pcm_hw_params_alloca( &hw_params );
\r
7245 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7246 if ( result < 0 ) {
\r
7247 snd_pcm_close( phandle );
\r
7248 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7249 errorText_ = errorStream_.str();
\r
7253 #if defined(__RTAUDIO_DEBUG__)
\r
7254 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7255 snd_pcm_hw_params_dump( hw_params, out );
\r
7258 // Set access ... check user preference.
\r
7259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7260 stream_.userInterleaved = false;
\r
7261 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7262 if ( result < 0 ) {
\r
7263 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7264 stream_.deviceInterleaved[mode] = true;
\r
7267 stream_.deviceInterleaved[mode] = false;
\r
7270 stream_.userInterleaved = true;
\r
7271 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7272 if ( result < 0 ) {
\r
7273 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7274 stream_.deviceInterleaved[mode] = false;
\r
7277 stream_.deviceInterleaved[mode] = true;
\r
7280 if ( result < 0 ) {
\r
7281 snd_pcm_close( phandle );
\r
7282 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7283 errorText_ = errorStream_.str();
\r
7287 // Determine how to set the device format.
\r
7288 stream_.userFormat = format;
\r
7289 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7291 if ( format == RTAUDIO_SINT8 )
\r
7292 deviceFormat = SND_PCM_FORMAT_S8;
\r
7293 else if ( format == RTAUDIO_SINT16 )
\r
7294 deviceFormat = SND_PCM_FORMAT_S16;
\r
7295 else if ( format == RTAUDIO_SINT24 )
\r
7296 deviceFormat = SND_PCM_FORMAT_S24;
\r
7297 else if ( format == RTAUDIO_SINT32 )
\r
7298 deviceFormat = SND_PCM_FORMAT_S32;
\r
7299 else if ( format == RTAUDIO_FLOAT32 )
\r
7300 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7301 else if ( format == RTAUDIO_FLOAT64 )
\r
7302 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7304 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7305 stream_.deviceFormat[mode] = format;
\r
7309 // The user requested format is not natively supported by the device.
\r
7310 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7311 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7316 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7317 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7318 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7322 deviceFormat = SND_PCM_FORMAT_S32;
\r
7323 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7324 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7328 deviceFormat = SND_PCM_FORMAT_S24;
\r
7329 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7330 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7334 deviceFormat = SND_PCM_FORMAT_S16;
\r
7335 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7336 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7340 deviceFormat = SND_PCM_FORMAT_S8;
\r
7341 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7342 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7346 // If we get here, no supported format was found.
\r
7347 snd_pcm_close( phandle );
\r
7348 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7349 errorText_ = errorStream_.str();
\r
7353 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7354 if ( result < 0 ) {
\r
7355 snd_pcm_close( phandle );
\r
7356 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7357 errorText_ = errorStream_.str();
\r
7361 // Determine whether byte-swaping is necessary.
\r
7362 stream_.doByteSwap[mode] = false;
\r
7363 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7364 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7365 if ( result == 0 )
\r
7366 stream_.doByteSwap[mode] = true;
\r
7367 else if (result < 0) {
\r
7368 snd_pcm_close( phandle );
\r
7369 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7370 errorText_ = errorStream_.str();
\r
7375 // Set the sample rate.
\r
7376 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7377 if ( result < 0 ) {
\r
7378 snd_pcm_close( phandle );
\r
7379 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7380 errorText_ = errorStream_.str();
\r
7384 // Determine the number of channels for this device. We support a possible
\r
7385 // minimum device channel number > than the value requested by the user.
\r
7386 stream_.nUserChannels[mode] = channels;
\r
7387 unsigned int value;
\r
7388 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7389 unsigned int deviceChannels = value;
\r
7390 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7391 snd_pcm_close( phandle );
\r
7392 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7393 errorText_ = errorStream_.str();
\r
7397 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7398 if ( result < 0 ) {
\r
7399 snd_pcm_close( phandle );
\r
7400 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7401 errorText_ = errorStream_.str();
\r
7404 deviceChannels = value;
\r
7405 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7406 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7408 // Set the device channels.
\r
7409 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7410 if ( result < 0 ) {
\r
7411 snd_pcm_close( phandle );
\r
7412 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7413 errorText_ = errorStream_.str();
\r
7417 // Set the buffer (or period) size.
\r
7419 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7420 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7421 if ( result < 0 ) {
\r
7422 snd_pcm_close( phandle );
\r
7423 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7424 errorText_ = errorStream_.str();
\r
7427 *bufferSize = periodSize;
\r
7429 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7430 unsigned int periods = 0;
\r
7431 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7432 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7433 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7434 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7435 if ( result < 0 ) {
\r
7436 snd_pcm_close( phandle );
\r
7437 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7438 errorText_ = errorStream_.str();
\r
7442 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7443 // MUST be the same in both directions!
\r
7444 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7445 snd_pcm_close( phandle );
\r
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7447 errorText_ = errorStream_.str();
\r
7451 stream_.bufferSize = *bufferSize;
\r
7453 // Install the hardware configuration
\r
7454 result = snd_pcm_hw_params( phandle, hw_params );
\r
7455 if ( result < 0 ) {
\r
7456 snd_pcm_close( phandle );
\r
7457 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7458 errorText_ = errorStream_.str();
\r
7462 #if defined(__RTAUDIO_DEBUG__)
\r
7463 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7464 snd_pcm_hw_params_dump( hw_params, out );
\r
7467 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7468 snd_pcm_sw_params_t *sw_params = NULL;
\r
7469 snd_pcm_sw_params_alloca( &sw_params );
\r
7470 snd_pcm_sw_params_current( phandle, sw_params );
\r
7471 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7472 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7473 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7475 // The following two settings were suggested by Theo Veenker
\r
7476 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7477 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7479 // here are two options for a fix
\r
7480 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7481 snd_pcm_uframes_t val;
\r
7482 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7483 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7485 result = snd_pcm_sw_params( phandle, sw_params );
\r
7486 if ( result < 0 ) {
\r
7487 snd_pcm_close( phandle );
\r
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7489 errorText_ = errorStream_.str();
\r
7493 #if defined(__RTAUDIO_DEBUG__)
\r
7494 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7495 snd_pcm_sw_params_dump( sw_params, out );
\r
7498 // Set flags for buffer conversion
\r
7499 stream_.doConvertBuffer[mode] = false;
\r
7500 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7501 stream_.doConvertBuffer[mode] = true;
\r
7502 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7503 stream_.doConvertBuffer[mode] = true;
\r
7504 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7505 stream_.nUserChannels[mode] > 1 )
\r
7506 stream_.doConvertBuffer[mode] = true;
\r
7508 // Allocate the ApiHandle if necessary and then save.
\r
7509 AlsaHandle *apiInfo = 0;
\r
7510 if ( stream_.apiHandle == 0 ) {
\r
7512 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7514 catch ( std::bad_alloc& ) {
\r
7515 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7519 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7520 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7524 stream_.apiHandle = (void *) apiInfo;
\r
7525 apiInfo->handles[0] = 0;
\r
7526 apiInfo->handles[1] = 0;
\r
7529 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7531 apiInfo->handles[mode] = phandle;
\r
7534 // Allocate necessary internal buffers.
\r
7535 unsigned long bufferBytes;
\r
7536 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7537 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7538 if ( stream_.userBuffer[mode] == NULL ) {
\r
7539 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7543 if ( stream_.doConvertBuffer[mode] ) {
\r
7545 bool makeBuffer = true;
\r
7546 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7547 if ( mode == INPUT ) {
\r
7548 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7549 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7550 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7554 if ( makeBuffer ) {
\r
7555 bufferBytes *= *bufferSize;
\r
7556 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7557 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7558 if ( stream_.deviceBuffer == NULL ) {
\r
7559 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7565 stream_.sampleRate = sampleRate;
\r
7566 stream_.nBuffers = periods;
\r
7567 stream_.device[mode] = device;
\r
7568 stream_.state = STREAM_STOPPED;
\r
7570 // Setup the buffer conversion information structure.
\r
7571 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7573 // Setup thread if necessary.
\r
7574 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7575 // We had already set up an output stream.
\r
7576 stream_.mode = DUPLEX;
\r
7577 // Link the streams if possible.
\r
7578 apiInfo->synchronized = false;
\r
7579 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7580 apiInfo->synchronized = true;
\r
7582 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7583 error( RtAudioError::WARNING );
\r
7587 stream_.mode = mode;
\r
7589 // Setup callback thread.
\r
7590 stream_.callbackInfo.object = (void *) this;
\r
7592 // Set the thread attributes for joinable and realtime scheduling
\r
7593 // priority (optional). The higher priority will only take affect
\r
7594 // if the program is run as root or suid. Note, under Linux
\r
7595 // processes with CAP_SYS_NICE privilege, a user can change
\r
7596 // scheduling policy and priority (thus need not be root). See
\r
7597 // POSIX "capabilities".
\r
7598 pthread_attr_t attr;
\r
7599 pthread_attr_init( &attr );
\r
7600 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7602 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7603 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7604 // We previously attempted to increase the audio callback priority
\r
7605 // to SCHED_RR here via the attributes. However, while no errors
\r
7606 // were reported in doing so, it did not work. So, now this is
\r
7607 // done in the alsaCallbackHandler function.
\r
7608 stream_.callbackInfo.doRealtime = true;
\r
7609 int priority = options->priority;
\r
7610 int min = sched_get_priority_min( SCHED_RR );
\r
7611 int max = sched_get_priority_max( SCHED_RR );
\r
7612 if ( priority < min ) priority = min;
\r
7613 else if ( priority > max ) priority = max;
\r
7614 stream_.callbackInfo.priority = priority;
\r
7618 stream_.callbackInfo.isRunning = true;
\r
7619 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7620 pthread_attr_destroy( &attr );
\r
7622 stream_.callbackInfo.isRunning = false;
\r
7623 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7632 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7633 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7634 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7636 stream_.apiHandle = 0;
\r
7639 if ( phandle) snd_pcm_close( phandle );
\r
7641 for ( int i=0; i<2; i++ ) {
\r
7642 if ( stream_.userBuffer[i] ) {
\r
7643 free( stream_.userBuffer[i] );
\r
7644 stream_.userBuffer[i] = 0;
\r
7648 if ( stream_.deviceBuffer ) {
\r
7649 free( stream_.deviceBuffer );
\r
7650 stream_.deviceBuffer = 0;
\r
7653 stream_.state = STREAM_CLOSED;
\r
7657 void RtApiAlsa :: closeStream()
\r
7659 if ( stream_.state == STREAM_CLOSED ) {
\r
7660 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7661 error( RtAudioError::WARNING );
\r
7665 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7666 stream_.callbackInfo.isRunning = false;
\r
7667 MUTEX_LOCK( &stream_.mutex );
\r
7668 if ( stream_.state == STREAM_STOPPED ) {
\r
7669 apiInfo->runnable = true;
\r
7670 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7672 MUTEX_UNLOCK( &stream_.mutex );
\r
7673 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7675 if ( stream_.state == STREAM_RUNNING ) {
\r
7676 stream_.state = STREAM_STOPPED;
\r
7677 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7678 snd_pcm_drop( apiInfo->handles[0] );
\r
7679 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7680 snd_pcm_drop( apiInfo->handles[1] );
\r
7684 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7685 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7686 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7688 stream_.apiHandle = 0;
\r
7691 for ( int i=0; i<2; i++ ) {
\r
7692 if ( stream_.userBuffer[i] ) {
\r
7693 free( stream_.userBuffer[i] );
\r
7694 stream_.userBuffer[i] = 0;
\r
7698 if ( stream_.deviceBuffer ) {
\r
7699 free( stream_.deviceBuffer );
\r
7700 stream_.deviceBuffer = 0;
\r
7703 stream_.mode = UNINITIALIZED;
\r
7704 stream_.state = STREAM_CLOSED;
\r
7707 void RtApiAlsa :: startStream()
\r
7709 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7712 if ( stream_.state == STREAM_RUNNING ) {
\r
7713 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7714 error( RtAudioError::WARNING );
\r
7718 MUTEX_LOCK( &stream_.mutex );
\r
7721 snd_pcm_state_t state;
\r
7722 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7723 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7724 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7725 state = snd_pcm_state( handle[0] );
\r
7726 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7727 result = snd_pcm_prepare( handle[0] );
\r
7728 if ( result < 0 ) {
\r
7729 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7730 errorText_ = errorStream_.str();
\r
7736 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7737 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7738 state = snd_pcm_state( handle[1] );
\r
7739 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7740 result = snd_pcm_prepare( handle[1] );
\r
7741 if ( result < 0 ) {
\r
7742 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7743 errorText_ = errorStream_.str();
\r
7749 stream_.state = STREAM_RUNNING;
\r
7752 apiInfo->runnable = true;
\r
7753 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7754 MUTEX_UNLOCK( &stream_.mutex );
\r
7756 if ( result >= 0 ) return;
\r
7757 error( RtAudioError::SYSTEM_ERROR );
\r
7760 void RtApiAlsa :: stopStream()
\r
7763 if ( stream_.state == STREAM_STOPPED ) {
\r
7764 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7765 error( RtAudioError::WARNING );
\r
7769 stream_.state = STREAM_STOPPED;
\r
7770 MUTEX_LOCK( &stream_.mutex );
\r
7773 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7774 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7775 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7776 if ( apiInfo->synchronized )
\r
7777 result = snd_pcm_drop( handle[0] );
\r
7779 result = snd_pcm_drain( handle[0] );
\r
7780 if ( result < 0 ) {
\r
7781 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7782 errorText_ = errorStream_.str();
\r
7787 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7788 result = snd_pcm_drop( handle[1] );
\r
7789 if ( result < 0 ) {
\r
7790 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7791 errorText_ = errorStream_.str();
\r
7797 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7798 MUTEX_UNLOCK( &stream_.mutex );
\r
7800 if ( result >= 0 ) return;
\r
7801 error( RtAudioError::SYSTEM_ERROR );
\r
7804 void RtApiAlsa :: abortStream()
\r
7807 if ( stream_.state == STREAM_STOPPED ) {
\r
7808 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7809 error( RtAudioError::WARNING );
\r
7813 stream_.state = STREAM_STOPPED;
\r
7814 MUTEX_LOCK( &stream_.mutex );
\r
7817 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7818 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7819 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7820 result = snd_pcm_drop( handle[0] );
\r
7821 if ( result < 0 ) {
\r
7822 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7823 errorText_ = errorStream_.str();
\r
7828 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7829 result = snd_pcm_drop( handle[1] );
\r
7830 if ( result < 0 ) {
\r
7831 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7832 errorText_ = errorStream_.str();
\r
7838 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7839 MUTEX_UNLOCK( &stream_.mutex );
\r
7841 if ( result >= 0 ) return;
\r
7842 error( RtAudioError::SYSTEM_ERROR );
\r
7845 void RtApiAlsa :: callbackEvent()
\r
7847 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7848 if ( stream_.state == STREAM_STOPPED ) {
\r
7849 MUTEX_LOCK( &stream_.mutex );
\r
7850 while ( !apiInfo->runnable )
\r
7851 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7853 if ( stream_.state != STREAM_RUNNING ) {
\r
7854 MUTEX_UNLOCK( &stream_.mutex );
\r
7857 MUTEX_UNLOCK( &stream_.mutex );
\r
7860 if ( stream_.state == STREAM_CLOSED ) {
\r
7861 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7862 error( RtAudioError::WARNING );
\r
7866 int doStopStream = 0;
\r
7867 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7868 double streamTime = getStreamTime();
\r
7869 RtAudioStreamStatus status = 0;
\r
7870 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7871 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7872 apiInfo->xrun[0] = false;
\r
7874 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7875 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7876 apiInfo->xrun[1] = false;
\r
7878 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7879 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7881 if ( doStopStream == 2 ) {
\r
7886 MUTEX_LOCK( &stream_.mutex );
\r
7888 // The state might change while waiting on a mutex.
\r
7889 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7894 snd_pcm_t **handle;
\r
7895 snd_pcm_sframes_t frames;
\r
7896 RtAudioFormat format;
\r
7897 handle = (snd_pcm_t **) apiInfo->handles;
\r
7899 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7901 // Setup parameters.
\r
7902 if ( stream_.doConvertBuffer[1] ) {
\r
7903 buffer = stream_.deviceBuffer;
\r
7904 channels = stream_.nDeviceChannels[1];
\r
7905 format = stream_.deviceFormat[1];
\r
7908 buffer = stream_.userBuffer[1];
\r
7909 channels = stream_.nUserChannels[1];
\r
7910 format = stream_.userFormat;
\r
7913 // Read samples from device in interleaved/non-interleaved format.
\r
7914 if ( stream_.deviceInterleaved[1] )
\r
7915 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7917 void *bufs[channels];
\r
7918 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7919 for ( int i=0; i<channels; i++ )
\r
7920 bufs[i] = (void *) (buffer + (i * offset));
\r
7921 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7924 if ( result < (int) stream_.bufferSize ) {
\r
7925 // Either an error or overrun occured.
\r
7926 if ( result == -EPIPE ) {
\r
7927 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7928 if ( state == SND_PCM_STATE_XRUN ) {
\r
7929 apiInfo->xrun[1] = true;
\r
7930 result = snd_pcm_prepare( handle[1] );
\r
7931 if ( result < 0 ) {
\r
7932 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7933 errorText_ = errorStream_.str();
\r
7937 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7938 errorText_ = errorStream_.str();
\r
7942 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7943 errorText_ = errorStream_.str();
\r
7945 error( RtAudioError::WARNING );
\r
7949 // Do byte swapping if necessary.
\r
7950 if ( stream_.doByteSwap[1] )
\r
7951 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7953 // Do buffer conversion if necessary.
\r
7954 if ( stream_.doConvertBuffer[1] )
\r
7955 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7957 // Check stream latency
\r
7958 result = snd_pcm_delay( handle[1], &frames );
\r
7959 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7964 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7966 // Setup parameters and do buffer conversion if necessary.
\r
7967 if ( stream_.doConvertBuffer[0] ) {
\r
7968 buffer = stream_.deviceBuffer;
\r
7969 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7970 channels = stream_.nDeviceChannels[0];
\r
7971 format = stream_.deviceFormat[0];
\r
7974 buffer = stream_.userBuffer[0];
\r
7975 channels = stream_.nUserChannels[0];
\r
7976 format = stream_.userFormat;
\r
7979 // Do byte swapping if necessary.
\r
7980 if ( stream_.doByteSwap[0] )
\r
7981 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7983 // Write samples to device in interleaved/non-interleaved format.
\r
7984 if ( stream_.deviceInterleaved[0] )
\r
7985 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7987 void *bufs[channels];
\r
7988 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7989 for ( int i=0; i<channels; i++ )
\r
7990 bufs[i] = (void *) (buffer + (i * offset));
\r
7991 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7994 if ( result < (int) stream_.bufferSize ) {
\r
7995 // Either an error or underrun occured.
\r
7996 if ( result == -EPIPE ) {
\r
7997 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7998 if ( state == SND_PCM_STATE_XRUN ) {
\r
7999 apiInfo->xrun[0] = true;
\r
8000 result = snd_pcm_prepare( handle[0] );
\r
8001 if ( result < 0 ) {
\r
8002 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8003 errorText_ = errorStream_.str();
\r
8007 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8008 errorText_ = errorStream_.str();
\r
8012 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8013 errorText_ = errorStream_.str();
\r
8015 error( RtAudioError::WARNING );
\r
8019 // Check stream latency
\r
8020 result = snd_pcm_delay( handle[0], &frames );
\r
8021 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8025 MUTEX_UNLOCK( &stream_.mutex );
\r
8027 RtApi::tickStreamTime();
\r
8028 if ( doStopStream == 1 ) this->stopStream();
\r
8031 static void *alsaCallbackHandler( void *ptr )
\r
8033 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8034 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8035 bool *isRunning = &info->isRunning;
\r
8037 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8038 if ( &info->doRealtime ) {
\r
8039 pthread_t tID = pthread_self(); // ID of this thread
\r
8040 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8041 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8045 while ( *isRunning == true ) {
\r
8046 pthread_testcancel();
\r
8047 object->callbackEvent();
\r
8050 pthread_exit( NULL );
\r
8053 //******************** End of __LINUX_ALSA__ *********************//
\r
8056 #if defined(__LINUX_PULSE__)
\r
8058 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8059 // and Tristan Matthews.
\r
8061 #include <pulse/error.h>
\r
8062 #include <pulse/simple.h>
\r
// Sample rates offered by the PulseAudio backend; zero terminates the list.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000,
  0  // sentinel
};
\r
8068 struct rtaudio_pa_format_mapping_t {
\r
8069 RtAudioFormat rtaudio_format;
\r
8070 pa_sample_format_t pa_format;
\r
8073 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8074 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8075 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8076 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8077 {0, PA_SAMPLE_INVALID}};
\r
8079 struct PulseAudioHandle {
\r
8080 pa_simple *s_play;
\r
8083 pthread_cond_t runnable_cv;
\r
8085 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8088 RtApiPulse::~RtApiPulse()
\r
8090 if ( stream_.state != STREAM_CLOSED )
\r
8094 unsigned int RtApiPulse::getDeviceCount( void )
\r
8099 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8101 RtAudio::DeviceInfo info;
\r
8102 info.probed = true;
\r
8103 info.name = "PulseAudio";
\r
8104 info.outputChannels = 2;
\r
8105 info.inputChannels = 2;
\r
8106 info.duplexChannels = 2;
\r
8107 info.isDefaultOutput = true;
\r
8108 info.isDefaultInput = true;
\r
8110 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8111 info.sampleRates.push_back( *sr );
\r
8113 info.preferredSampleRate = 48000;
\r
8114 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8119 static void *pulseaudio_callback( void * user )
\r
8121 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8122 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8123 volatile bool *isRunning = &cbi->isRunning;
\r
8125 while ( *isRunning ) {
\r
8126 pthread_testcancel();
\r
8127 context->callbackEvent();
\r
8130 pthread_exit( NULL );
\r
8133 void RtApiPulse::closeStream( void )
\r
8135 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8137 stream_.callbackInfo.isRunning = false;
\r
8139 MUTEX_LOCK( &stream_.mutex );
\r
8140 if ( stream_.state == STREAM_STOPPED ) {
\r
8141 pah->runnable = true;
\r
8142 pthread_cond_signal( &pah->runnable_cv );
\r
8144 MUTEX_UNLOCK( &stream_.mutex );
\r
8146 pthread_join( pah->thread, 0 );
\r
8147 if ( pah->s_play ) {
\r
8148 pa_simple_flush( pah->s_play, NULL );
\r
8149 pa_simple_free( pah->s_play );
\r
8152 pa_simple_free( pah->s_rec );
\r
8154 pthread_cond_destroy( &pah->runnable_cv );
\r
8156 stream_.apiHandle = 0;
\r
8159 if ( stream_.userBuffer[0] ) {
\r
8160 free( stream_.userBuffer[0] );
\r
8161 stream_.userBuffer[0] = 0;
\r
8163 if ( stream_.userBuffer[1] ) {
\r
8164 free( stream_.userBuffer[1] );
\r
8165 stream_.userBuffer[1] = 0;
\r
8168 stream_.state = STREAM_CLOSED;
\r
8169 stream_.mode = UNINITIALIZED;
\r
8172 void RtApiPulse::callbackEvent( void )
\r
8174 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8176 if ( stream_.state == STREAM_STOPPED ) {
\r
8177 MUTEX_LOCK( &stream_.mutex );
\r
8178 while ( !pah->runnable )
\r
8179 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8181 if ( stream_.state != STREAM_RUNNING ) {
\r
8182 MUTEX_UNLOCK( &stream_.mutex );
\r
8185 MUTEX_UNLOCK( &stream_.mutex );
\r
8188 if ( stream_.state == STREAM_CLOSED ) {
\r
8189 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8190 "this shouldn't happen!";
\r
8191 error( RtAudioError::WARNING );
\r
8195 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8196 double streamTime = getStreamTime();
\r
8197 RtAudioStreamStatus status = 0;
\r
8198 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8199 stream_.bufferSize, streamTime, status,
\r
8200 stream_.callbackInfo.userData );
\r
8202 if ( doStopStream == 2 ) {
\r
8207 MUTEX_LOCK( &stream_.mutex );
\r
8208 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8209 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8211 if ( stream_.state != STREAM_RUNNING )
\r
8216 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8217 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8218 convertBuffer( stream_.deviceBuffer,
\r
8219 stream_.userBuffer[OUTPUT],
\r
8220 stream_.convertInfo[OUTPUT] );
\r
8221 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8222 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8224 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8225 formatBytes( stream_.userFormat );
\r
8227 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8228 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8229 pa_strerror( pa_error ) << ".";
\r
8230 errorText_ = errorStream_.str();
\r
8231 error( RtAudioError::WARNING );
\r
8235 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8236 if ( stream_.doConvertBuffer[INPUT] )
\r
8237 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8238 formatBytes( stream_.deviceFormat[INPUT] );
\r
8240 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8241 formatBytes( stream_.userFormat );
\r
8243 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8244 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8245 pa_strerror( pa_error ) << ".";
\r
8246 errorText_ = errorStream_.str();
\r
8247 error( RtAudioError::WARNING );
\r
8249 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8250 convertBuffer( stream_.userBuffer[INPUT],
\r
8251 stream_.deviceBuffer,
\r
8252 stream_.convertInfo[INPUT] );
\r
8257 MUTEX_UNLOCK( &stream_.mutex );
\r
8258 RtApi::tickStreamTime();
\r
8260 if ( doStopStream == 1 )
\r
8264 void RtApiPulse::startStream( void )
\r
8266 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8268 if ( stream_.state == STREAM_CLOSED ) {
\r
8269 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8270 error( RtAudioError::INVALID_USE );
\r
8273 if ( stream_.state == STREAM_RUNNING ) {
\r
8274 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8275 error( RtAudioError::WARNING );
\r
8279 MUTEX_LOCK( &stream_.mutex );
\r
8281 stream_.state = STREAM_RUNNING;
\r
8283 pah->runnable = true;
\r
8284 pthread_cond_signal( &pah->runnable_cv );
\r
8285 MUTEX_UNLOCK( &stream_.mutex );
\r
8288 void RtApiPulse::stopStream( void )
\r
8290 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8292 if ( stream_.state == STREAM_CLOSED ) {
\r
8293 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8294 error( RtAudioError::INVALID_USE );
\r
8297 if ( stream_.state == STREAM_STOPPED ) {
\r
8298 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8299 error( RtAudioError::WARNING );
\r
8303 stream_.state = STREAM_STOPPED;
\r
8304 MUTEX_LOCK( &stream_.mutex );
\r
8306 if ( pah && pah->s_play ) {
\r
8308 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8309 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8310 pa_strerror( pa_error ) << ".";
\r
8311 errorText_ = errorStream_.str();
\r
8312 MUTEX_UNLOCK( &stream_.mutex );
\r
8313 error( RtAudioError::SYSTEM_ERROR );
\r
8318 stream_.state = STREAM_STOPPED;
\r
8319 MUTEX_UNLOCK( &stream_.mutex );
\r
8322 void RtApiPulse::abortStream( void )
\r
8324 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8326 if ( stream_.state == STREAM_CLOSED ) {
\r
8327 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8328 error( RtAudioError::INVALID_USE );
\r
8331 if ( stream_.state == STREAM_STOPPED ) {
\r
8332 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8333 error( RtAudioError::WARNING );
\r
8337 stream_.state = STREAM_STOPPED;
\r
8338 MUTEX_LOCK( &stream_.mutex );
\r
8340 if ( pah && pah->s_play ) {
\r
8342 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8343 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8344 pa_strerror( pa_error ) << ".";
\r
8345 errorText_ = errorStream_.str();
\r
8346 MUTEX_UNLOCK( &stream_.mutex );
\r
8347 error( RtAudioError::SYSTEM_ERROR );
\r
8352 stream_.state = STREAM_STOPPED;
\r
8353 MUTEX_UNLOCK( &stream_.mutex );
\r
8356 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8357 unsigned int channels, unsigned int firstChannel,
\r
8358 unsigned int sampleRate, RtAudioFormat format,
\r
8359 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8361 PulseAudioHandle *pah = 0;
\r
8362 unsigned long bufferBytes = 0;
\r
8363 pa_sample_spec ss;
\r
8365 if ( device != 0 ) return false;
\r
8366 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8367 if ( channels != 1 && channels != 2 ) {
\r
8368 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8371 ss.channels = channels;
\r
8373 if ( firstChannel != 0 ) return false;
\r
8375 bool sr_found = false;
\r
8376 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8377 if ( sampleRate == *sr ) {
\r
8379 stream_.sampleRate = sampleRate;
\r
8380 ss.rate = sampleRate;
\r
8384 if ( !sr_found ) {
\r
8385 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8389 bool sf_found = 0;
\r
8390 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8391 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8392 if ( format == sf->rtaudio_format ) {
\r
8394 stream_.userFormat = sf->rtaudio_format;
\r
8395 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8396 ss.format = sf->pa_format;
\r
8400 if ( !sf_found ) { // Use internal data format conversion.
\r
8401 stream_.userFormat = format;
\r
8402 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8403 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8406 // Set other stream parameters.
\r
8407 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8408 else stream_.userInterleaved = true;
\r
8409 stream_.deviceInterleaved[mode] = true;
\r
8410 stream_.nBuffers = 1;
\r
8411 stream_.doByteSwap[mode] = false;
\r
8412 stream_.nUserChannels[mode] = channels;
\r
8413 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8414 stream_.channelOffset[mode] = 0;
\r
8415 std::string streamName = "RtAudio";
\r
8417 // Set flags for buffer conversion.
\r
8418 stream_.doConvertBuffer[mode] = false;
\r
8419 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8420 stream_.doConvertBuffer[mode] = true;
\r
8421 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8422 stream_.doConvertBuffer[mode] = true;
\r
8424 // Allocate necessary internal buffers.
\r
8425 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8426 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8427 if ( stream_.userBuffer[mode] == NULL ) {
\r
8428 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8431 stream_.bufferSize = *bufferSize;
\r
8433 if ( stream_.doConvertBuffer[mode] ) {
\r
8435 bool makeBuffer = true;
\r
8436 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8437 if ( mode == INPUT ) {
\r
8438 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8439 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8440 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8444 if ( makeBuffer ) {
\r
8445 bufferBytes *= *bufferSize;
\r
8446 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8447 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8448 if ( stream_.deviceBuffer == NULL ) {
\r
8449 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8455 stream_.device[mode] = device;
\r
8457 // Setup the buffer conversion information structure.
\r
8458 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8460 if ( !stream_.apiHandle ) {
\r
8461 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8463 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8467 stream_.apiHandle = pah;
\r
8468 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8469 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8473 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8476 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8479 pa_buffer_attr buffer_attr;
\r
8480 buffer_attr.fragsize = bufferBytes;
\r
8481 buffer_attr.maxlength = -1;
\r
8483 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8484 if ( !pah->s_rec ) {
\r
8485 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8490 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8491 if ( !pah->s_play ) {
\r
8492 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8500 if ( stream_.mode == UNINITIALIZED )
\r
8501 stream_.mode = mode;
\r
8502 else if ( stream_.mode == mode )
\r
8505 stream_.mode = DUPLEX;
\r
8507 if ( !stream_.callbackInfo.isRunning ) {
\r
8508 stream_.callbackInfo.object = this;
\r
8509 stream_.callbackInfo.isRunning = true;
\r
8510 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8511 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8516 stream_.state = STREAM_STOPPED;
\r
8520 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8521 pthread_cond_destroy( &pah->runnable_cv );
\r
8523 stream_.apiHandle = 0;
\r
8526 for ( int i=0; i<2; i++ ) {
\r
8527 if ( stream_.userBuffer[i] ) {
\r
8528 free( stream_.userBuffer[i] );
\r
8529 stream_.userBuffer[i] = 0;
\r
8533 if ( stream_.deviceBuffer ) {
\r
8534 free( stream_.deviceBuffer );
\r
8535 stream_.deviceBuffer = 0;
\r
8541 //******************** End of __LINUX_PULSE__ *********************//
\r
8544 #if defined(__LINUX_OSS__)
\r
8546 #include <unistd.h>
\r
8547 #include <sys/ioctl.h>
\r
8548 #include <unistd.h>
\r
8549 #include <fcntl.h>
\r
8550 #include <sys/soundcard.h>
\r
8551 #include <errno.h>
\r
8554 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback = 0, record = 1)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // true once output has been primed/started
  pthread_cond_t runnable; // signals the callback thread to proceed

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8568 RtApiOss :: RtApiOss()
\r
8570 // Nothing to do here.
\r
8573 RtApiOss :: ~RtApiOss()
\r
8575 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8578 unsigned int RtApiOss :: getDeviceCount( void )
\r
8580 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8581 if ( mixerfd == -1 ) {
\r
8582 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8583 error( RtAudioError::WARNING );
\r
8587 oss_sysinfo sysinfo;
\r
8588 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8590 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8591 error( RtAudioError::WARNING );
\r
8596 return sysinfo.numaudios;
\r
8599 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8601 RtAudio::DeviceInfo info;
\r
8602 info.probed = false;
\r
8604 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8605 if ( mixerfd == -1 ) {
\r
8606 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8607 error( RtAudioError::WARNING );
\r
8611 oss_sysinfo sysinfo;
\r
8612 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8613 if ( result == -1 ) {
\r
8615 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8616 error( RtAudioError::WARNING );
\r
8620 unsigned nDevices = sysinfo.numaudios;
\r
8621 if ( nDevices == 0 ) {
\r
8623 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8624 error( RtAudioError::INVALID_USE );
\r
8628 if ( device >= nDevices ) {
\r
8630 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8631 error( RtAudioError::INVALID_USE );
\r
8635 oss_audioinfo ainfo;
\r
8636 ainfo.dev = device;
\r
8637 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8639 if ( result == -1 ) {
\r
8640 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8641 errorText_ = errorStream_.str();
\r
8642 error( RtAudioError::WARNING );
\r
8647 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8648 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8649 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8650 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8651 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8654 // Probe data formats ... do for input
\r
8655 unsigned long mask = ainfo.iformats;
\r
8656 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8657 info.nativeFormats |= RTAUDIO_SINT16;
\r
8658 if ( mask & AFMT_S8 )
\r
8659 info.nativeFormats |= RTAUDIO_SINT8;
\r
8660 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8661 info.nativeFormats |= RTAUDIO_SINT32;
\r
8662 if ( mask & AFMT_FLOAT )
\r
8663 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8664 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8665 info.nativeFormats |= RTAUDIO_SINT24;
\r
8667 // Check that we have at least one supported format
\r
8668 if ( info.nativeFormats == 0 ) {
\r
8669 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8670 errorText_ = errorStream_.str();
\r
8671 error( RtAudioError::WARNING );
\r
8675 // Probe the supported sample rates.
\r
8676 info.sampleRates.clear();
\r
8677 if ( ainfo.nrates ) {
\r
8678 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8679 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8680 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8681 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8683 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8684 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8692 // Check min and max rate values;
\r
8693 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8694 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8695 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8697 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8698 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8703 if ( info.sampleRates.size() == 0 ) {
\r
8704 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8705 errorText_ = errorStream_.str();
\r
8706 error( RtAudioError::WARNING );
\r
8709 info.probed = true;
\r
8710 info.name = ainfo.name;
\r
8717 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8718 unsigned int firstChannel, unsigned int sampleRate,
\r
8719 RtAudioFormat format, unsigned int *bufferSize,
\r
8720 RtAudio::StreamOptions *options )
\r
8722 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8723 if ( mixerfd == -1 ) {
\r
8724 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8728 oss_sysinfo sysinfo;
\r
8729 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8730 if ( result == -1 ) {
\r
8732 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8736 unsigned nDevices = sysinfo.numaudios;
\r
8737 if ( nDevices == 0 ) {
\r
8738 // This should not happen because a check is made before this function is called.
\r
8740 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8744 if ( device >= nDevices ) {
\r
8745 // This should not happen because a check is made before this function is called.
\r
8747 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8751 oss_audioinfo ainfo;
\r
8752 ainfo.dev = device;
\r
8753 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8755 if ( result == -1 ) {
\r
8756 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8757 errorText_ = errorStream_.str();
\r
8761 // Check if device supports input or output
\r
8762 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8763 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8764 if ( mode == OUTPUT )
\r
8765 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8767 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8768 errorText_ = errorStream_.str();
\r
8773 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8774 if ( mode == OUTPUT )
\r
8775 flags |= O_WRONLY;
\r
8776 else { // mode == INPUT
\r
8777 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8778 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8779 close( handle->id[0] );
\r
8780 handle->id[0] = 0;
\r
8781 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8782 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8783 errorText_ = errorStream_.str();
\r
8786 // Check that the number previously set channels is the same.
\r
8787 if ( stream_.nUserChannels[0] != channels ) {
\r
8788 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8789 errorText_ = errorStream_.str();
\r
8795 flags |= O_RDONLY;
\r
8798 // Set exclusive access if specified.
\r
8799 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8801 // Try to open the device.
\r
8803 fd = open( ainfo.devnode, flags, 0 );
\r
8805 if ( errno == EBUSY )
\r
8806 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8808 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8809 errorText_ = errorStream_.str();
\r
8813 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8815 if ( flags | O_RDWR ) {
\r
8816 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8817 if ( result == -1) {
\r
8818 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8819 errorText_ = errorStream_.str();
\r
8825 // Check the device channel support.
\r
8826 stream_.nUserChannels[mode] = channels;
\r
8827 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8829 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8830 errorText_ = errorStream_.str();
\r
8834 // Set the number of channels.
\r
8835 int deviceChannels = channels + firstChannel;
\r
8836 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8837 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8839 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8840 errorText_ = errorStream_.str();
\r
8843 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8845 // Get the data format mask
\r
8847 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8848 if ( result == -1 ) {
\r
8850 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8851 errorText_ = errorStream_.str();
\r
8855 // Determine how to set the device format.
\r
8856 stream_.userFormat = format;
\r
8857 int deviceFormat = -1;
\r
8858 stream_.doByteSwap[mode] = false;
\r
8859 if ( format == RTAUDIO_SINT8 ) {
\r
8860 if ( mask & AFMT_S8 ) {
\r
8861 deviceFormat = AFMT_S8;
\r
8862 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8865 else if ( format == RTAUDIO_SINT16 ) {
\r
8866 if ( mask & AFMT_S16_NE ) {
\r
8867 deviceFormat = AFMT_S16_NE;
\r
8868 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8870 else if ( mask & AFMT_S16_OE ) {
\r
8871 deviceFormat = AFMT_S16_OE;
\r
8872 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8873 stream_.doByteSwap[mode] = true;
\r
8876 else if ( format == RTAUDIO_SINT24 ) {
\r
8877 if ( mask & AFMT_S24_NE ) {
\r
8878 deviceFormat = AFMT_S24_NE;
\r
8879 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8881 else if ( mask & AFMT_S24_OE ) {
\r
8882 deviceFormat = AFMT_S24_OE;
\r
8883 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8884 stream_.doByteSwap[mode] = true;
\r
8887 else if ( format == RTAUDIO_SINT32 ) {
\r
8888 if ( mask & AFMT_S32_NE ) {
\r
8889 deviceFormat = AFMT_S32_NE;
\r
8890 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8892 else if ( mask & AFMT_S32_OE ) {
\r
8893 deviceFormat = AFMT_S32_OE;
\r
8894 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8895 stream_.doByteSwap[mode] = true;
\r
8899 if ( deviceFormat == -1 ) {
\r
8900 // The user requested format is not natively supported by the device.
\r
8901 if ( mask & AFMT_S16_NE ) {
\r
8902 deviceFormat = AFMT_S16_NE;
\r
8903 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8905 else if ( mask & AFMT_S32_NE ) {
\r
8906 deviceFormat = AFMT_S32_NE;
\r
8907 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8909 else if ( mask & AFMT_S24_NE ) {
\r
8910 deviceFormat = AFMT_S24_NE;
\r
8911 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8913 else if ( mask & AFMT_S16_OE ) {
\r
8914 deviceFormat = AFMT_S16_OE;
\r
8915 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8916 stream_.doByteSwap[mode] = true;
\r
8918 else if ( mask & AFMT_S32_OE ) {
\r
8919 deviceFormat = AFMT_S32_OE;
\r
8920 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8921 stream_.doByteSwap[mode] = true;
\r
8923 else if ( mask & AFMT_S24_OE ) {
\r
8924 deviceFormat = AFMT_S24_OE;
\r
8925 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8926 stream_.doByteSwap[mode] = true;
\r
8928 else if ( mask & AFMT_S8) {
\r
8929 deviceFormat = AFMT_S8;
\r
8930 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8934 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8935 // This really shouldn't happen ...
\r
8937 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8938 errorText_ = errorStream_.str();
\r
8942 // Set the data format.
\r
8943 int temp = deviceFormat;
\r
8944 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8945 if ( result == -1 || deviceFormat != temp ) {
\r
8947 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8948 errorText_ = errorStream_.str();
\r
8952 // Attempt to set the buffer size. According to OSS, the minimum
\r
8953 // number of buffers is two. The supposed minimum buffer size is 16
\r
8954 // bytes, so that will be our lower bound. The argument to this
\r
8955 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8956 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8957 // We'll check the actual value used near the end of the setup
\r
8959 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8960 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8962 if ( options ) buffers = options->numberOfBuffers;
\r
8963 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8964 if ( buffers < 2 ) buffers = 3;
\r
8965 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8966 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8967 if ( result == -1 ) {
\r
8969 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8970 errorText_ = errorStream_.str();
\r
8973 stream_.nBuffers = buffers;
\r
8975 // Save buffer size (in sample frames).
\r
8976 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8977 stream_.bufferSize = *bufferSize;
\r
8979 // Set the sample rate.
\r
8980 int srate = sampleRate;
\r
8981 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8982 if ( result == -1 ) {
\r
8984 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8985 errorText_ = errorStream_.str();
\r
8989 // Verify the sample rate setup worked.
\r
8990 if ( abs( srate - sampleRate ) > 100 ) {
\r
8992 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8993 errorText_ = errorStream_.str();
\r
8996 stream_.sampleRate = sampleRate;
\r
8998 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8999 // We're doing duplex setup here.
\r
9000 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9001 stream_.nDeviceChannels[0] = deviceChannels;
\r
9004 // Set interleaving parameters.
\r
9005 stream_.userInterleaved = true;
\r
9006 stream_.deviceInterleaved[mode] = true;
\r
9007 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9008 stream_.userInterleaved = false;
\r
9010 // Set flags for buffer conversion
\r
9011 stream_.doConvertBuffer[mode] = false;
\r
9012 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9013 stream_.doConvertBuffer[mode] = true;
\r
9014 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9015 stream_.doConvertBuffer[mode] = true;
\r
9016 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9017 stream_.nUserChannels[mode] > 1 )
\r
9018 stream_.doConvertBuffer[mode] = true;
\r
9020 // Allocate the stream handles if necessary and then save.
\r
9021 if ( stream_.apiHandle == 0 ) {
\r
9023 handle = new OssHandle;
\r
9025 catch ( std::bad_alloc& ) {
\r
9026 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9030 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9031 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9035 stream_.apiHandle = (void *) handle;
\r
9038 handle = (OssHandle *) stream_.apiHandle;
\r
9040 handle->id[mode] = fd;
\r
9042 // Allocate necessary internal buffers.
\r
9043 unsigned long bufferBytes;
\r
9044 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9045 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9046 if ( stream_.userBuffer[mode] == NULL ) {
\r
9047 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9051 if ( stream_.doConvertBuffer[mode] ) {
\r
9053 bool makeBuffer = true;
\r
9054 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9055 if ( mode == INPUT ) {
\r
9056 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9057 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9058 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9062 if ( makeBuffer ) {
\r
9063 bufferBytes *= *bufferSize;
\r
9064 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9065 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9066 if ( stream_.deviceBuffer == NULL ) {
\r
9067 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9073 stream_.device[mode] = device;
\r
9074 stream_.state = STREAM_STOPPED;
\r
9076 // Setup the buffer conversion information structure.
\r
9077 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9079 // Setup thread if necessary.
\r
9080 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9081 // We had already set up an output stream.
\r
9082 stream_.mode = DUPLEX;
\r
9083 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9086 stream_.mode = mode;
\r
9088 // Setup callback thread.
\r
9089 stream_.callbackInfo.object = (void *) this;
\r
9091 // Set the thread attributes for joinable and realtime scheduling
\r
9092 // priority. The higher priority will only take affect if the
\r
9093 // program is run as root or suid.
\r
9094 pthread_attr_t attr;
\r
9095 pthread_attr_init( &attr );
\r
9096 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9097 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9098 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9099 struct sched_param param;
\r
9100 int priority = options->priority;
\r
9101 int min = sched_get_priority_min( SCHED_RR );
\r
9102 int max = sched_get_priority_max( SCHED_RR );
\r
9103 if ( priority < min ) priority = min;
\r
9104 else if ( priority > max ) priority = max;
\r
9105 param.sched_priority = priority;
\r
9106 pthread_attr_setschedparam( &attr, ¶m );
\r
9107 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9110 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9112 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9115 stream_.callbackInfo.isRunning = true;
\r
9116 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9117 pthread_attr_destroy( &attr );
\r
9119 stream_.callbackInfo.isRunning = false;
\r
9120 errorText_ = "RtApiOss::error creating callback thread!";
\r
9129 pthread_cond_destroy( &handle->runnable );
\r
9130 if ( handle->id[0] ) close( handle->id[0] );
\r
9131 if ( handle->id[1] ) close( handle->id[1] );
\r
9133 stream_.apiHandle = 0;
\r
9136 for ( int i=0; i<2; i++ ) {
\r
9137 if ( stream_.userBuffer[i] ) {
\r
9138 free( stream_.userBuffer[i] );
\r
9139 stream_.userBuffer[i] = 0;
\r
9143 if ( stream_.deviceBuffer ) {
\r
9144 free( stream_.deviceBuffer );
\r
9145 stream_.deviceBuffer = 0;
\r
9151 void RtApiOss :: closeStream()
\r
9153 if ( stream_.state == STREAM_CLOSED ) {
\r
9154 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9155 error( RtAudioError::WARNING );
\r
9159 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9160 stream_.callbackInfo.isRunning = false;
\r
9161 MUTEX_LOCK( &stream_.mutex );
\r
9162 if ( stream_.state == STREAM_STOPPED )
\r
9163 pthread_cond_signal( &handle->runnable );
\r
9164 MUTEX_UNLOCK( &stream_.mutex );
\r
9165 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9167 if ( stream_.state == STREAM_RUNNING ) {
\r
9168 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9169 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9171 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9172 stream_.state = STREAM_STOPPED;
\r
9176 pthread_cond_destroy( &handle->runnable );
\r
9177 if ( handle->id[0] ) close( handle->id[0] );
\r
9178 if ( handle->id[1] ) close( handle->id[1] );
\r
9180 stream_.apiHandle = 0;
\r
9183 for ( int i=0; i<2; i++ ) {
\r
9184 if ( stream_.userBuffer[i] ) {
\r
9185 free( stream_.userBuffer[i] );
\r
9186 stream_.userBuffer[i] = 0;
\r
9190 if ( stream_.deviceBuffer ) {
\r
9191 free( stream_.deviceBuffer );
\r
9192 stream_.deviceBuffer = 0;
\r
9195 stream_.mode = UNINITIALIZED;
\r
9196 stream_.state = STREAM_CLOSED;
\r
9199 void RtApiOss :: startStream()
\r
9202 if ( stream_.state == STREAM_RUNNING ) {
\r
9203 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9204 error( RtAudioError::WARNING );
\r
9208 MUTEX_LOCK( &stream_.mutex );
\r
9210 stream_.state = STREAM_RUNNING;
\r
9212 // No need to do anything else here ... OSS automatically starts
\r
9213 // when fed samples.
\r
9215 MUTEX_UNLOCK( &stream_.mutex );
\r
9217 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9218 pthread_cond_signal( &handle->runnable );
\r
9221 void RtApiOss :: stopStream()
\r
9224 if ( stream_.state == STREAM_STOPPED ) {
\r
9225 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9226 error( RtAudioError::WARNING );
\r
9230 MUTEX_LOCK( &stream_.mutex );
\r
9232 // The state might change while waiting on a mutex.
\r
9233 if ( stream_.state == STREAM_STOPPED ) {
\r
9234 MUTEX_UNLOCK( &stream_.mutex );
\r
9239 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9240 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9242 // Flush the output with zeros a few times.
\r
9245 RtAudioFormat format;
\r
9247 if ( stream_.doConvertBuffer[0] ) {
\r
9248 buffer = stream_.deviceBuffer;
\r
9249 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9250 format = stream_.deviceFormat[0];
\r
9253 buffer = stream_.userBuffer[0];
\r
9254 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9255 format = stream_.userFormat;
\r
9258 memset( buffer, 0, samples * formatBytes(format) );
\r
9259 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9260 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9261 if ( result == -1 ) {
\r
9262 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9263 error( RtAudioError::WARNING );
\r
9267 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9268 if ( result == -1 ) {
\r
9269 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9270 errorText_ = errorStream_.str();
\r
9273 handle->triggered = false;
\r
9276 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9277 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9278 if ( result == -1 ) {
\r
9279 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9280 errorText_ = errorStream_.str();
\r
9286 stream_.state = STREAM_STOPPED;
\r
9287 MUTEX_UNLOCK( &stream_.mutex );
\r
9289 if ( result != -1 ) return;
\r
9290 error( RtAudioError::SYSTEM_ERROR );
\r
9293 void RtApiOss :: abortStream()
\r
9296 if ( stream_.state == STREAM_STOPPED ) {
\r
9297 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9298 error( RtAudioError::WARNING );
\r
9302 MUTEX_LOCK( &stream_.mutex );
\r
9304 // The state might change while waiting on a mutex.
\r
9305 if ( stream_.state == STREAM_STOPPED ) {
\r
9306 MUTEX_UNLOCK( &stream_.mutex );
\r
9311 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9312 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9313 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9314 if ( result == -1 ) {
\r
9315 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9316 errorText_ = errorStream_.str();
\r
9319 handle->triggered = false;
\r
9322 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9323 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9324 if ( result == -1 ) {
\r
9325 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9326 errorText_ = errorStream_.str();
\r
9332 stream_.state = STREAM_STOPPED;
\r
9333 MUTEX_UNLOCK( &stream_.mutex );
\r
9335 if ( result != -1 ) return;
\r
9336 error( RtAudioError::SYSTEM_ERROR );
\r
9339 void RtApiOss :: callbackEvent()
\r
9341 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9342 if ( stream_.state == STREAM_STOPPED ) {
\r
9343 MUTEX_LOCK( &stream_.mutex );
\r
9344 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9345 if ( stream_.state != STREAM_RUNNING ) {
\r
9346 MUTEX_UNLOCK( &stream_.mutex );
\r
9349 MUTEX_UNLOCK( &stream_.mutex );
\r
9352 if ( stream_.state == STREAM_CLOSED ) {
\r
9353 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9354 error( RtAudioError::WARNING );
\r
9358 // Invoke user callback to get fresh output data.
\r
9359 int doStopStream = 0;
\r
9360 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9361 double streamTime = getStreamTime();
\r
9362 RtAudioStreamStatus status = 0;
\r
9363 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9364 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9365 handle->xrun[0] = false;
\r
9367 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9368 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9369 handle->xrun[1] = false;
\r
9371 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9372 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9373 if ( doStopStream == 2 ) {
\r
9374 this->abortStream();
\r
9378 MUTEX_LOCK( &stream_.mutex );
\r
9380 // The state might change while waiting on a mutex.
\r
9381 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9386 RtAudioFormat format;
\r
9388 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9390 // Setup parameters and do buffer conversion if necessary.
\r
9391 if ( stream_.doConvertBuffer[0] ) {
\r
9392 buffer = stream_.deviceBuffer;
\r
9393 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9394 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9395 format = stream_.deviceFormat[0];
\r
9398 buffer = stream_.userBuffer[0];
\r
9399 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9400 format = stream_.userFormat;
\r
9403 // Do byte swapping if necessary.
\r
9404 if ( stream_.doByteSwap[0] )
\r
9405 byteSwapBuffer( buffer, samples, format );
\r
9407 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9409 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9410 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9411 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9412 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9413 handle->triggered = true;
\r
9416 // Write samples to device.
\r
9417 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9419 if ( result == -1 ) {
\r
9420 // We'll assume this is an underrun, though there isn't a
\r
9421 // specific means for determining that.
\r
9422 handle->xrun[0] = true;
\r
9423 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9424 error( RtAudioError::WARNING );
\r
9425 // Continue on to input section.
\r
9429 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9431 // Setup parameters.
\r
9432 if ( stream_.doConvertBuffer[1] ) {
\r
9433 buffer = stream_.deviceBuffer;
\r
9434 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9435 format = stream_.deviceFormat[1];
\r
9438 buffer = stream_.userBuffer[1];
\r
9439 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9440 format = stream_.userFormat;
\r
9443 // Read samples from device.
\r
9444 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9446 if ( result == -1 ) {
\r
9447 // We'll assume this is an overrun, though there isn't a
\r
9448 // specific means for determining that.
\r
9449 handle->xrun[1] = true;
\r
9450 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9451 error( RtAudioError::WARNING );
\r
9455 // Do byte swapping if necessary.
\r
9456 if ( stream_.doByteSwap[1] )
\r
9457 byteSwapBuffer( buffer, samples, format );
\r
9459 // Do buffer conversion if necessary.
\r
9460 if ( stream_.doConvertBuffer[1] )
\r
9461 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9465 MUTEX_UNLOCK( &stream_.mutex );
\r
9467 RtApi::tickStreamTime();
\r
9468 if ( doStopStream == 1 ) this->stopStream();
\r
9471 static void *ossCallbackHandler( void *ptr )
\r
9473 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9474 RtApiOss *object = (RtApiOss *) info->object;
\r
9475 bool *isRunning = &info->isRunning;
\r
9477 while ( *isRunning == true ) {
\r
9478 pthread_testcancel();
\r
9479 object->callbackEvent();
\r
9482 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
9495 // This method can be modified to control the behavior of error
\r
9496 // message printing.
\r
9497 void RtApi :: error( RtAudioError::Type type )
\r
9499 errorStream_.str(""); // clear the ostringstream
\r
9501 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9502 if ( errorCallback ) {
\r
9503 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9505 if ( firstErrorOccurred_ )
\r
9508 firstErrorOccurred_ = true;
\r
9509 const std::string errorMessage = errorText_;
\r
9511 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9512 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9516 errorCallback( type, errorMessage );
\r
9517 firstErrorOccurred_ = false;
\r
9521 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9522 std::cerr << '\n' << errorText_ << "\n\n";
\r
9523 else if ( type != RtAudioError::WARNING )
\r
9524 throw( RtAudioError( errorText_, type ) );
\r
9527 void RtApi :: verifyStream()
\r
9529 if ( stream_.state == STREAM_CLOSED ) {
\r
9530 errorText_ = "RtApi:: a stream is not open!";
\r
9531 error( RtAudioError::INVALID_USE );
\r
9535 void RtApi :: clearStreamInfo()
\r
9537 stream_.mode = UNINITIALIZED;
\r
9538 stream_.state = STREAM_CLOSED;
\r
9539 stream_.sampleRate = 0;
\r
9540 stream_.bufferSize = 0;
\r
9541 stream_.nBuffers = 0;
\r
9542 stream_.userFormat = 0;
\r
9543 stream_.userInterleaved = true;
\r
9544 stream_.streamTime = 0.0;
\r
9545 stream_.apiHandle = 0;
\r
9546 stream_.deviceBuffer = 0;
\r
9547 stream_.callbackInfo.callback = 0;
\r
9548 stream_.callbackInfo.userData = 0;
\r
9549 stream_.callbackInfo.isRunning = false;
\r
9550 stream_.callbackInfo.errorCallback = 0;
\r
9551 for ( int i=0; i<2; i++ ) {
\r
9552 stream_.device[i] = 11111;
\r
9553 stream_.doConvertBuffer[i] = false;
\r
9554 stream_.deviceInterleaved[i] = true;
\r
9555 stream_.doByteSwap[i] = false;
\r
9556 stream_.nUserChannels[i] = 0;
\r
9557 stream_.nDeviceChannels[i] = 0;
\r
9558 stream_.channelOffset[i] = 0;
\r
9559 stream_.deviceFormat[i] = 0;
\r
9560 stream_.latency[i] = 0;
\r
9561 stream_.userBuffer[i] = 0;
\r
9562 stream_.convertInfo[i].channels = 0;
\r
9563 stream_.convertInfo[i].inJump = 0;
\r
9564 stream_.convertInfo[i].outJump = 0;
\r
9565 stream_.convertInfo[i].inFormat = 0;
\r
9566 stream_.convertInfo[i].outFormat = 0;
\r
9567 stream_.convertInfo[i].inOffset.clear();
\r
9568 stream_.convertInfo[i].outOffset.clear();
\r
9572 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9574 if ( format == RTAUDIO_SINT16 )
\r
9576 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9578 else if ( format == RTAUDIO_FLOAT64 )
\r
9580 else if ( format == RTAUDIO_SINT24 )
\r
9582 else if ( format == RTAUDIO_SINT8 )
\r
9585 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9586 error( RtAudioError::WARNING );
\r
9591 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9593 if ( mode == INPUT ) { // convert device to user buffer
\r
9594 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9595 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9596 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9597 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9599 else { // convert user to device buffer
\r
9600 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9601 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9602 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9603 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9606 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9607 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9609 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9611 // Set up the interleave/deinterleave offsets.
\r
9612 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9613 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9614 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9615 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9616 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9617 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9618 stream_.convertInfo[mode].inJump = 1;
\r
9622 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9623 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9624 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9625 stream_.convertInfo[mode].outJump = 1;
\r
9629 else { // no (de)interleaving
\r
9630 if ( stream_.userInterleaved ) {
\r
9631 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9632 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9633 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9637 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9638 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9639 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9640 stream_.convertInfo[mode].inJump = 1;
\r
9641 stream_.convertInfo[mode].outJump = 1;
\r
9646 // Add channel offset.
\r
9647 if ( firstChannel > 0 ) {
\r
9648 if ( stream_.deviceInterleaved[mode] ) {
\r
9649 if ( mode == OUTPUT ) {
\r
9650 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9651 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9654 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9655 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9659 if ( mode == OUTPUT ) {
\r
9660 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9661 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9664 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9665 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9671 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9673 // This function does format conversion, input/output channel compensation, and
\r
9674 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9675 // the lower three bytes of a 32-bit integer.
\r
9677 // Clear our device buffer when in/out duplex device channels are different
\r
9678 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9679 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9680 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9683 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9685 Float64 *out = (Float64 *)outBuffer;
\r
9687 if (info.inFormat == RTAUDIO_SINT8) {
\r
9688 signed char *in = (signed char *)inBuffer;
\r
9689 scale = 1.0 / 127.5;
\r
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9691 for (j=0; j<info.channels; j++) {
\r
9692 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9693 out[info.outOffset[j]] += 0.5;
\r
9694 out[info.outOffset[j]] *= scale;
\r
9696 in += info.inJump;
\r
9697 out += info.outJump;
\r
9700 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9701 Int16 *in = (Int16 *)inBuffer;
\r
9702 scale = 1.0 / 32767.5;
\r
9703 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9704 for (j=0; j<info.channels; j++) {
\r
9705 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9706 out[info.outOffset[j]] += 0.5;
\r
9707 out[info.outOffset[j]] *= scale;
\r
9709 in += info.inJump;
\r
9710 out += info.outJump;
\r
9713 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9714 Int24 *in = (Int24 *)inBuffer;
\r
9715 scale = 1.0 / 8388607.5;
\r
9716 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9717 for (j=0; j<info.channels; j++) {
\r
9718 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9719 out[info.outOffset[j]] += 0.5;
\r
9720 out[info.outOffset[j]] *= scale;
\r
9722 in += info.inJump;
\r
9723 out += info.outJump;
\r
9726 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9727 Int32 *in = (Int32 *)inBuffer;
\r
9728 scale = 1.0 / 2147483647.5;
\r
9729 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9730 for (j=0; j<info.channels; j++) {
\r
9731 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9732 out[info.outOffset[j]] += 0.5;
\r
9733 out[info.outOffset[j]] *= scale;
\r
9735 in += info.inJump;
\r
9736 out += info.outJump;
\r
9739 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9740 Float32 *in = (Float32 *)inBuffer;
\r
9741 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9742 for (j=0; j<info.channels; j++) {
\r
9743 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9745 in += info.inJump;
\r
9746 out += info.outJump;
\r
9749 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9750 // Channel compensation and/or (de)interleaving only.
\r
9751 Float64 *in = (Float64 *)inBuffer;
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9756 in += info.inJump;
\r
9757 out += info.outJump;
\r
9761 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9763 Float32 *out = (Float32 *)outBuffer;
\r
9765 if (info.inFormat == RTAUDIO_SINT8) {
\r
9766 signed char *in = (signed char *)inBuffer;
\r
9767 scale = (Float32) ( 1.0 / 127.5 );
\r
9768 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9769 for (j=0; j<info.channels; j++) {
\r
9770 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9771 out[info.outOffset[j]] += 0.5;
\r
9772 out[info.outOffset[j]] *= scale;
\r
9774 in += info.inJump;
\r
9775 out += info.outJump;
\r
9778 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9779 Int16 *in = (Int16 *)inBuffer;
\r
9780 scale = (Float32) ( 1.0 / 32767.5 );
\r
9781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9782 for (j=0; j<info.channels; j++) {
\r
9783 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9784 out[info.outOffset[j]] += 0.5;
\r
9785 out[info.outOffset[j]] *= scale;
\r
9787 in += info.inJump;
\r
9788 out += info.outJump;
\r
9791 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9792 Int24 *in = (Int24 *)inBuffer;
\r
9793 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9794 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9795 for (j=0; j<info.channels; j++) {
\r
9796 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9797 out[info.outOffset[j]] += 0.5;
\r
9798 out[info.outOffset[j]] *= scale;
\r
9800 in += info.inJump;
\r
9801 out += info.outJump;
\r
9804 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9805 Int32 *in = (Int32 *)inBuffer;
\r
9806 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9807 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9808 for (j=0; j<info.channels; j++) {
\r
9809 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9810 out[info.outOffset[j]] += 0.5;
\r
9811 out[info.outOffset[j]] *= scale;
\r
9813 in += info.inJump;
\r
9814 out += info.outJump;
\r
9817 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9818 // Channel compensation and/or (de)interleaving only.
\r
9819 Float32 *in = (Float32 *)inBuffer;
\r
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9821 for (j=0; j<info.channels; j++) {
\r
9822 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9824 in += info.inJump;
\r
9825 out += info.outJump;
\r
9828 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9829 Float64 *in = (Float64 *)inBuffer;
\r
9830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9831 for (j=0; j<info.channels; j++) {
\r
9832 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9834 in += info.inJump;
\r
9835 out += info.outJump;
\r
9839 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9840 Int32 *out = (Int32 *)outBuffer;
\r
9841 if (info.inFormat == RTAUDIO_SINT8) {
\r
9842 signed char *in = (signed char *)inBuffer;
\r
9843 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9844 for (j=0; j<info.channels; j++) {
\r
9845 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9846 out[info.outOffset[j]] <<= 24;
\r
9848 in += info.inJump;
\r
9849 out += info.outJump;
\r
9852 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9853 Int16 *in = (Int16 *)inBuffer;
\r
9854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9855 for (j=0; j<info.channels; j++) {
\r
9856 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9857 out[info.outOffset[j]] <<= 16;
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9864 Int24 *in = (Int24 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9868 out[info.outOffset[j]] <<= 8;
\r
9870 in += info.inJump;
\r
9871 out += info.outJump;
\r
9874 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9875 // Channel compensation and/or (de)interleaving only.
\r
9876 Int32 *in = (Int32 *)inBuffer;
\r
9877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9878 for (j=0; j<info.channels; j++) {
\r
9879 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9881 in += info.inJump;
\r
9882 out += info.outJump;
\r
9885 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9886 Float32 *in = (Float32 *)inBuffer;
\r
9887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9888 for (j=0; j<info.channels; j++) {
\r
9889 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9891 in += info.inJump;
\r
9892 out += info.outJump;
\r
9895 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9896 Float64 *in = (Float64 *)inBuffer;
\r
9897 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9898 for (j=0; j<info.channels; j++) {
\r
9899 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9901 in += info.inJump;
\r
9902 out += info.outJump;
\r
9906 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9907 Int24 *out = (Int24 *)outBuffer;
\r
9908 if (info.inFormat == RTAUDIO_SINT8) {
\r
9909 signed char *in = (signed char *)inBuffer;
\r
9910 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9911 for (j=0; j<info.channels; j++) {
\r
9912 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9913 //out[info.outOffset[j]] <<= 16;
\r
9915 in += info.inJump;
\r
9916 out += info.outJump;
\r
9919 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9920 Int16 *in = (Int16 *)inBuffer;
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9924 //out[info.outOffset[j]] <<= 8;
\r
9926 in += info.inJump;
\r
9927 out += info.outJump;
\r
9930 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9931 // Channel compensation and/or (de)interleaving only.
\r
9932 Int24 *in = (Int24 *)inBuffer;
\r
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9934 for (j=0; j<info.channels; j++) {
\r
9935 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9941 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9942 Int32 *in = (Int32 *)inBuffer;
\r
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9944 for (j=0; j<info.channels; j++) {
\r
9945 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9946 //out[info.outOffset[j]] >>= 8;
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9953 Float32 *in = (Float32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9962 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9963 Float64 *in = (Float64 *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9968 in += info.inJump;
\r
9969 out += info.outJump;
\r
9973 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9974 Int16 *out = (Int16 *)outBuffer;
\r
9975 if (info.inFormat == RTAUDIO_SINT8) {
\r
9976 signed char *in = (signed char *)inBuffer;
\r
9977 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9978 for (j=0; j<info.channels; j++) {
\r
9979 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9980 out[info.outOffset[j]] <<= 8;
\r
9982 in += info.inJump;
\r
9983 out += info.outJump;
\r
9986 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9987 // Channel compensation and/or (de)interleaving only.
\r
9988 Int16 *in = (Int16 *)inBuffer;
\r
9989 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9990 for (j=0; j<info.channels; j++) {
\r
9991 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9993 in += info.inJump;
\r
9994 out += info.outJump;
\r
9997 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9998 Int24 *in = (Int24 *)inBuffer;
\r
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10000 for (j=0; j<info.channels; j++) {
\r
10001 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10003 in += info.inJump;
\r
10004 out += info.outJump;
\r
10007 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10008 Int32 *in = (Int32 *)inBuffer;
\r
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10010 for (j=0; j<info.channels; j++) {
\r
10011 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10013 in += info.inJump;
\r
10014 out += info.outJump;
\r
10017 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10018 Float32 *in = (Float32 *)inBuffer;
\r
10019 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10020 for (j=0; j<info.channels; j++) {
\r
10021 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10023 in += info.inJump;
\r
10024 out += info.outJump;
\r
10027 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10028 Float64 *in = (Float64 *)inBuffer;
\r
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10030 for (j=0; j<info.channels; j++) {
\r
10031 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10033 in += info.inJump;
\r
10034 out += info.outJump;
\r
10038 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10039 signed char *out = (signed char *)outBuffer;
\r
10040 if (info.inFormat == RTAUDIO_SINT8) {
\r
10041 // Channel compensation and/or (de)interleaving only.
\r
10042 signed char *in = (signed char *)inBuffer;
\r
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10044 for (j=0; j<info.channels; j++) {
\r
10045 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10047 in += info.inJump;
\r
10048 out += info.outJump;
\r
10051 if (info.inFormat == RTAUDIO_SINT16) {
\r
10052 Int16 *in = (Int16 *)inBuffer;
\r
10053 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10054 for (j=0; j<info.channels; j++) {
\r
10055 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10057 in += info.inJump;
\r
10058 out += info.outJump;
\r
10061 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10062 Int24 *in = (Int24 *)inBuffer;
\r
10063 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10064 for (j=0; j<info.channels; j++) {
\r
10065 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10067 in += info.inJump;
\r
10068 out += info.outJump;
\r
10071 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10072 Int32 *in = (Int32 *)inBuffer;
\r
10073 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10074 for (j=0; j<info.channels; j++) {
\r
10075 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10077 in += info.inJump;
\r
10078 out += info.outJump;
\r
10081 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10082 Float32 *in = (Float32 *)inBuffer;
\r
10083 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10084 for (j=0; j<info.channels; j++) {
\r
10085 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10087 in += info.inJump;
\r
10088 out += info.outJump;
\r
10091 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10092 Float64 *in = (Float64 *)inBuffer;
\r
10093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10094 for (j=0; j<info.channels; j++) {
\r
10095 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10097 in += info.inJump;
\r
10098 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10108 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10110 register char val;
\r
10111 register char *ptr;
\r
10114 if ( format == RTAUDIO_SINT16 ) {
\r
10115 for ( unsigned int i=0; i<samples; i++ ) {
\r
10116 // Swap 1st and 2nd bytes.
\r
10118 *(ptr) = *(ptr+1);
\r
10121 // Increment 2 bytes.
\r
10125 else if ( format == RTAUDIO_SINT32 ||
\r
10126 format == RTAUDIO_FLOAT32 ) {
\r
10127 for ( unsigned int i=0; i<samples; i++ ) {
\r
10128 // Swap 1st and 4th bytes.
\r
10130 *(ptr) = *(ptr+3);
\r
10133 // Swap 2nd and 3rd bytes.
\r
10136 *(ptr) = *(ptr+1);
\r
10139 // Increment 3 more bytes.
\r
10143 else if ( format == RTAUDIO_SINT24 ) {
\r
10144 for ( unsigned int i=0; i<samples; i++ ) {
\r
10145 // Swap 1st and 3rd bytes.
\r
10147 *(ptr) = *(ptr+2);
\r
10150 // Increment 2 more bytes.
\r
10154 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10155 for ( unsigned int i=0; i<samples; i++ ) {
\r
10156 // Swap 1st and 8th bytes
\r
10158 *(ptr) = *(ptr+7);
\r
10161 // Swap 2nd and 7th bytes
\r
10164 *(ptr) = *(ptr+5);
\r
10167 // Swap 3rd and 6th bytes
\r
10170 *(ptr) = *(ptr+3);
\r
10173 // Swap 4th and 5th bytes
\r
10176 *(ptr) = *(ptr+1);
\r
10179 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2