1 /************************************************************************/
\r
/*! \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
#include "RtAudio.h"

#include <iostream>
#include <cstdlib>
#include <cstring>
#include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Windows critical sections on the Windows
// APIs, pthread mutexes on the POSIX APIs, and no-op dummies otherwise.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Pass-through for device names already in a narrow encoding.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) device name to a UTF-8 std::string.
  // The first WideCharToMultiByte call sizes the buffer (including the
  // terminating NUL, hence length-1 below); the second performs the
  // actual conversion in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1409 if ( stream_.state == STREAM_RUNNING )
\r
1410 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1412 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1414 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1415 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1419 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1420 if ( stream_.state == STREAM_RUNNING )
\r
1421 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1422 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1423 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1425 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1426 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1430 for ( int i=0; i<2; i++ ) {
\r
1431 if ( stream_.userBuffer[i] ) {
\r
1432 free( stream_.userBuffer[i] );
\r
1433 stream_.userBuffer[i] = 0;
\r
1437 if ( stream_.deviceBuffer ) {
\r
1438 free( stream_.deviceBuffer );
\r
1439 stream_.deviceBuffer = 0;
\r
1442 // Destroy pthread condition variable.
\r
1443 pthread_cond_destroy( &handle->condition );
\r
1445 stream_.apiHandle = 0;
\r
1447 stream_.mode = UNINITIALIZED;
\r
1448 stream_.state = STREAM_CLOSED;
\r
1451 void RtApiCore :: startStream( void )
\r
1454 if ( stream_.state == STREAM_RUNNING ) {
\r
1455 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1456 error( RtAudioError::WARNING );
\r
1460 OSStatus result = noErr;
\r
1461 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1462 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1464 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1465 if ( result != noErr ) {
\r
1466 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1467 errorText_ = errorStream_.str();
\r
1472 if ( stream_.mode == INPUT ||
\r
1473 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1475 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1476 if ( result != noErr ) {
\r
1477 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1478 errorText_ = errorStream_.str();
\r
1483 handle->drainCounter = 0;
\r
1484 handle->internalDrain = false;
\r
1485 stream_.state = STREAM_RUNNING;
\r
1488 if ( result == noErr ) return;
\r
1489 error( RtAudioError::SYSTEM_ERROR );
\r
1492 void RtApiCore :: stopStream( void )
\r
1495 if ( stream_.state == STREAM_STOPPED ) {
\r
1496 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1497 error( RtAudioError::WARNING );
\r
1501 OSStatus result = noErr;
\r
1502 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1503 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1505 if ( handle->drainCounter == 0 ) {
\r
1506 handle->drainCounter = 2;
\r
1507 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1510 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1511 if ( result != noErr ) {
\r
1512 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1513 errorText_ = errorStream_.str();
\r
1518 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1520 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1521 if ( result != noErr ) {
\r
1522 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1523 errorText_ = errorStream_.str();
\r
1528 stream_.state = STREAM_STOPPED;
\r
1531 if ( result == noErr ) return;
\r
1532 error( RtAudioError::SYSTEM_ERROR );
\r
1535 void RtApiCore :: abortStream( void )
\r
1538 if ( stream_.state == STREAM_STOPPED ) {
\r
1539 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1540 error( RtAudioError::WARNING );
\r
1544 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1545 handle->drainCounter = 2;
\r
1550 // This function will be called by a spawned thread when the user
\r
1551 // callback function signals that the stream should be stopped or
\r
1552 // aborted. It is better to handle it this way because the
\r
1553 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1554 // function is called.
\r
1555 static void *coreStopStream( void *ptr )
\r
1557 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1558 RtApiCore *object = (RtApiCore *) info->object;
\r
1560 object->stopStream();
\r
1561 pthread_exit( NULL );
\r
1564 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1565 const AudioBufferList *inBufferList,
\r
1566 const AudioBufferList *outBufferList )
\r
1568 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1569 if ( stream_.state == STREAM_CLOSED ) {
\r
1570 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1571 error( RtAudioError::WARNING );
\r
1575 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1576 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1578 // Check if we were draining the stream and signal is finished.
\r
1579 if ( handle->drainCounter > 3 ) {
\r
1580 ThreadHandle threadId;
\r
1582 stream_.state = STREAM_STOPPING;
\r
1583 if ( handle->internalDrain == true )
\r
1584 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1585 else // external call to stopStream()
\r
1586 pthread_cond_signal( &handle->condition );
\r
1590 AudioDeviceID outputDevice = handle->id[0];
\r
1592 // Invoke user callback to get fresh output data UNLESS we are
\r
1593 // draining stream or duplex mode AND the input/output devices are
\r
1594 // different AND this function is called for the input device.
\r
1595 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1596 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1597 double streamTime = getStreamTime();
\r
1598 RtAudioStreamStatus status = 0;
\r
1599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1601 handle->xrun[0] = false;
\r
1603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1604 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1605 handle->xrun[1] = false;
\r
1608 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1609 stream_.bufferSize, streamTime, status, info->userData );
\r
1610 if ( cbReturnValue == 2 ) {
\r
1611 stream_.state = STREAM_STOPPING;
\r
1612 handle->drainCounter = 2;
\r
1616 else if ( cbReturnValue == 1 ) {
\r
1617 handle->drainCounter = 1;
\r
1618 handle->internalDrain = true;
\r
1622 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1624 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1626 if ( handle->nStreams[0] == 1 ) {
\r
1627 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1629 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1631 else { // fill multiple streams with zeros
\r
1632 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1633 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1635 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1639 else if ( handle->nStreams[0] == 1 ) {
\r
1640 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1641 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1642 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 else { // copy from user buffer
\r
1645 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1646 stream_.userBuffer[0],
\r
1647 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1650 else { // fill multiple streams
\r
1651 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1652 if ( stream_.doConvertBuffer[0] ) {
\r
1653 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1654 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1657 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1658 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1659 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1660 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1661 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1664 else { // fill multiple multi-channel streams with interleaved data
\r
1665 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1666 Float32 *out, *in;
\r
1668 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1669 UInt32 inChannels = stream_.nUserChannels[0];
\r
1670 if ( stream_.doConvertBuffer[0] ) {
\r
1671 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1672 inChannels = stream_.nDeviceChannels[0];
\r
1675 if ( inInterleaved ) inOffset = 1;
\r
1676 else inOffset = stream_.bufferSize;
\r
1678 channelsLeft = inChannels;
\r
1679 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1681 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1682 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1685 // Account for possible channel offset in first stream
\r
1686 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1687 streamChannels -= stream_.channelOffset[0];
\r
1688 outJump = stream_.channelOffset[0];
\r
1692 // Account for possible unfilled channels at end of the last stream
\r
1693 if ( streamChannels > channelsLeft ) {
\r
1694 outJump = streamChannels - channelsLeft;
\r
1695 streamChannels = channelsLeft;
\r
1698 // Determine input buffer offsets and skips
\r
1699 if ( inInterleaved ) {
\r
1700 inJump = inChannels;
\r
1701 in += inChannels - channelsLeft;
\r
1705 in += (inChannels - channelsLeft) * inOffset;
\r
1708 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1709 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1710 *out++ = in[j*inOffset];
\r
1715 channelsLeft -= streamChannels;
\r
1721 // Don't bother draining input
\r
1722 if ( handle->drainCounter ) {
\r
1723 handle->drainCounter++;
\r
1727 AudioDeviceID inputDevice;
\r
1728 inputDevice = handle->id[1];
\r
1729 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1731 if ( handle->nStreams[1] == 1 ) {
\r
1732 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1733 convertBuffer( stream_.userBuffer[1],
\r
1734 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1735 stream_.convertInfo[1] );
\r
1737 else { // copy to user buffer
\r
1738 memcpy( stream_.userBuffer[1],
\r
1739 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1740 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1743 else { // read from multiple streams
\r
1744 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1745 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1747 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1748 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1749 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1750 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1751 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1754 else { // read from multiple multi-channel streams
\r
1755 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1756 Float32 *out, *in;
\r
1758 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1759 UInt32 outChannels = stream_.nUserChannels[1];
\r
1760 if ( stream_.doConvertBuffer[1] ) {
\r
1761 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1762 outChannels = stream_.nDeviceChannels[1];
\r
1765 if ( outInterleaved ) outOffset = 1;
\r
1766 else outOffset = stream_.bufferSize;
\r
1768 channelsLeft = outChannels;
\r
1769 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1771 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1772 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1775 // Account for possible channel offset in first stream
\r
1776 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1777 streamChannels -= stream_.channelOffset[1];
\r
1778 inJump = stream_.channelOffset[1];
\r
1782 // Account for possible unread channels at end of the last stream
\r
1783 if ( streamChannels > channelsLeft ) {
\r
1784 inJump = streamChannels - channelsLeft;
\r
1785 streamChannels = channelsLeft;
\r
1788 // Determine output buffer offsets and skips
\r
1789 if ( outInterleaved ) {
\r
1790 outJump = outChannels;
\r
1791 out += outChannels - channelsLeft;
\r
1795 out += (outChannels - channelsLeft) * outOffset;
\r
1798 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1799 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1800 out[j*outOffset] = *in++;
\r
1805 channelsLeft -= streamChannels;
\r
1809 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1810 convertBuffer( stream_.userBuffer[1],
\r
1811 stream_.deviceBuffer,
\r
1812 stream_.convertInfo[1] );
\r
1818 //MUTEX_UNLOCK( &stream_.mutex );
\r
1820 RtApi::tickStreamTime();
\r
1824 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1828 case kAudioHardwareNotRunningError:
\r
1829 return "kAudioHardwareNotRunningError";
\r
1831 case kAudioHardwareUnspecifiedError:
\r
1832 return "kAudioHardwareUnspecifiedError";
\r
1834 case kAudioHardwareUnknownPropertyError:
\r
1835 return "kAudioHardwareUnknownPropertyError";
\r
1837 case kAudioHardwareBadPropertySizeError:
\r
1838 return "kAudioHardwareBadPropertySizeError";
\r
1840 case kAudioHardwareIllegalOperationError:
\r
1841 return "kAudioHardwareIllegalOperationError";
\r
1843 case kAudioHardwareBadObjectError:
\r
1844 return "kAudioHardwareBadObjectError";
\r
1846 case kAudioHardwareBadDeviceError:
\r
1847 return "kAudioHardwareBadDeviceError";
\r
1849 case kAudioHardwareBadStreamError:
\r
1850 return "kAudioHardwareBadStreamError";
\r
1852 case kAudioHardwareUnsupportedOperationError:
\r
1853 return "kAudioHardwareUnsupportedOperationError";
\r
1855 case kAudioDeviceUnsupportedFormatError:
\r
1856 return "kAudioDeviceUnsupportedFormatError";
\r
1858 case kAudioDevicePermissionsError:
\r
1859 return "kAudioDevicePermissionsError";
\r
1862 return "CoreAudio unknown error";
\r
1866 //******************** End of __MACOSX_CORE__ *********************//
\r
1869 #if defined(__UNIX_JACK__)
\r
1871 // JACK is a low-latency audio server, originally written for the
\r
1872 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1873 // connect a number of different applications to an audio device, as
\r
1874 // well as allowing them to share audio between themselves.
\r
1876 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1877 // have ports connected to the server. The JACK server is typically
\r
1878 // started in a terminal as follows:
\r
1880 // .jackd -d alsa -d hw:0
\r
1882 // or through an interface program such as qjackctl. Many of the
\r
1883 // parameters normally set for a stream are fixed by the JACK server
\r
1884 // and can be specified when the JACK server is started. In
\r
1887 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1889 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1890 // frames, and number of buffers = 4. Once the server is running, it
\r
1891 // is not possible to override these values. If the values are not
\r
1892 // specified in the command-line, the JACK server uses default values.
\r
1894 // The JACK server does not have to be running when an instance of
\r
1895 // RtApiJack is created, though the function getDeviceCount() will
\r
1896 // report 0 devices found until JACK has been started. When no
\r
1897 // devices are available (i.e., the JACK server is not running), a
\r
1898 // stream cannot be opened.
\r
1900 #include <jack/jack.h>
\r
1901 #include <unistd.h>
\r
1904 // A structure to hold various information related to the Jack API
\r
1905 // implementation.
\r
1906 struct JackHandle {
\r
1907 jack_client_t *client;
\r
1908 jack_port_t **ports[2];
\r
1909 std::string deviceName[2];
\r
1911 pthread_cond_t condition;
\r
1912 int drainCounter; // Tracks callback counts when draining
\r
1913 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1916 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed to silence JACK's internal error output
// (see the RtApiJack constructor). Note: dropped the stray semicolon that
// followed the function body.
static void jackSilentError( const char * ) {}
\r
1921 RtApiJack :: RtApiJack()
\r
1923 // Nothing to do here.
\r
1924 #if !defined(__RTAUDIO_DEBUG__)
\r
1925 // Turn off Jack's internal error reporting.
\r
1926 jack_set_error_function( &jackSilentError );
\r
1930 RtApiJack :: ~RtApiJack()
\r
1932 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1935 unsigned int RtApiJack :: getDeviceCount( void )
\r
1937 // See if we can become a jack client.
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1941 if ( client == 0 ) return 0;
\r
1943 const char **ports;
\r
1944 std::string port, previousPort;
\r
1945 unsigned int nChannels = 0, nDevices = 0;
\r
1946 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1948 // Parse the port names up to the first colon (:).
\r
1949 size_t iColon = 0;
\r
1951 port = (char *) ports[ nChannels ];
\r
1952 iColon = port.find(":");
\r
1953 if ( iColon != std::string::npos ) {
\r
1954 port = port.substr( 0, iColon + 1 );
\r
1955 if ( port != previousPort ) {
\r
1957 previousPort = port;
\r
1960 } while ( ports[++nChannels] );
\r
1964 jack_client_close( client );
\r
1968 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1970 RtAudio::DeviceInfo info;
\r
1971 info.probed = false;
\r
1973 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1974 jack_status_t *status = NULL;
\r
1975 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1976 if ( client == 0 ) {
\r
1977 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1978 error( RtAudioError::WARNING );
\r
1982 const char **ports;
\r
1983 std::string port, previousPort;
\r
1984 unsigned int nPorts = 0, nDevices = 0;
\r
1985 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1987 // Parse the port names up to the first colon (:).
\r
1988 size_t iColon = 0;
\r
1990 port = (char *) ports[ nPorts ];
\r
1991 iColon = port.find(":");
\r
1992 if ( iColon != std::string::npos ) {
\r
1993 port = port.substr( 0, iColon );
\r
1994 if ( port != previousPort ) {
\r
1995 if ( nDevices == device ) info.name = port;
\r
1997 previousPort = port;
\r
2000 } while ( ports[++nPorts] );
\r
2004 if ( device >= nDevices ) {
\r
2005 jack_client_close( client );
\r
2006 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2007 error( RtAudioError::INVALID_USE );
\r
2011 // Get the current jack server sample rate.
\r
2012 info.sampleRates.clear();
\r
2014 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2015 info.sampleRates.push_back( info.preferredSampleRate );
\r
2017 // Count the available ports containing the client name as device
\r
2018 // channels. Jack "input ports" equal RtAudio output channels.
\r
2019 unsigned int nChannels = 0;
\r
2020 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2022 while ( ports[ nChannels ] ) nChannels++;
\r
2024 info.outputChannels = nChannels;
\r
2027 // Jack "output ports" equal RtAudio input channels.
\r
2029 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2031 while ( ports[ nChannels ] ) nChannels++;
\r
2033 info.inputChannels = nChannels;
\r
2036 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2037 jack_client_close(client);
\r
2038 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2039 error( RtAudioError::WARNING );
\r
2043 // If device opens for both playback and capture, we determine the channels.
\r
2044 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2045 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2047 // Jack always uses 32-bit floats.
\r
2048 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2050 // Jack doesn't provide default devices so we'll use the first available one.
\r
2051 if ( device == 0 && info.outputChannels > 0 )
\r
2052 info.isDefaultOutput = true;
\r
2053 if ( device == 0 && info.inputChannels > 0 )
\r
2054 info.isDefaultInput = true;
\r
2056 jack_client_close(client);
\r
2057 info.probed = true;
\r
2061 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2063 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2065 RtApiJack *object = (RtApiJack *) info->object;
\r
2066 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2071 // This function will be called by a spawned thread when the Jack
\r
2072 // server signals that it is shutting down. It is necessary to handle
\r
2073 // it this way because the jackShutdown() function must return before
\r
2074 // the jack_deactivate() function (in closeStream()) will return.
\r
2075 static void *jackCloseStream( void *ptr )
\r
2077 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2078 RtApiJack *object = (RtApiJack *) info->object;
\r
2080 object->closeStream();
\r
2082 pthread_exit( NULL );
\r
2084 static void jackShutdown( void *infoPointer )
\r
2086 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2087 RtApiJack *object = (RtApiJack *) info->object;
\r
2089 // Check current stream state. If stopped, then we'll assume this
\r
2090 // was called as a result of a call to RtApiJack::stopStream (the
\r
2091 // deactivation of a client handle causes this function to be called).
\r
2092 // If not, we'll assume the Jack server is shutting down or some
\r
2093 // other problem occurred and we should close the stream.
\r
2094 if ( object->isStreamRunning() == false ) return;
\r
2096 ThreadHandle threadId;
\r
2097 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2098 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2101 static int jackXrun( void *infoPointer )
\r
2103 JackHandle *handle = (JackHandle *) infoPointer;
\r
2105 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2106 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2111 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2112 unsigned int firstChannel, unsigned int sampleRate,
\r
2113 RtAudioFormat format, unsigned int *bufferSize,
\r
2114 RtAudio::StreamOptions *options )
\r
2116 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2118 // Look for jack server and try to become a client (only do once per stream).
\r
2119 jack_client_t *client = 0;
\r
2120 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2121 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2122 jack_status_t *status = NULL;
\r
2123 if ( options && !options->streamName.empty() )
\r
2124 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2126 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2127 if ( client == 0 ) {
\r
2128 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2129 error( RtAudioError::WARNING );
\r
2134 // The handle must have been created on an earlier pass.
\r
2135 client = handle->client;
\r
2138 const char **ports;
\r
2139 std::string port, previousPort, deviceName;
\r
2140 unsigned int nPorts = 0, nDevices = 0;
\r
2141 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2143 // Parse the port names up to the first colon (:).
\r
2144 size_t iColon = 0;
\r
2146 port = (char *) ports[ nPorts ];
\r
2147 iColon = port.find(":");
\r
2148 if ( iColon != std::string::npos ) {
\r
2149 port = port.substr( 0, iColon );
\r
2150 if ( port != previousPort ) {
\r
2151 if ( nDevices == device ) deviceName = port;
\r
2153 previousPort = port;
\r
2156 } while ( ports[++nPorts] );
\r
2160 if ( device >= nDevices ) {
\r
2161 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2165 // Count the available ports containing the client name as device
\r
2166 // channels. Jack "input ports" equal RtAudio output channels.
\r
2167 unsigned int nChannels = 0;
\r
2168 unsigned long flag = JackPortIsInput;
\r
2169 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2170 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2172 while ( ports[ nChannels ] ) nChannels++;
\r
2176 // Compare the jack ports for specified client to the requested number of channels.
\r
2177 if ( nChannels < (channels + firstChannel) ) {
\r
2178 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2179 errorText_ = errorStream_.str();
\r
2183 // Check the jack server sample rate.
\r
2184 unsigned int jackRate = jack_get_sample_rate( client );
\r
2185 if ( sampleRate != jackRate ) {
\r
2186 jack_client_close( client );
\r
2187 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2188 errorText_ = errorStream_.str();
\r
2191 stream_.sampleRate = jackRate;
\r
2193 // Get the latency of the JACK port.
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2195 if ( ports[ firstChannel ] ) {
\r
2196 // Added by Ge Wang
\r
2197 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2198 // the range (usually the min and max are equal)
\r
2199 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2200 // get the latency range
\r
2201 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2202 // be optimistic, use the min!
\r
2203 stream_.latency[mode] = latrange.min;
\r
2204 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2208 // The jack server always uses 32-bit floating-point data.
\r
2209 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2210 stream_.userFormat = format;
\r
2212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2213 else stream_.userInterleaved = true;
\r
2215 // Jack always uses non-interleaved buffers.
\r
2216 stream_.deviceInterleaved[mode] = false;
\r
2218 // Jack always provides host byte-ordered data.
\r
2219 stream_.doByteSwap[mode] = false;
\r
2221 // Get the buffer size. The buffer size and number of buffers
\r
2222 // (periods) is set when the jack server is started.
\r
2223 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2224 *bufferSize = stream_.bufferSize;
\r
2226 stream_.nDeviceChannels[mode] = channels;
\r
2227 stream_.nUserChannels[mode] = channels;
\r
2229 // Set flags for buffer conversion.
\r
2230 stream_.doConvertBuffer[mode] = false;
\r
2231 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2232 stream_.doConvertBuffer[mode] = true;
\r
2233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2234 stream_.nUserChannels[mode] > 1 )
\r
2235 stream_.doConvertBuffer[mode] = true;
\r
2237 // Allocate our JackHandle structure for the stream.
\r
2238 if ( handle == 0 ) {
\r
2240 handle = new JackHandle;
\r
2242 catch ( std::bad_alloc& ) {
\r
2243 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2247 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2248 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2251 stream_.apiHandle = (void *) handle;
\r
2252 handle->client = client;
\r
2254 handle->deviceName[mode] = deviceName;
\r
2256 // Allocate necessary internal buffers.
\r
2257 unsigned long bufferBytes;
\r
2258 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2259 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2260 if ( stream_.userBuffer[mode] == NULL ) {
\r
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2265 if ( stream_.doConvertBuffer[mode] ) {
\r
2267 bool makeBuffer = true;
\r
2268 if ( mode == OUTPUT )
\r
2269 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2270 else { // mode == INPUT
\r
2271 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2272 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2273 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2274 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2278 if ( makeBuffer ) {
\r
2279 bufferBytes *= *bufferSize;
\r
2280 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2281 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2282 if ( stream_.deviceBuffer == NULL ) {
\r
2283 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2289 // Allocate memory for the Jack ports (channels) identifiers.
\r
2290 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2291 if ( handle->ports[mode] == NULL ) {
\r
2292 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2296 stream_.device[mode] = device;
\r
2297 stream_.channelOffset[mode] = firstChannel;
\r
2298 stream_.state = STREAM_STOPPED;
\r
2299 stream_.callbackInfo.object = (void *) this;
\r
2301 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2302 // We had already set up the stream for output.
\r
2303 stream_.mode = DUPLEX;
\r
2305 stream_.mode = mode;
\r
2306 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2307 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2308 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2311 // Register our ports.
\r
2313 if ( mode == OUTPUT ) {
\r
2314 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2315 snprintf( label, 64, "outport %d", i );
\r
2316 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2317 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2321 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2322 snprintf( label, 64, "inport %d", i );
\r
2323 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2324 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2328 // Setup the buffer conversion information structure. We don't use
\r
2329 // buffers to do channel offsets, so we override that parameter
\r
2331 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2337 pthread_cond_destroy( &handle->condition );
\r
2338 jack_client_close( handle->client );
\r
2340 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2341 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2344 stream_.apiHandle = 0;
\r
2347 for ( int i=0; i<2; i++ ) {
\r
2348 if ( stream_.userBuffer[i] ) {
\r
2349 free( stream_.userBuffer[i] );
\r
2350 stream_.userBuffer[i] = 0;
\r
2354 if ( stream_.deviceBuffer ) {
\r
2355 free( stream_.deviceBuffer );
\r
2356 stream_.deviceBuffer = 0;
\r
2362 void RtApiJack :: closeStream( void )
\r
2364 if ( stream_.state == STREAM_CLOSED ) {
\r
2365 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2366 error( RtAudioError::WARNING );
\r
2370 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2373 if ( stream_.state == STREAM_RUNNING )
\r
2374 jack_deactivate( handle->client );
\r
2376 jack_client_close( handle->client );
\r
2380 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2381 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2382 pthread_cond_destroy( &handle->condition );
\r
2384 stream_.apiHandle = 0;
\r
2387 for ( int i=0; i<2; i++ ) {
\r
2388 if ( stream_.userBuffer[i] ) {
\r
2389 free( stream_.userBuffer[i] );
\r
2390 stream_.userBuffer[i] = 0;
\r
2394 if ( stream_.deviceBuffer ) {
\r
2395 free( stream_.deviceBuffer );
\r
2396 stream_.deviceBuffer = 0;
\r
2399 stream_.mode = UNINITIALIZED;
\r
2400 stream_.state = STREAM_CLOSED;
\r
2403 void RtApiJack :: startStream( void )
\r
2406 if ( stream_.state == STREAM_RUNNING ) {
\r
2407 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2408 error( RtAudioError::WARNING );
\r
2412 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2413 int result = jack_activate( handle->client );
\r
2415 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2419 const char **ports;
\r
2421 // Get the list of available ports.
\r
2422 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2424 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2425 if ( ports == NULL) {
\r
2426 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2430 // Now make the port connections. Since RtAudio wasn't designed to
\r
2431 // allow the user to select particular channels of a device, we'll
\r
2432 // just open the first "nChannels" ports with offset.
\r
2433 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2435 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2436 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2439 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2454 // Now make the port connections. See note above.
\r
2455 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2457 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2458 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2461 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2468 handle->drainCounter = 0;
\r
2469 handle->internalDrain = false;
\r
2470 stream_.state = STREAM_RUNNING;
\r
2473 if ( result == 0 ) return;
\r
2474 error( RtAudioError::SYSTEM_ERROR );
\r
2477 void RtApiJack :: stopStream( void )
\r
2480 if ( stream_.state == STREAM_STOPPED ) {
\r
2481 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2482 error( RtAudioError::WARNING );
\r
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2489 if ( handle->drainCounter == 0 ) {
\r
2490 handle->drainCounter = 2;
\r
2491 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2495 jack_deactivate( handle->client );
\r
2496 stream_.state = STREAM_STOPPED;
\r
2499 void RtApiJack :: abortStream( void )
\r
2502 if ( stream_.state == STREAM_STOPPED ) {
\r
2503 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2504 error( RtAudioError::WARNING );
\r
2508 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 handle->drainCounter = 2;
\r
2514 // This function will be called by a spawned thread when the user
\r
2515 // callback function signals that the stream should be stopped or
\r
2516 // aborted. It is necessary to handle it this way because the
\r
2517 // callbackEvent() function must return before the jack_deactivate()
\r
2518 // function will return.
\r
2519 static void *jackStopStream( void *ptr )
\r
2521 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2522 RtApiJack *object = (RtApiJack *) info->object;
\r
2524 object->stopStream();
\r
2525 pthread_exit( NULL );
\r
2528 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2530 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2531 if ( stream_.state == STREAM_CLOSED ) {
\r
2532 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2533 error( RtAudioError::WARNING );
\r
2536 if ( stream_.bufferSize != nframes ) {
\r
2537 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2538 error( RtAudioError::WARNING );
\r
2542 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2545 // Check if we were draining the stream and signal is finished.
\r
2546 if ( handle->drainCounter > 3 ) {
\r
2547 ThreadHandle threadId;
\r
2549 stream_.state = STREAM_STOPPING;
\r
2550 if ( handle->internalDrain == true )
\r
2551 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2553 pthread_cond_signal( &handle->condition );
\r
2557 // Invoke user callback first, to get fresh output data.
\r
2558 if ( handle->drainCounter == 0 ) {
\r
2559 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2560 double streamTime = getStreamTime();
\r
2561 RtAudioStreamStatus status = 0;
\r
2562 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2563 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2564 handle->xrun[0] = false;
\r
2566 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2567 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2568 handle->xrun[1] = false;
\r
2570 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2571 stream_.bufferSize, streamTime, status, info->userData );
\r
2572 if ( cbReturnValue == 2 ) {
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 handle->drainCounter = 2;
\r
2576 pthread_create( &id, NULL, jackStopStream, info );
\r
2579 else if ( cbReturnValue == 1 ) {
\r
2580 handle->drainCounter = 1;
\r
2581 handle->internalDrain = true;
\r
2585 jack_default_audio_sample_t *jackbuffer;
\r
2586 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2589 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2591 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2592 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2593 memset( jackbuffer, 0, bufferBytes );
\r
2597 else if ( stream_.doConvertBuffer[0] ) {
\r
2599 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2603 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2606 else { // no buffer conversion
\r
2607 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2608 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2609 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2614 // Don't bother draining input
\r
2615 if ( handle->drainCounter ) {
\r
2616 handle->drainCounter++;
\r
2620 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2622 if ( stream_.doConvertBuffer[1] ) {
\r
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2625 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2627 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2629 else { // no buffer conversion
\r
2630 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2631 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2632 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2638 RtApi::tickStreamTime();
\r
2641 //******************** End of __UNIX_JACK__ *********************//
\r
2644 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2646 // The ASIO API is designed around a callback scheme, so this
\r
2647 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2648 // Jack. The primary constraint with ASIO is that it only allows
\r
2649 // access to a single driver at a time. Thus, it is not possible to
\r
2650 // have more than one simultaneous RtAudio stream.
\r
2652 // This implementation also requires a number of external ASIO files
\r
2653 // and a few global variables. The ASIO callback scheme does not
\r
2654 // allow for the passing of user data, so we must create a global
\r
2655 // pointer to our callbackInfo structure.
\r
2657 // On unix systems, we make use of a pthread condition variable.
\r
2658 // Since there is no equivalent in Windows, I hacked something based
\r
2659 // on information found in
\r
2660 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2662 #include "asiosys.h"
\r
2664 #include "iasiothiscallresolver.h"
\r
2665 #include "asiodrivers.h"
\r
2668 static AsioDrivers drivers;
\r
2669 static ASIOCallbacks asioCallbacks;
\r
2670 static ASIODriverInfo driverInfo;
\r
2671 static CallbackInfo *asioCallbackInfo;
\r
2672 static bool asioXRun;
\r
2674 struct AsioHandle {
\r
2675 int drainCounter; // Tracks callback counts when draining
\r
2676 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2677 ASIOBufferInfo *bufferInfos;
\r
2681 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2684 // Function declarations (definitions at end of section)
\r
2685 static const char* getAsioErrorString( ASIOError result );
\r
2686 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2687 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2689 RtApiAsio :: RtApiAsio()
\r
2691 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2692 // CoInitialize beforehand, but it must be for appartment threading
\r
2693 // (in which case, CoInitilialize will return S_FALSE here).
\r
2694 coInitialized_ = false;
\r
2695 HRESULT hr = CoInitialize( NULL );
\r
2696 if ( FAILED(hr) ) {
\r
2697 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2698 error( RtAudioError::WARNING );
\r
2700 coInitialized_ = true;
\r
2702 drivers.removeCurrentDriver();
\r
2703 driverInfo.asioVersion = 2;
\r
2705 // See note in DirectSound implementation about GetDesktopWindow().
\r
2706 driverInfo.sysRef = GetForegroundWindow();
\r
2709 RtApiAsio :: ~RtApiAsio()
\r
2711 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2712 if ( coInitialized_ ) CoUninitialize();
\r
2715 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2717 return (unsigned int) drivers.asioGetNumDev();
\r
2720 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2722 RtAudio::DeviceInfo info;
\r
2723 info.probed = false;
\r
2726 unsigned int nDevices = getDeviceCount();
\r
2727 if ( nDevices == 0 ) {
\r
2728 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2729 error( RtAudioError::INVALID_USE );
\r
2733 if ( device >= nDevices ) {
\r
2734 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2735 error( RtAudioError::INVALID_USE );
\r
2739 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2740 if ( stream_.state != STREAM_CLOSED ) {
\r
2741 if ( device >= devices_.size() ) {
\r
2742 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2743 error( RtAudioError::WARNING );
\r
2746 return devices_[ device ];
\r
2749 char driverName[32];
\r
2750 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2751 if ( result != ASE_OK ) {
\r
2752 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2753 errorText_ = errorStream_.str();
\r
2754 error( RtAudioError::WARNING );
\r
2758 info.name = driverName;
\r
2760 if ( !drivers.loadDriver( driverName ) ) {
\r
2761 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2762 errorText_ = errorStream_.str();
\r
2763 error( RtAudioError::WARNING );
\r
2767 result = ASIOInit( &driverInfo );
\r
2768 if ( result != ASE_OK ) {
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 // Determine the device channel information.
\r
2776 long inputChannels, outputChannels;
\r
2777 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2778 if ( result != ASE_OK ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.outputChannels = outputChannels;
\r
2787 info.inputChannels = inputChannels;
\r
2788 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2789 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2791 // Determine the supported sample rates.
\r
2792 info.sampleRates.clear();
\r
2793 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2794 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2795 if ( result == ASE_OK ) {
\r
2796 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2798 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2799 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2803 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2804 ASIOChannelInfo channelInfo;
\r
2805 channelInfo.channel = 0;
\r
2806 channelInfo.isInput = true;
\r
2807 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2808 result = ASIOGetChannelInfo( &channelInfo );
\r
2809 if ( result != ASE_OK ) {
\r
2810 drivers.removeCurrentDriver();
\r
2811 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2812 errorText_ = errorStream_.str();
\r
2813 error( RtAudioError::WARNING );
\r
2817 info.nativeFormats = 0;
\r
2818 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2819 info.nativeFormats |= RTAUDIO_SINT16;
\r
2820 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2821 info.nativeFormats |= RTAUDIO_SINT32;
\r
2822 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2823 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2824 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2825 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2826 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2827 info.nativeFormats |= RTAUDIO_SINT24;
\r
2829 if ( info.outputChannels > 0 )
\r
2830 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2831 if ( info.inputChannels > 0 )
\r
2832 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2834 info.probed = true;
\r
2835 drivers.removeCurrentDriver();
\r
2839 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2841 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2842 object->callbackEvent( index );
\r
2845 void RtApiAsio :: saveDeviceInfo( void )
\r
2849 unsigned int nDevices = getDeviceCount();
\r
2850 devices_.resize( nDevices );
\r
2851 for ( unsigned int i=0; i<nDevices; i++ )
\r
2852 devices_[i] = getDeviceInfo( i );
\r
2855 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2856 unsigned int firstChannel, unsigned int sampleRate,
\r
2857 RtAudioFormat format, unsigned int *bufferSize,
\r
2858 RtAudio::StreamOptions *options )
\r
2859 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2861 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2863 // For ASIO, a duplex stream MUST use the same driver.
\r
2864 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2865 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2869 char driverName[32];
\r
2870 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2871 if ( result != ASE_OK ) {
\r
2872 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2873 errorText_ = errorStream_.str();
\r
2877 // Only load the driver once for duplex stream.
\r
2878 if ( !isDuplexInput ) {
\r
2879 // The getDeviceInfo() function will not work when a stream is open
\r
2880 // because ASIO does not allow multiple devices to run at the same
\r
2881 // time. Thus, we'll probe the system before opening a stream and
\r
2882 // save the results for use by getDeviceInfo().
\r
2883 this->saveDeviceInfo();
\r
2885 if ( !drivers.loadDriver( driverName ) ) {
\r
2886 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2887 errorText_ = errorStream_.str();
\r
2891 result = ASIOInit( &driverInfo );
\r
2892 if ( result != ASE_OK ) {
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2894 errorText_ = errorStream_.str();
\r
2899 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2900 bool buffersAllocated = false;
\r
2901 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2902 unsigned int nChannels;
\r
2905 // Check the device channel count.
\r
2906 long inputChannels, outputChannels;
\r
2907 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2908 if ( result != ASE_OK ) {
\r
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2910 errorText_ = errorStream_.str();
\r
2914 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2915 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2916 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2917 errorText_ = errorStream_.str();
\r
2920 stream_.nDeviceChannels[mode] = channels;
\r
2921 stream_.nUserChannels[mode] = channels;
\r
2922 stream_.channelOffset[mode] = firstChannel;
\r
2924 // Verify the sample rate is supported.
\r
2925 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2926 if ( result != ASE_OK ) {
\r
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2928 errorText_ = errorStream_.str();
\r
2932 // Get the current sample rate
\r
2933 ASIOSampleRate currentRate;
\r
2934 result = ASIOGetSampleRate( ¤tRate );
\r
2935 if ( result != ASE_OK ) {
\r
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2937 errorText_ = errorStream_.str();
\r
2941 // Set the sample rate only if necessary
\r
2942 if ( currentRate != sampleRate ) {
\r
2943 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2944 if ( result != ASE_OK ) {
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2946 errorText_ = errorStream_.str();
\r
2951 // Determine the driver data type.
\r
2952 ASIOChannelInfo channelInfo;
\r
2953 channelInfo.channel = 0;
\r
2954 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2955 else channelInfo.isInput = true;
\r
2956 result = ASIOGetChannelInfo( &channelInfo );
\r
2957 if ( result != ASE_OK ) {
\r
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2959 errorText_ = errorStream_.str();
\r
2963 // Assuming WINDOWS host is always little-endian.
\r
2964 stream_.doByteSwap[mode] = false;
\r
2965 stream_.userFormat = format;
\r
2966 stream_.deviceFormat[mode] = 0;
\r
2967 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2968 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2969 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2971 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2972 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2973 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2975 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2976 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2977 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2979 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2980 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2981 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2983 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2984 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2985 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2988 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2989 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2990 errorText_ = errorStream_.str();
\r
2994 // Set the buffer size. For a duplex stream, this will end up
\r
2995 // setting the buffer size based on the input constraints, which
\r
2997 long minSize, maxSize, preferSize, granularity;
\r
2998 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2999 if ( result != ASE_OK ) {
\r
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3001 errorText_ = errorStream_.str();
\r
3005 if ( isDuplexInput ) {
\r
3006 // When this is the duplex input (output was opened before), then we have to use the same
\r
3007 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3008 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3009 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3010 // to the "bufferSize" param as usual to set up processing buffers.
\r
3012 *bufferSize = stream_.bufferSize;
\r
3015 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3016 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3017 else if ( granularity == -1 ) {
\r
3018 // Make sure bufferSize is a power of two.
\r
3019 int log2_of_min_size = 0;
\r
3020 int log2_of_max_size = 0;
\r
3022 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3023 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3024 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3027 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3028 int min_delta_num = log2_of_min_size;
\r
3030 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3031 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3032 if (current_delta < min_delta) {
\r
3033 min_delta = current_delta;
\r
3034 min_delta_num = i;
\r
3038 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3039 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3040 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity != 0 ) {
\r
3043 // Set to an even multiple of granularity, rounding up.
\r
3044 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3049 // we don't use it anymore, see above!
\r
3050 // Just left it here for the case...
\r
3051 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3052 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3057 stream_.bufferSize = *bufferSize;
\r
3058 stream_.nBuffers = 2;
\r
3060 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3061 else stream_.userInterleaved = true;
\r
3063 // ASIO always uses non-interleaved buffers.
\r
3064 stream_.deviceInterleaved[mode] = false;
\r
3066 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3067 if ( handle == 0 ) {
\r
3069 handle = new AsioHandle;
\r
3071 catch ( std::bad_alloc& ) {
\r
3072 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3075 handle->bufferInfos = 0;
\r
3077 // Create a manual-reset event.
\r
3078 handle->condition = CreateEvent( NULL, // no security
\r
3079 TRUE, // manual-reset
\r
3080 FALSE, // non-signaled initially
\r
3081 NULL ); // unnamed
\r
3082 stream_.apiHandle = (void *) handle;
\r
3085 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3086 // and output separately, we'll have to dispose of previously
\r
3087 // created output buffers for a duplex stream.
\r
3088 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3089 ASIODisposeBuffers();
\r
3090 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3093 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3095 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3096 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3097 if ( handle->bufferInfos == NULL ) {
\r
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3099 errorText_ = errorStream_.str();
\r
3103 ASIOBufferInfo *infos;
\r
3104 infos = handle->bufferInfos;
\r
3105 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3106 infos->isInput = ASIOFalse;
\r
3107 infos->channelNum = i + stream_.channelOffset[0];
\r
3108 infos->buffers[0] = infos->buffers[1] = 0;
\r
3110 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3111 infos->isInput = ASIOTrue;
\r
3112 infos->channelNum = i + stream_.channelOffset[1];
\r
3113 infos->buffers[0] = infos->buffers[1] = 0;
\r
3116 // prepare for callbacks
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3121 // store this class instance before registering callbacks, that are going to use it
\r
3122 asioCallbackInfo = &stream_.callbackInfo;
\r
3123 stream_.callbackInfo.object = (void *) this;
\r
3125 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3126 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3127 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3128 asioCallbacks.asioMessage = &asioMessages;
\r
3129 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3130 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3131 if ( result != ASE_OK ) {
\r
3132 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3133 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3134 // in that case, let's be naïve and try that instead
\r
3135 *bufferSize = preferSize;
\r
3136 stream_.bufferSize = *bufferSize;
\r
3137 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3140 if ( result != ASE_OK ) {
\r
3141 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3142 errorText_ = errorStream_.str();
\r
3145 buffersAllocated = true;
\r
3146 stream_.state = STREAM_STOPPED;
\r
3148 // Set flags for buffer conversion.
\r
3149 stream_.doConvertBuffer[mode] = false;
\r
3150 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3151 stream_.doConvertBuffer[mode] = true;
\r
3152 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3153 stream_.nUserChannels[mode] > 1 )
\r
3154 stream_.doConvertBuffer[mode] = true;
\r
3156 // Allocate necessary internal buffers
\r
3157 unsigned long bufferBytes;
\r
3158 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3159 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3160 if ( stream_.userBuffer[mode] == NULL ) {
\r
3161 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3165 if ( stream_.doConvertBuffer[mode] ) {
\r
3167 bool makeBuffer = true;
\r
3168 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3169 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3170 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3171 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3174 if ( makeBuffer ) {
\r
3175 bufferBytes *= *bufferSize;
\r
3176 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3177 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3178 if ( stream_.deviceBuffer == NULL ) {
\r
3179 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3185 // Determine device latencies
\r
3186 long inputLatency, outputLatency;
\r
3187 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3188 if ( result != ASE_OK ) {
\r
3189 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3190 errorText_ = errorStream_.str();
\r
3191 error( RtAudioError::WARNING); // warn but don't fail
\r
3194 stream_.latency[0] = outputLatency;
\r
3195 stream_.latency[1] = inputLatency;
\r
3198 // Setup the buffer conversion information structure. We don't use
\r
3199 // buffers to do channel offsets, so we override that parameter
\r
3201 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3206 if ( !isDuplexInput ) {
\r
3207 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3208 // So we clean up for single channel only
\r
3210 if ( buffersAllocated )
\r
3211 ASIODisposeBuffers();
\r
3213 drivers.removeCurrentDriver();
\r
3216 CloseHandle( handle->condition );
\r
3217 if ( handle->bufferInfos )
\r
3218 free( handle->bufferInfos );
\r
3221 stream_.apiHandle = 0;
\r
3225 if ( stream_.userBuffer[mode] ) {
\r
3226 free( stream_.userBuffer[mode] );
\r
3227 stream_.userBuffer[mode] = 0;
\r
3230 if ( stream_.deviceBuffer ) {
\r
3231 free( stream_.deviceBuffer );
\r
3232 stream_.deviceBuffer = 0;
\r
3237 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3239 void RtApiAsio :: closeStream()
\r
3241 if ( stream_.state == STREAM_CLOSED ) {
\r
3242 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3243 error( RtAudioError::WARNING );
\r
3247 if ( stream_.state == STREAM_RUNNING ) {
\r
3248 stream_.state = STREAM_STOPPED;
\r
3251 ASIODisposeBuffers();
\r
3252 drivers.removeCurrentDriver();
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3256 CloseHandle( handle->condition );
\r
3257 if ( handle->bufferInfos )
\r
3258 free( handle->bufferInfos );
\r
3260 stream_.apiHandle = 0;
\r
3263 for ( int i=0; i<2; i++ ) {
\r
3264 if ( stream_.userBuffer[i] ) {
\r
3265 free( stream_.userBuffer[i] );
\r
3266 stream_.userBuffer[i] = 0;
\r
3270 if ( stream_.deviceBuffer ) {
\r
3271 free( stream_.deviceBuffer );
\r
3272 stream_.deviceBuffer = 0;
\r
3275 stream_.mode = UNINITIALIZED;
\r
3276 stream_.state = STREAM_CLOSED;
\r
// Cleared in startStream() each time the stream is (re)started; used to
// coordinate the spawned stop-thread with the stream state machine.
bool stopThreadCalled = false;
\r
3281 void RtApiAsio :: startStream()
\r
3284 if ( stream_.state == STREAM_RUNNING ) {
\r
3285 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3286 error( RtAudioError::WARNING );
\r
3290 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3291 ASIOError result = ASIOStart();
\r
3292 if ( result != ASE_OK ) {
\r
3293 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3294 errorText_ = errorStream_.str();
\r
3298 handle->drainCounter = 0;
\r
3299 handle->internalDrain = false;
\r
3300 ResetEvent( handle->condition );
\r
3301 stream_.state = STREAM_RUNNING;
\r
3305 stopThreadCalled = false;
\r
3307 if ( result == ASE_OK ) return;
\r
3308 error( RtAudioError::SYSTEM_ERROR );
\r
3311 void RtApiAsio :: stopStream()
\r
3314 if ( stream_.state == STREAM_STOPPED ) {
\r
3315 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3316 error( RtAudioError::WARNING );
\r
3320 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3322 if ( handle->drainCounter == 0 ) {
\r
3323 handle->drainCounter = 2;
\r
3324 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3328 stream_.state = STREAM_STOPPED;
\r
3330 ASIOError result = ASIOStop();
\r
3331 if ( result != ASE_OK ) {
\r
3332 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3333 errorText_ = errorStream_.str();
\r
3336 if ( result == ASE_OK ) return;
\r
3337 error( RtAudioError::SYSTEM_ERROR );
\r
3340 void RtApiAsio :: abortStream()
\r
3343 if ( stream_.state == STREAM_STOPPED ) {
\r
3344 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3345 error( RtAudioError::WARNING );
\r
3349 // The following lines were commented-out because some behavior was
\r
3350 // noted where the device buffers need to be zeroed to avoid
\r
3351 // continuing sound, even when the device buffers are completely
\r
3352 // disposed. So now, calling abort is the same as calling stop.
\r
3353 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3354 // handle->drainCounter = 2;
\r
3358 // This function will be called by a spawned thread when the user
\r
3359 // callback function signals that the stream should be stopped or
\r
3360 // aborted. It is necessary to handle it this way because the
\r
3361 // callbackEvent() function must return before the ASIOStop()
\r
3362 // function will return.
\r
3363 static unsigned __stdcall asioStopStream( void *ptr )
\r
3365 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3366 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3368 object->stopStream();
\r
3369 _endthreadex( 0 );
\r
3373 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3375 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3376 if ( stream_.state == STREAM_CLOSED ) {
\r
3377 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3378 error( RtAudioError::WARNING );
\r
3382 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3383 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3385 // Check if we were draining the stream and signal if finished.
\r
3386 if ( handle->drainCounter > 3 ) {
\r
3388 stream_.state = STREAM_STOPPING;
\r
3389 if ( handle->internalDrain == false )
\r
3390 SetEvent( handle->condition );
\r
3391 else { // spawn a thread to stop the stream
\r
3392 unsigned threadId;
\r
3393 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3394 &stream_.callbackInfo, 0, &threadId );
\r
3399 // Invoke user callback to get fresh output data UNLESS we are
\r
3400 // draining stream.
\r
3401 if ( handle->drainCounter == 0 ) {
\r
3402 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3403 double streamTime = getStreamTime();
\r
3404 RtAudioStreamStatus status = 0;
\r
3405 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3406 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3409 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3410 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3413 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3414 stream_.bufferSize, streamTime, status, info->userData );
\r
3415 if ( cbReturnValue == 2 ) {
\r
3416 stream_.state = STREAM_STOPPING;
\r
3417 handle->drainCounter = 2;
\r
3418 unsigned threadId;
\r
3419 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3420 &stream_.callbackInfo, 0, &threadId );
\r
3423 else if ( cbReturnValue == 1 ) {
\r
3424 handle->drainCounter = 1;
\r
3425 handle->internalDrain = true;
\r
3429 unsigned int nChannels, bufferBytes, i, j;
\r
3430 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3431 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3433 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3435 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3437 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3438 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3439 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3443 else if ( stream_.doConvertBuffer[0] ) {
\r
3445 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3446 if ( stream_.doByteSwap[0] )
\r
3447 byteSwapBuffer( stream_.deviceBuffer,
\r
3448 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3449 stream_.deviceFormat[0] );
\r
3451 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3452 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3453 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3454 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3460 if ( stream_.doByteSwap[0] )
\r
3461 byteSwapBuffer( stream_.userBuffer[0],
\r
3462 stream_.bufferSize * stream_.nUserChannels[0],
\r
3463 stream_.userFormat );
\r
3465 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3466 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3467 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3468 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3474 // Don't bother draining input
\r
3475 if ( handle->drainCounter ) {
\r
3476 handle->drainCounter++;
\r
3480 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3482 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3484 if (stream_.doConvertBuffer[1]) {
\r
3486 // Always interleave ASIO input data.
\r
3487 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3488 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3489 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3490 handle->bufferInfos[i].buffers[bufferIndex],
\r
3494 if ( stream_.doByteSwap[1] )
\r
3495 byteSwapBuffer( stream_.deviceBuffer,
\r
3496 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3497 stream_.deviceFormat[1] );
\r
3498 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3502 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3503 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3504 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3505 handle->bufferInfos[i].buffers[bufferIndex],
\r
3510 if ( stream_.doByteSwap[1] )
\r
3511 byteSwapBuffer( stream_.userBuffer[1],
\r
3512 stream_.bufferSize * stream_.nUserChannels[1],
\r
3513 stream_.userFormat );
\r
3518 // The following call was suggested by Malte Clasen. While the API
\r
3519 // documentation indicates it should not be required, some device
\r
3520 // drivers apparently do not function correctly without it.
\r
3521 ASIOOutputReady();
\r
3523 RtApi::tickStreamTime();
\r
3527 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3529 // The ASIO documentation says that this usually only happens during
\r
3530 // external sync. Audio processing is not stopped by the driver,
\r
3531 // actual sample rate might not have even changed, maybe only the
\r
3532 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3535 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3537 object->stopStream();
\r
3539 catch ( RtAudioError &exception ) {
\r
3540 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3544 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3547 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3551 switch( selector ) {
\r
3552 case kAsioSelectorSupported:
\r
3553 if ( value == kAsioResetRequest
\r
3554 || value == kAsioEngineVersion
\r
3555 || value == kAsioResyncRequest
\r
3556 || value == kAsioLatenciesChanged
\r
3557 // The following three were added for ASIO 2.0, you don't
\r
3558 // necessarily have to support them.
\r
3559 || value == kAsioSupportsTimeInfo
\r
3560 || value == kAsioSupportsTimeCode
\r
3561 || value == kAsioSupportsInputMonitor)
\r
3564 case kAsioResetRequest:
\r
3565 // Defer the task and perform the reset of the driver during the
\r
3566 // next "safe" situation. You cannot reset the driver right now,
\r
3567 // as this code is called from the driver. Reset the driver is
\r
3568 // done by completely destruct is. I.e. ASIOStop(),
\r
3569 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3571 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3574 case kAsioResyncRequest:
\r
3575 // This informs the application that the driver encountered some
\r
3576 // non-fatal data loss. It is used for synchronization purposes
\r
3577 // of different media. Added mainly to work around the Win16Mutex
\r
3578 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3579 // which could lose data because the Mutex was held too long by
\r
3580 // another thread. However a driver can issue it in other
\r
3581 // situations, too.
\r
3582 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3586 case kAsioLatenciesChanged:
\r
3587 // This will inform the host application that the drivers were
\r
3588 // latencies changed. Beware, it this does not mean that the
\r
3589 // buffer sizes have changed! You might need to update internal
\r
3591 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3594 case kAsioEngineVersion:
\r
3595 // Return the supported ASIO version of the host application. If
\r
3596 // a host application does not implement this selector, ASIO 1.0
\r
3597 // is assumed by the driver.
\r
3600 case kAsioSupportsTimeInfo:
\r
3601 // Informs the driver whether the
\r
3602 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3603 // For compatibility with ASIO 1.0 drivers the host application
\r
3604 // should always support the "old" bufferSwitch method, too.
\r
3607 case kAsioSupportsTimeCode:
\r
3608 // Informs the driver whether application is interested in time
\r
3609 // code info. If an application does not need to know about time
\r
3610 // code, the driver has less work to do.
\r
3617 static const char* getAsioErrorString( ASIOError result )
\r
3622 const char*message;
\r
3625 static const Messages m[] =
\r
3627 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3628 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3629 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3630 { ASE_InvalidMode, "Invalid mode." },
\r
3631 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3632 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3633 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3636 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3637 if ( m[i].value == result ) return m[i].message;
\r
3639 return "Unknown error.";
\r
3642 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3646 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3648 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3649 // - Introduces support for the Windows WASAPI API
\r
3650 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3651 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3652 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3657 #include <audioclient.h>
\r
3659 #include <mmdeviceapi.h>
\r
3660 #include <functiondiscoverykeys_devpkey.h>
\r
3662 //=============================================================================
\r
// Release a COM interface pointer (if non-NULL) and null it out so a
// second release cannot occur. Used throughout the WASAPI backend.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3671 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3673 //-----------------------------------------------------------------------------
\r
3675 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3676 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3677 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3678 // provide intermediate storage for read / write synchronization.
\r
3679 class WasapiBuffer
\r
3683 : buffer_( NULL ),
\r
3692 // sets the length of the internal ring buffer
\r
3693 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3696 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3698 bufferSize_ = bufferSize;
\r
3703 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3704 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3706 if ( !buffer || // incoming buffer is NULL
\r
3707 bufferSize == 0 || // incoming buffer has no data
\r
3708 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3713 unsigned int relOutIndex = outIndex_;
\r
3714 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3715 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3716 relOutIndex += bufferSize_;
\r
3719 // "in" index can end on the "out" index but cannot begin at it
\r
3720 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3721 return false; // not enough space between "in" index and "out" index
\r
3724 // copy buffer from external to internal
\r
3725 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3726 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3727 int fromInSize = bufferSize - fromZeroSize;
\r
3731 case RTAUDIO_SINT8:
\r
3732 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3733 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3735 case RTAUDIO_SINT16:
\r
3736 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3737 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3739 case RTAUDIO_SINT24:
\r
3740 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3741 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3743 case RTAUDIO_SINT32:
\r
3744 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3745 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3747 case RTAUDIO_FLOAT32:
\r
3748 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3749 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3751 case RTAUDIO_FLOAT64:
\r
3752 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3753 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3757 // update "in" index
\r
3758 inIndex_ += bufferSize;
\r
3759 inIndex_ %= bufferSize_;
\r
3764 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3765 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3767 if ( !buffer || // incoming buffer is NULL
\r
3768 bufferSize == 0 || // incoming buffer has no data
\r
3769 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3774 unsigned int relInIndex = inIndex_;
\r
3775 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3776 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3777 relInIndex += bufferSize_;
\r
3780 // "out" index can begin at and end on the "in" index
\r
3781 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3782 return false; // not enough space between "out" index and "in" index
\r
3785 // copy buffer from internal to external
\r
3786 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3787 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3788 int fromOutSize = bufferSize - fromZeroSize;
\r
3792 case RTAUDIO_SINT8:
\r
3793 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3794 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3796 case RTAUDIO_SINT16:
\r
3797 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3798 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3800 case RTAUDIO_SINT24:
\r
3801 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3802 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3804 case RTAUDIO_SINT32:
\r
3805 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3806 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3808 case RTAUDIO_FLOAT32:
\r
3809 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3810 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3812 case RTAUDIO_FLOAT64:
\r
3813 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3814 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3818 // update "out" index
\r
3819 outIndex_ += bufferSize;
\r
3820 outIndex_ %= bufferSize_;
\r
3827 unsigned int bufferSize_;
\r
3828 unsigned int inIndex_;
\r
3829 unsigned int outIndex_;
\r
3832 //-----------------------------------------------------------------------------
\r
3834 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3835 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3836 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3837 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3838 // one rate and its multiple.
\r
3839 void convertBufferWasapi( char* outBuffer,
\r
3840 const char* inBuffer,
\r
3841 const unsigned int& channelCount,
\r
3842 const unsigned int& inSampleRate,
\r
3843 const unsigned int& outSampleRate,
\r
3844 const unsigned int& inSampleCount,
\r
3845 unsigned int& outSampleCount,
\r
3846 const RtAudioFormat& format )
\r
3848 // calculate the new outSampleCount and relative sampleStep
\r
3849 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3850 float sampleStep = 1.0f / sampleRatio;
\r
3851 float inSampleFraction = 0.0f;
\r
3853 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3855 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3856 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3858 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3862 case RTAUDIO_SINT8:
\r
3863 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3865 case RTAUDIO_SINT16:
\r
3866 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3868 case RTAUDIO_SINT24:
\r
3869 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3871 case RTAUDIO_SINT32:
\r
3872 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3874 case RTAUDIO_FLOAT32:
\r
3875 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3877 case RTAUDIO_FLOAT64:
\r
3878 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3882 // jump to next in sample
\r
3883 inSampleFraction += sampleStep;
\r
3887 //-----------------------------------------------------------------------------
\r
3889 // A structure to hold various information related to the WASAPI implementation.
\r
3890 struct WasapiHandle
\r
3892 IAudioClient* captureAudioClient;
\r
3893 IAudioClient* renderAudioClient;
\r
3894 IAudioCaptureClient* captureClient;
\r
3895 IAudioRenderClient* renderClient;
\r
3896 HANDLE captureEvent;
\r
3897 HANDLE renderEvent;
\r
3900 : captureAudioClient( NULL ),
\r
3901 renderAudioClient( NULL ),
\r
3902 captureClient( NULL ),
\r
3903 renderClient( NULL ),
\r
3904 captureEvent( NULL ),
\r
3905 renderEvent( NULL ) {}
\r
3908 //=============================================================================
\r
3910 RtApiWasapi::RtApiWasapi()
\r
3911 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3913 // WASAPI can run either apartment or multi-threaded
\r
3914 HRESULT hr = CoInitialize( NULL );
\r
3915 if ( !FAILED( hr ) )
\r
3916 coInitialized_ = true;
\r
3918 // Instantiate device enumerator
\r
3919 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3920 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3921 ( void** ) &deviceEnumerator_ );
\r
3923 if ( FAILED( hr ) ) {
\r
3924 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3925 error( RtAudioError::DRIVER_ERROR );
\r
3929 //-----------------------------------------------------------------------------
\r
3931 RtApiWasapi::~RtApiWasapi()
\r
3933 if ( stream_.state != STREAM_CLOSED )
\r
3936 SAFE_RELEASE( deviceEnumerator_ );
\r
3938 // If this object previously called CoInitialize()
\r
3939 if ( coInitialized_ )
\r
3943 //=============================================================================
\r
3945 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3947 unsigned int captureDeviceCount = 0;
\r
3948 unsigned int renderDeviceCount = 0;
\r
3950 IMMDeviceCollection* captureDevices = NULL;
\r
3951 IMMDeviceCollection* renderDevices = NULL;
\r
3953 // Count capture devices
\r
3954 errorText_.clear();
\r
3955 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3956 if ( FAILED( hr ) ) {
\r
3957 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3961 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3962 if ( FAILED( hr ) ) {
\r
3963 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3967 // Count render devices
\r
3968 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3974 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3981 // release all references
\r
3982 SAFE_RELEASE( captureDevices );
\r
3983 SAFE_RELEASE( renderDevices );
\r
3985 if ( errorText_.empty() )
\r
3986 return captureDeviceCount + renderDeviceCount;
\r
3988 error( RtAudioError::DRIVER_ERROR );
\r
3992 //-----------------------------------------------------------------------------
\r
3994 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3996 RtAudio::DeviceInfo info;
\r
3997 unsigned int captureDeviceCount = 0;
\r
3998 unsigned int renderDeviceCount = 0;
\r
3999 std::string defaultDeviceName;
\r
4000 bool isCaptureDevice = false;
\r
4002 PROPVARIANT deviceNameProp;
\r
4003 PROPVARIANT defaultDeviceNameProp;
\r
4005 IMMDeviceCollection* captureDevices = NULL;
\r
4006 IMMDeviceCollection* renderDevices = NULL;
\r
4007 IMMDevice* devicePtr = NULL;
\r
4008 IMMDevice* defaultDevicePtr = NULL;
\r
4009 IAudioClient* audioClient = NULL;
\r
4010 IPropertyStore* devicePropStore = NULL;
\r
4011 IPropertyStore* defaultDevicePropStore = NULL;
\r
4013 WAVEFORMATEX* deviceFormat = NULL;
\r
4014 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4017 info.probed = false;
\r
4019 // Count capture devices
\r
4020 errorText_.clear();
\r
4021 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4022 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4023 if ( FAILED( hr ) ) {
\r
4024 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4028 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4029 if ( FAILED( hr ) ) {
\r
4030 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4034 // Count render devices
\r
4035 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4036 if ( FAILED( hr ) ) {
\r
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4041 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4047 // validate device index
\r
4048 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4050 errorType = RtAudioError::INVALID_USE;
\r
4054 // determine whether index falls within capture or render devices
\r
4055 if ( device >= renderDeviceCount ) {
\r
4056 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4057 if ( FAILED( hr ) ) {
\r
4058 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4061 isCaptureDevice = true;
\r
4064 hr = renderDevices->Item( device, &devicePtr );
\r
4065 if ( FAILED( hr ) ) {
\r
4066 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4069 isCaptureDevice = false;
\r
4072 // get default device name
\r
4073 if ( isCaptureDevice ) {
\r
4074 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4081 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4088 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4089 if ( FAILED( hr ) ) {
\r
4090 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4093 PropVariantInit( &defaultDeviceNameProp );
\r
4095 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4096 if ( FAILED( hr ) ) {
\r
4097 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4101 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4104 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4105 if ( FAILED( hr ) ) {
\r
4106 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4110 PropVariantInit( &deviceNameProp );
\r
4112 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4113 if ( FAILED( hr ) ) {
\r
4114 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4118 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4121 if ( isCaptureDevice ) {
\r
4122 info.isDefaultInput = info.name == defaultDeviceName;
\r
4123 info.isDefaultOutput = false;
\r
4126 info.isDefaultInput = false;
\r
4127 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4131 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4132 if ( FAILED( hr ) ) {
\r
4133 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4137 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4138 if ( FAILED( hr ) ) {
\r
4139 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4143 if ( isCaptureDevice ) {
\r
4144 info.inputChannels = deviceFormat->nChannels;
\r
4145 info.outputChannels = 0;
\r
4146 info.duplexChannels = 0;
\r
4149 info.inputChannels = 0;
\r
4150 info.outputChannels = deviceFormat->nChannels;
\r
4151 info.duplexChannels = 0;
\r
4155 info.sampleRates.clear();
\r
4157 // allow support for all sample rates as we have a built-in sample rate converter
\r
4158 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4159 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4161 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4164 info.nativeFormats = 0;
\r
4166 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4167 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4168 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4170 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4171 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4173 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4174 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4177 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4178 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4179 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4181 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4182 info.nativeFormats |= RTAUDIO_SINT8;
\r
4184 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4185 info.nativeFormats |= RTAUDIO_SINT16;
\r
4187 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4188 info.nativeFormats |= RTAUDIO_SINT24;
\r
4190 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4191 info.nativeFormats |= RTAUDIO_SINT32;
\r
4196 info.probed = true;
\r
4199 // release all references
\r
4200 PropVariantClear( &deviceNameProp );
\r
4201 PropVariantClear( &defaultDeviceNameProp );
\r
4203 SAFE_RELEASE( captureDevices );
\r
4204 SAFE_RELEASE( renderDevices );
\r
4205 SAFE_RELEASE( devicePtr );
\r
4206 SAFE_RELEASE( defaultDevicePtr );
\r
4207 SAFE_RELEASE( audioClient );
\r
4208 SAFE_RELEASE( devicePropStore );
\r
4209 SAFE_RELEASE( defaultDevicePropStore );
\r
4211 CoTaskMemFree( deviceFormat );
\r
4212 CoTaskMemFree( closestMatchFormat );
\r
4214 if ( !errorText_.empty() )
\r
4215 error( errorType );
\r
4219 //-----------------------------------------------------------------------------
\r
4221 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4223 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4224 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4232 //-----------------------------------------------------------------------------
\r
4234 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4236 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4237 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4245 //-----------------------------------------------------------------------------
\r
4247 void RtApiWasapi::closeStream( void )
\r
4249 if ( stream_.state == STREAM_CLOSED ) {
\r
4250 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4251 error( RtAudioError::WARNING );
\r
4255 if ( stream_.state != STREAM_STOPPED )
\r
4258 // clean up stream memory
\r
4259 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4260 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4262 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4263 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4265 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4266 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4268 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4269 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4271 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4272 stream_.apiHandle = NULL;
\r
4274 for ( int i = 0; i < 2; i++ ) {
\r
4275 if ( stream_.userBuffer[i] ) {
\r
4276 free( stream_.userBuffer[i] );
\r
4277 stream_.userBuffer[i] = 0;
\r
4281 if ( stream_.deviceBuffer ) {
\r
4282 free( stream_.deviceBuffer );
\r
4283 stream_.deviceBuffer = 0;
\r
4286 // update stream state
\r
4287 stream_.state = STREAM_CLOSED;
\r
4290 //-----------------------------------------------------------------------------
\r
4292 void RtApiWasapi::startStream( void )
\r
4296 if ( stream_.state == STREAM_RUNNING ) {
\r
4297 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4298 error( RtAudioError::WARNING );
\r
4302 // update stream state
\r
4303 stream_.state = STREAM_RUNNING;
\r
4305 // create WASAPI stream thread
\r
4306 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4308 if ( !stream_.callbackInfo.thread ) {
\r
4309 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4310 error( RtAudioError::THREAD_ERROR );
\r
4313 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4314 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4318 //-----------------------------------------------------------------------------
\r
4320 void RtApiWasapi::stopStream( void )
\r
4324 if ( stream_.state == STREAM_STOPPED ) {
\r
4325 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4326 error( RtAudioError::WARNING );
\r
4330 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4331 stream_.state = STREAM_STOPPING;
\r
4333 // wait until stream thread is stopped
\r
4334 while( stream_.state != STREAM_STOPPED ) {
\r
4338 // Wait for the last buffer to play before stopping.
\r
4339 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4341 // stop capture client if applicable
\r
4342 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4343 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4344 if ( FAILED( hr ) ) {
\r
4345 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4346 error( RtAudioError::DRIVER_ERROR );
\r
4351 // stop render client if applicable
\r
4352 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4353 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4354 if ( FAILED( hr ) ) {
\r
4355 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4356 error( RtAudioError::DRIVER_ERROR );
\r
4361 // close thread handle
\r
4362 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4363 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4364 error( RtAudioError::THREAD_ERROR );
\r
4368 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4371 //-----------------------------------------------------------------------------
\r
4373 void RtApiWasapi::abortStream( void )
\r
4377 if ( stream_.state == STREAM_STOPPED ) {
\r
4378 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4379 error( RtAudioError::WARNING );
\r
4383 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4384 stream_.state = STREAM_STOPPING;
\r
4386 // wait until stream thread is stopped
\r
4387 while ( stream_.state != STREAM_STOPPED ) {
\r
4391 // stop capture client if applicable
\r
4392 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4393 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4396 error( RtAudioError::DRIVER_ERROR );
\r
4401 // stop render client if applicable
\r
4402 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4403 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4404 if ( FAILED( hr ) ) {
\r
4405 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4406 error( RtAudioError::DRIVER_ERROR );
\r
4411 // close thread handle
\r
4412 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4413 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4414 error( RtAudioError::THREAD_ERROR );
\r
4418 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4421 //-----------------------------------------------------------------------------
\r
4423 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4424 unsigned int firstChannel, unsigned int sampleRate,
\r
4425 RtAudioFormat format, unsigned int* bufferSize,
\r
4426 RtAudio::StreamOptions* options )
\r
4428 bool methodResult = FAILURE;
\r
4429 unsigned int captureDeviceCount = 0;
\r
4430 unsigned int renderDeviceCount = 0;
\r
4432 IMMDeviceCollection* captureDevices = NULL;
\r
4433 IMMDeviceCollection* renderDevices = NULL;
\r
4434 IMMDevice* devicePtr = NULL;
\r
4435 WAVEFORMATEX* deviceFormat = NULL;
\r
4436 unsigned int bufferBytes;
\r
4437 stream_.state = STREAM_STOPPED;
\r
4439 // create API Handle if not already created
\r
4440 if ( !stream_.apiHandle )
\r
4441 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4443 // Count capture devices
\r
4444 errorText_.clear();
\r
4445 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4446 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4447 if ( FAILED( hr ) ) {
\r
4448 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4452 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4453 if ( FAILED( hr ) ) {
\r
4454 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4458 // Count render devices
\r
4459 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4460 if ( FAILED( hr ) ) {
\r
4461 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4465 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4466 if ( FAILED( hr ) ) {
\r
4467 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4471 // validate device index
\r
4472 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4473 errorType = RtAudioError::INVALID_USE;
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4478 // determine whether index falls within capture or render devices
\r
4479 if ( device >= renderDeviceCount ) {
\r
4480 if ( mode != INPUT ) {
\r
4481 errorType = RtAudioError::INVALID_USE;
\r
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4486 // retrieve captureAudioClient from devicePtr
\r
4487 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4489 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4490 if ( FAILED( hr ) ) {
\r
4491 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4495 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4496 NULL, ( void** ) &captureAudioClient );
\r
4497 if ( FAILED( hr ) ) {
\r
4498 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4502 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4503 if ( FAILED( hr ) ) {
\r
4504 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4508 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4509 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4512 if ( mode != OUTPUT ) {
\r
4513 errorType = RtAudioError::INVALID_USE;
\r
4514 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4518 // retrieve renderAudioClient from devicePtr
\r
4519 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4521 hr = renderDevices->Item( device, &devicePtr );
\r
4522 if ( FAILED( hr ) ) {
\r
4523 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4527 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4528 NULL, ( void** ) &renderAudioClient );
\r
4529 if ( FAILED( hr ) ) {
\r
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4534 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4535 if ( FAILED( hr ) ) {
\r
4536 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4540 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4541 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4544 // fill stream data
\r
4545 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4546 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4547 stream_.mode = DUPLEX;
\r
4550 stream_.mode = mode;
\r
4553 stream_.device[mode] = device;
\r
4554 stream_.doByteSwap[mode] = false;
\r
4555 stream_.sampleRate = sampleRate;
\r
4556 stream_.bufferSize = *bufferSize;
\r
4557 stream_.nBuffers = 1;
\r
4558 stream_.nUserChannels[mode] = channels;
\r
4559 stream_.channelOffset[mode] = firstChannel;
\r
4560 stream_.userFormat = format;
\r
4561 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4563 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4564 stream_.userInterleaved = false;
\r
4566 stream_.userInterleaved = true;
\r
4567 stream_.deviceInterleaved[mode] = true;
\r
4569 // Set flags for buffer conversion.
\r
4570 stream_.doConvertBuffer[mode] = false;
\r
4571 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4572 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4573 stream_.doConvertBuffer[mode] = true;
\r
4574 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4575 stream_.nUserChannels[mode] > 1 )
\r
4576 stream_.doConvertBuffer[mode] = true;
\r
4578 if ( stream_.doConvertBuffer[mode] )
\r
4579 setConvertInfo( mode, 0 );
\r
4581 // Allocate necessary internal buffers
\r
4582 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4584 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4585 if ( !stream_.userBuffer[mode] ) {
\r
4586 errorType = RtAudioError::MEMORY_ERROR;
\r
4587 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4591 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4592 stream_.callbackInfo.priority = 15;
\r
4594 stream_.callbackInfo.priority = 0;
\r
4596 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4597 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4599 methodResult = SUCCESS;
\r
4603 SAFE_RELEASE( captureDevices );
\r
4604 SAFE_RELEASE( renderDevices );
\r
4605 SAFE_RELEASE( devicePtr );
\r
4606 CoTaskMemFree( deviceFormat );
\r
4608 // if method failed, close the stream
\r
4609 if ( methodResult == FAILURE )
\r
4612 if ( !errorText_.empty() )
\r
4613 error( errorType );
\r
4614 return methodResult;
\r
4617 //=============================================================================
\r
// Win32 thread entry trampoline: casts the opaque thread argument back to
// the owning RtApiWasapi instance and runs its stream-servicing loop
// (wasapiThread) on this newly created thread.
4619 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )

4622 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Helper-thread entry point used when the user callback returns 1: invokes
// stopStream() on the owning RtApiWasapi instance so the processing thread
// can be stopped without joining itself.
4627 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )

4630 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Helper-thread entry point used when the user callback returns 2: invokes
// abortStream() on the owning RtApiWasapi instance (immediate stop, no drain).
4635 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )

4638 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4643 //-----------------------------------------------------------------------------
\r
// Dedicated stream-servicing thread body for the WASAPI backend (entered via
// the runWasapiThread trampoline).  Initializes/starts the capture and render
// audio clients as applicable, then loops until stream_.state becomes
// STREAM_STOPPING: pulling captured audio, invoking the user callback, and
// pushing rendered audio, with sample-rate conversion between the device mix
// format rate and the user-requested rate.
// NOTE(review): this copy of the file appears to be a line-sampled fragment
// (stray numeric line prefixes, elided braces and error-exit lines) -- verify
// any functional edit against the canonical RtAudio source before building.
4645 void RtApiWasapi::wasapiThread()

4647 // as this is a new thread, we must CoInitialize it

4648 CoInitialize( NULL );

4652 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4653 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4654 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4655 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4656 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4657 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4659 WAVEFORMATEX* captureFormat = NULL;

4660 WAVEFORMATEX* renderFormat = NULL;

4661 float captureSrRatio = 0.0f;

4662 float renderSrRatio = 0.0f;

4663 WasapiBuffer captureBuffer;

4664 WasapiBuffer renderBuffer;

4666 // declare local stream variables

4667 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4668 BYTE* streamBuffer = NULL;

4669 unsigned long captureFlags = 0;

4670 unsigned int bufferFrameCount = 0;

4671 unsigned int numFramesPadding = 0;

4672 unsigned int convBufferSize = 0;

4673 bool callbackPushed = false;

4674 bool callbackPulled = false;

4675 bool callbackStopped = false;

4676 int callbackResult = 0;

4678 // convBuffer is used to store converted buffers between WASAPI and the user

4679 char* convBuffer = NULL;

4680 unsigned int convBuffSize = 0;

4681 unsigned int deviceBuffSize = 0;

4683 errorText_.clear();

4684 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4686 // Attempt to assign "Pro Audio" characteristic to thread

4687 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4689 DWORD taskIndex = 0;

4690 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

// NOTE(review): neither AvrtDll nor the looked-up function pointer is
// NULL-checked before the call below -- assumes AVRT.dll is present.
4691 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4692 FreeLibrary( AvrtDll );

4695 // start capture stream if applicable

4696 if ( captureAudioClient ) {

4697 hr = captureAudioClient->GetMixFormat( &captureFormat );

4698 if ( FAILED( hr ) ) {

4699 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

// ratio of the device mix-format rate to the user-requested stream rate;
// used to scale buffer sizes for the sample-rate conversion below
4703 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4705 // initialize capture stream according to desire buffer size

4706 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

4707 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

// first-time initialization only: captureClient is non-NULL on re-entry
4709 if ( !captureClient ) {

4710 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4711 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4712 desiredBufferPeriod,

4713 desiredBufferPeriod,

4716 if ( FAILED( hr ) ) {

4717 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4721 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4722 ( void** ) &captureClient );

4723 if ( FAILED( hr ) ) {

4724 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4728 // configure captureEvent to trigger on every available capture buffer

4729 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4730 if ( !captureEvent ) {

4731 errorType = RtAudioError::SYSTEM_ERROR;

4732 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4736 hr = captureAudioClient->SetEventHandle( captureEvent );

4737 if ( FAILED( hr ) ) {

4738 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// publish the newly created client/event back into the shared handle
4742 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4743 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4746 unsigned int inBufferSize = 0;

4747 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4748 if ( FAILED( hr ) ) {

4749 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4753 // scale outBufferSize according to stream->user sample rate ratio

4754 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4755 inBufferSize *= stream_.nDeviceChannels[INPUT];

4757 // set captureBuffer size

4758 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4760 // reset the capture stream

4761 hr = captureAudioClient->Reset();

4762 if ( FAILED( hr ) ) {

4763 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4767 // start the capture stream

4768 hr = captureAudioClient->Start();

4769 if ( FAILED( hr ) ) {

4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4775 // start render stream if applicable

4776 if ( renderAudioClient ) {

4777 hr = renderAudioClient->GetMixFormat( &renderFormat );

4778 if ( FAILED( hr ) ) {

4779 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4783 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4785 // initialize render stream according to desire buffer size

4786 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4787 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

// first-time initialization only: renderClient is non-NULL on re-entry
4789 if ( !renderClient ) {

4790 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4791 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4792 desiredBufferPeriod,

4793 desiredBufferPeriod,

4796 if ( FAILED( hr ) ) {

4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4801 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4802 ( void** ) &renderClient );

4803 if ( FAILED( hr ) ) {

4804 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4808 // configure renderEvent to trigger on every available render buffer

4809 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4810 if ( !renderEvent ) {

4811 errorType = RtAudioError::SYSTEM_ERROR;

4812 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4816 hr = renderAudioClient->SetEventHandle( renderEvent );

4817 if ( FAILED( hr ) ) {

4818 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

// publish the newly created client/event back into the shared handle
4822 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4823 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4826 unsigned int outBufferSize = 0;

4827 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4828 if ( FAILED( hr ) ) {

4829 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4833 // scale inBufferSize according to user->stream sample rate ratio

4834 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4835 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4837 // set renderBuffer size

4838 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4840 // reset the render stream

4841 hr = renderAudioClient->Reset();

4842 if ( FAILED( hr ) ) {

4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4847 // start the render stream

4848 hr = renderAudioClient->Start();

4849 if ( FAILED( hr ) ) {

4850 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// size the sample-rate-conversion and device buffers for the active
// direction(s); DUPLEX takes the max of the capture and render requirements
4855 if ( stream_.mode == INPUT ) {

4856 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4857 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4859 else if ( stream_.mode == OUTPUT ) {

4860 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4861 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4863 else if ( stream_.mode == DUPLEX ) {

4864 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4865 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4866 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4867 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

// convBuffer is freed after the loop below; stream_.deviceBuffer is owned
// by the stream structure
4870 convBuffer = ( char* ) malloc( convBuffSize );

4871 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4872 if ( !convBuffer || !stream_.deviceBuffer ) {

4873 errorType = RtAudioError::MEMORY_ERROR;

4874 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4878 // stream process loop

// each pass: (1) pull + convert input, (2) invoke the user callback,
// (3) convert + push output, (4) exchange buffers with WASAPI
4879 while ( stream_.state != STREAM_STOPPING ) {

4880 if ( !callbackPulled ) {

4883 // 1. Pull callback buffer from inputBuffer

4884 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4885 // Convert callback buffer to user format

4887 if ( captureAudioClient ) {

4888 // Pull callback buffer from inputBuffer

4889 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4890 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4891 stream_.deviceFormat[INPUT] );

4893 if ( callbackPulled ) {

4894 // Convert callback buffer to user sample rate

4895 convertBufferWasapi( stream_.deviceBuffer,

4897 stream_.nDeviceChannels[INPUT],

4898 captureFormat->nSamplesPerSec,

4899 stream_.sampleRate,

4900 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4902 stream_.deviceFormat[INPUT] );

4904 if ( stream_.doConvertBuffer[INPUT] ) {

4905 // Convert callback buffer to user format

4906 convertBuffer( stream_.userBuffer[INPUT],

4907 stream_.deviceBuffer,

4908 stream_.convertInfo[INPUT] );

4911 // no further conversion, simple copy deviceBuffer to userBuffer

4912 memcpy( stream_.userBuffer[INPUT],

4913 stream_.deviceBuffer,

4914 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4919 // if there is no capture stream, set callbackPulled flag

4920 callbackPulled = true;

4923 // Execute Callback

4924 // ================

4925 // 1. Execute user callback method

4926 // 2. Handle return value from callback

4928 // if callback has not requested the stream to stop

4929 if ( callbackPulled && !callbackStopped ) {

4930 // Execute user callback method

4931 callbackResult = callback( stream_.userBuffer[OUTPUT],

4932 stream_.userBuffer[INPUT],

4933 stream_.bufferSize,

4935 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4936 stream_.callbackInfo.userData );

4938 // Handle return value from callback

// 1 == stop request (drain), 2 == abort request; each is honored from a
// separate helper thread so this thread is not asked to join itself
4939 if ( callbackResult == 1 ) {

4940 // instantiate a thread to stop this thread

4941 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4942 if ( !threadHandle ) {

4943 errorType = RtAudioError::THREAD_ERROR;

4944 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4947 else if ( !CloseHandle( threadHandle ) ) {

4948 errorType = RtAudioError::THREAD_ERROR;

4949 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4953 callbackStopped = true;

4955 else if ( callbackResult == 2 ) {

4956 // instantiate a thread to stop this thread

4957 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4958 if ( !threadHandle ) {

4959 errorType = RtAudioError::THREAD_ERROR;

4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4963 else if ( !CloseHandle( threadHandle ) ) {

4964 errorType = RtAudioError::THREAD_ERROR;

4965 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4969 callbackStopped = true;

4974 // Callback Output

4975 // ===============

4976 // 1. Convert callback buffer to stream format

4977 // 2. Convert callback buffer to stream sample rate and channel count

4978 // 3. Push callback buffer into outputBuffer

4980 if ( renderAudioClient && callbackPulled ) {

4981 if ( stream_.doConvertBuffer[OUTPUT] ) {

4982 // Convert callback buffer to stream format

4983 convertBuffer( stream_.deviceBuffer,

4984 stream_.userBuffer[OUTPUT],

4985 stream_.convertInfo[OUTPUT] );

4989 // Convert callback buffer to stream sample rate

4990 convertBufferWasapi( convBuffer,

4991 stream_.deviceBuffer,

4992 stream_.nDeviceChannels[OUTPUT],

4993 stream_.sampleRate,

4994 renderFormat->nSamplesPerSec,

4995 stream_.bufferSize,

4997 stream_.deviceFormat[OUTPUT] );

4999 // Push callback buffer into outputBuffer

5000 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5001 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5002 stream_.deviceFormat[OUTPUT] );

5005 // if there is no render stream, set callbackPushed flag

5006 callbackPushed = true;

5011 // 1. Get capture buffer from stream

5012 // 2. Push capture buffer into inputBuffer

5013 // 3. If 2. was successful: Release capture buffer

5015 if ( captureAudioClient ) {

5016 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5017 if ( !callbackPulled ) {

5018 WaitForSingleObject( captureEvent, INFINITE );

5021 // Get capture buffer from stream

5022 hr = captureClient->GetBuffer( &streamBuffer,

5023 &bufferFrameCount,

5024 &captureFlags, NULL, NULL );

5025 if ( FAILED( hr ) ) {

5026 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5030 if ( bufferFrameCount != 0 ) {

5031 // Push capture buffer into inputBuffer

5032 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5033 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5034 stream_.deviceFormat[INPUT] ) )

5036 // Release capture buffer

5037 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5038 if ( FAILED( hr ) ) {

5039 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5045 // Inform WASAPI that capture was unsuccessful

5046 hr = captureClient->ReleaseBuffer( 0 );

5047 if ( FAILED( hr ) ) {

5048 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5055 // Inform WASAPI that capture was unsuccessful

5056 hr = captureClient->ReleaseBuffer( 0 );

5057 if ( FAILED( hr ) ) {

5058 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5066 // 1. Get render buffer from stream

5067 // 2. Pull next buffer from outputBuffer

5068 // 3. If 2. was successful: Fill render buffer with next buffer

5069 // Release render buffer

5071 if ( renderAudioClient ) {

5072 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5073 if ( callbackPulled && !callbackPushed ) {

5074 WaitForSingleObject( renderEvent, INFINITE );

5077 // Get render buffer from stream

5078 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5079 if ( FAILED( hr ) ) {

5080 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5084 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5085 if ( FAILED( hr ) ) {

5086 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// writable frames = total buffer size minus frames still queued for playback
5090 bufferFrameCount -= numFramesPadding;

5092 if ( bufferFrameCount != 0 ) {

5093 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5094 if ( FAILED( hr ) ) {

5095 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5099 // Pull next buffer from outputBuffer

5100 // Fill render buffer with next buffer

5101 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5102 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5103 stream_.deviceFormat[OUTPUT] ) )

5105 // Release render buffer

5106 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5107 if ( FAILED( hr ) ) {

5108 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5114 // Inform WASAPI that render was unsuccessful

5115 hr = renderClient->ReleaseBuffer( 0, 0 );

5116 if ( FAILED( hr ) ) {

5117 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5124 // Inform WASAPI that render was unsuccessful

5125 hr = renderClient->ReleaseBuffer( 0, 0 );

5126 if ( FAILED( hr ) ) {

5127 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5133 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5134 if ( callbackPushed ) {

5135 callbackPulled = false;

5138 // tick stream time

5139 RtApi::tickStreamTime();

// cleanup: release the COM-allocated mix formats and the conversion buffer
5144 CoTaskMemFree( captureFormat );

5145 CoTaskMemFree( renderFormat );

5147 free ( convBuffer );

5151 // update stream state

5152 stream_.state = STREAM_STOPPED;

5154 if ( errorText_.empty() )

5157 error( errorType );
\r
5160 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5164 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5166 // Modified by Robin Davies, October 2005
\r
5167 // - Improvements to DirectX pointer chasing.
\r
5168 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5169 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5170 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5171 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5173 #include <dsound.h>
\r
5174 #include <assert.h>
\r
5175 #include <algorithm>
\r
5177 #if defined(__MINGW32__)
\r
5178 // missing from latest mingw winapi
\r
5179 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5180 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5181 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5182 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5185 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5187 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5188 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Returns non-zero when `pointer` lies within the half-open circular
// interval [earlierPointer, laterPointer) on a ring buffer of `bufferSize`
// bytes.  The positions are first unwrapped (shifted by bufferSize) so a
// simple linear comparison is valid across the wrap point.
5191 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )

5193 if ( pointer > bufferSize ) pointer -= bufferSize;

5194 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;

5195 if ( pointer < earlierPointer ) pointer += bufferSize;

5196 return pointer >= earlierPointer && pointer < laterPointer;
\r
5199 // A structure to hold various information related to the DirectSound
\r
5200 // API implementation.
\r
5202 unsigned int drainCounter; // Tracks callback counts when draining
\r
5203 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5207 UINT bufferPointer[2];
\r
5208 DWORD dsBufferSize[2];
\r
5209 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5213 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5216 // Declarations for utility functions, callbacks, and structures
\r
5217 // specific to the DirectSound implementation.
\r
5218 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5219 LPCTSTR description,
\r
5221 LPVOID lpContext );
\r
5223 static const char* getErrorString( int code );
\r
5225 static unsigned __stdcall callbackHandler( void *ptr );
\r
5234 : found(false) { validId[0] = false; validId[1] = false; }
\r
5237 struct DsProbeData {
\r
5239 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: attempts COM initialization for this thread.  Success is
// remembered in coInitialized_ so the destructor makes the balancing
// CoUninitialize call only when this CoInitialize actually succeeded.
5242 RtApiDs :: RtApiDs()

5244 // Dsound will run both-threaded. If CoInitialize fails, then just

5245 // accept whatever the mainline chose for a threading model.

5246 coInitialized_ = false;

5247 HRESULT hr = CoInitialize( NULL );

5248 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balances the constructor's CoInitialize (when it succeeded)
// and closes the stream if one is still open.
5251 RtApiDs :: ~RtApiDs()

5253 if ( coInitialized_ ) CoUninitialize(); // balanced call.

5254 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5257 // The DirectSound default output is always the first device.

// Returns the index of the default output device (per the note above, the
// first enumerated device).  NOTE(review): the function body is not visible
// in this fragment -- confirm against the canonical source.
5258 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5263 // The DirectSound default input is always the first input device,

5264 // which is the first capture device enumerated.

// Returns the index of the default input device (per the note above, the
// first capture device enumerated).  NOTE(review): the function body is not
// visible in this fragment -- confirm against the canonical source.
5265 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerates DirectSound render and capture devices into the cached
// dsDevices list (via the deviceQueryCallback, which sets each device's
// `found` flag), prunes entries that no longer exist, and returns the
// resulting device count.  Enumeration failures are reported as WARNINGs
// rather than aborting.
5270 unsigned int RtApiDs :: getDeviceCount( void )

5272 // Set query flag for previously found devices to false, so that we

5273 // can check for any devices that have disappeared.

5274 for ( unsigned int i=0; i<dsDevices.size(); i++ )

5275 dsDevices[i].found = false;

5277 // Query DirectSound devices.

5278 struct DsProbeData probeInfo;

5279 probeInfo.isInput = false;

5280 probeInfo.dsDevices = &dsDevices;

5281 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5282 if ( FAILED( result ) ) {

5283 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";

5284 errorText_ = errorStream_.str();

5285 error( RtAudioError::WARNING );

5288 // Query DirectSoundCapture devices.

5289 probeInfo.isInput = true;

5290 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5291 if ( FAILED( result ) ) {

5292 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";

5293 errorText_ = errorStream_.str();

5294 error( RtAudioError::WARNING );

5297 // Clean out any devices that may have disappeared.

5298 std::vector< int > indices;

5299 for ( unsigned int i=0; i<dsDevices.size(); i++ )

5300 if ( dsDevices[i].found == false ) indices.push_back( i );

// NOTE(review): after the first erase, later stored indices point one past
// their intended element, so erasing by original index mis-targets when two
// or more devices disappear at once; the commented-out nErased lines hint
// at an intended offset correction -- verify before relying on this path.
5301 //unsigned int nErased = 0;

5302 for ( unsigned int i=0; i<indices.size(); i++ )

5303 dsDevices.erase( dsDevices.begin()+indices[i] );

5304 //dsDevices.erase( dsDevices.begin()-nErased++ );

5306 return static_cast<unsigned int>(dsDevices.size());
\r
5309 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5311 RtAudio::DeviceInfo info;
\r
5312 info.probed = false;
\r
5314 if ( dsDevices.size() == 0 ) {
\r
5315 // Force a query of all devices
\r
5317 if ( dsDevices.size() == 0 ) {
\r
5318 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5319 error( RtAudioError::INVALID_USE );
\r
5324 if ( device >= dsDevices.size() ) {
\r
5325 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5326 error( RtAudioError::INVALID_USE );
\r
5331 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5333 LPDIRECTSOUND output;
\r
5335 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5336 if ( FAILED( result ) ) {
\r
5337 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5338 errorText_ = errorStream_.str();
\r
5339 error( RtAudioError::WARNING );
\r
5343 outCaps.dwSize = sizeof( outCaps );
\r
5344 result = output->GetCaps( &outCaps );
\r
5345 if ( FAILED( result ) ) {
\r
5346 output->Release();
\r
5347 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5348 errorText_ = errorStream_.str();
\r
5349 error( RtAudioError::WARNING );
\r
5353 // Get output channel information.
\r
5354 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5356 // Get sample rate information.
\r
5357 info.sampleRates.clear();
\r
5358 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5359 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5360 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5361 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5363 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5364 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5368 // Get format information.
\r
5369 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5370 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5372 output->Release();
\r
5374 if ( getDefaultOutputDevice() == device )
\r
5375 info.isDefaultOutput = true;
\r
5377 if ( dsDevices[ device ].validId[1] == false ) {
\r
5378 info.name = dsDevices[ device ].name;
\r
5379 info.probed = true;
\r
5385 LPDIRECTSOUNDCAPTURE input;
\r
5386 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5387 if ( FAILED( result ) ) {
\r
5388 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5389 errorText_ = errorStream_.str();
\r
5390 error( RtAudioError::WARNING );
\r
5395 inCaps.dwSize = sizeof( inCaps );
\r
5396 result = input->GetCaps( &inCaps );
\r
5397 if ( FAILED( result ) ) {
\r
5399 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5400 errorText_ = errorStream_.str();
\r
5401 error( RtAudioError::WARNING );
\r
5405 // Get input channel information.
\r
5406 info.inputChannels = inCaps.dwChannels;
\r
5408 // Get sample rate and format information.
\r
5409 std::vector<unsigned int> rates;
\r
5410 if ( inCaps.dwChannels >= 2 ) {
\r
5411 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5414 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5417 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5418 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5420 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5421 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5422 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5423 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5424 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5426 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5427 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5428 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5429 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5430 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5433 else if ( inCaps.dwChannels == 1 ) {
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5443 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5449 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5456 else info.inputChannels = 0; // technically, this would be an error
\r
5460 if ( info.inputChannels == 0 ) return info;
\r
5462 // Copy the supported rates to the info structure but avoid duplication.
\r
5464 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5466 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5467 if ( rates[i] == info.sampleRates[j] ) {
\r
5472 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5474 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5476 // If device opens for both playback and capture, we determine the channels.
\r
5477 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5478 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5480 if ( device == 0 ) info.isDefaultInput = true;
\r
5482 // Copy name and return.
\r
5483 info.name = dsDevices[ device ].name;
\r
5484 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on `device`.
// Creates the DirectSound object and a secondary (or capture) buffer sized to
// cover nBuffers * (*bufferSize) frames, fills in the shared stream_ state,
// allocates conversion buffers, lazily creates the DsHandle + callback thread,
// and records the per-mode handles.  *bufferSize is in/out (clamped to >= 32).
// NOTE(review): this chunk appears to be a lossy extraction — closing braces,
// `return FAILURE;` lines and a few declarations (outCaps, dsbcaps, result,
// audioPtr, dataLen, nBuffers, the `error:` unwind label, `return SUCCESS;`)
// are missing between the numbered lines.  Code left byte-identical; comments
// only.  TODO: reconcile against the canonical RtAudio.cpp.
5488 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5489 unsigned int firstChannel, unsigned int sampleRate,
5490 RtAudioFormat format, unsigned int *bufferSize,
5491 RtAudio::StreamOptions *options )
// DirectSound supports at most stereo per device, including the channel offset.
5493 if ( channels + firstChannel > 2 ) {
5494 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5498 size_t nDevices = dsDevices.size();
5499 if ( nDevices == 0 ) {
5500 // This should not happen because a check is made before this function is called.
5501 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5505 if ( device >= nDevices ) {
5506 // This should not happen because a check is made before this function is called.
5507 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Verify the selected device actually exposes the requested direction
// (validId[0] = render GUID present, validId[1] = capture GUID present).
5511 if ( mode == OUTPUT ) {
5512 if ( dsDevices[ device ].validId[0] == false ) {
5513 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5514 errorText_ = errorStream_.str();
5518 else { // mode == INPUT
5519 if ( dsDevices[ device ].validId[1] == false ) {
5520 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5521 errorText_ = errorStream_.str();
5526 // According to a note in PortAudio, using GetDesktopWindow()
5527 // instead of GetForegroundWindow() is supposed to avoid problems
5528 // that occur when the application's window is not the foreground
5529 // window. Also, if the application window closes before the
5530 // DirectSound buffer, DirectSound can crash. In the past, I had
5531 // problems when using GetDesktopWindow() but it seems fine now
5532 // (January 2010). I'll leave it commented here.
5533 // HWND hWnd = GetForegroundWindow();
5534 HWND hWnd = GetDesktopWindow();
5536 // Check the numberOfBuffers parameter and limit the lowest value to
5537 // two. This is a judgement call and a value of two is probably too
5538 // low for capture, but it should work for playback.
5540 if ( options ) nBuffers = options->numberOfBuffers;
5541 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
// NOTE(review): nBuffers < 2 maps to 3 (not 2) — presumably the intended
// "safe" default when the caller gave no/too-small a value; confirm upstream.
5542 if ( nBuffers < 2 ) nBuffers = 3;
5544 // Check the lower range of the user-specified buffer size and set
5545 // (arbitrarily) to a lower bound of 32.
5546 if ( *bufferSize < 32 ) *bufferSize = 32;
5548 // Create the wave format structure. The data format setting will
5549 // be determined later.
5550 WAVEFORMATEX waveFormat;
5551 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5552 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5553 waveFormat.nChannels = channels + firstChannel;
5554 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5556 // Determine the device buffer size. By default, we'll use the value
5557 // defined above (32K), but we will grow it to make allowances for
5558 // very large software buffer sizes.
5559 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
// dsPointerLeadTime is measured in BYTES (despite the name): how far ahead of
// the safe write pointer we stay.
5560 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the COM object and its buffer to the common tail below.
5562 void *ohandle = 0, *bhandle = 0;
5564 if ( mode == OUTPUT ) {
5566 LPDIRECTSOUND output;
5567 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5568 if ( FAILED( result ) ) {
5569 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5570 errorText_ = errorStream_.str();
5575 outCaps.dwSize = sizeof( outCaps );
5576 result = output->GetCaps( &outCaps );
5577 if ( FAILED( result ) ) {
5578 output->Release();
5579 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5580 errorText_ = errorStream_.str();
5584 // Check channel information.
5585 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5586 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5587 errorText_ = errorStream_.str();
5591 // Check format information. Use 16-bit format unless not
5592 // supported or user requests 8-bit.
5593 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5594 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5595 waveFormat.wBitsPerSample = 16;
5596 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5599 waveFormat.wBitsPerSample = 8;
5600 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5602 stream_.userFormat = format;
5604 // Update wave format structure and buffer information.
5605 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5606 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5607 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5609 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5610 while ( dsPointerLeadTime * 2U > dsBufferSize )
5611 dsBufferSize *= 2;
5613 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5614 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5615 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5616 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5617 if ( FAILED( result ) ) {
5618 output->Release();
5619 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5620 errorText_ = errorStream_.str();
5624 // Even though we will write to the secondary buffer, we need to
5625 // access the primary buffer to set the correct output format
5626 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5627 // buffer description.
5628 DSBUFFERDESC bufferDescription;
5629 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5630 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5631 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5633 // Obtain the primary buffer
5634 LPDIRECTSOUNDBUFFER buffer;
5635 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5636 if ( FAILED( result ) ) {
5637 output->Release();
5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5639 errorText_ = errorStream_.str();
5643 // Set the primary DS buffer sound format.
5644 result = buffer->SetFormat( &waveFormat );
5645 if ( FAILED( result ) ) {
5646 output->Release();
5647 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5648 errorText_ = errorStream_.str();
5652 // Setup the secondary DS buffer description.
5653 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5654 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5655 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5656 DSBCAPS_GLOBALFOCUS |
5657 DSBCAPS_GETCURRENTPOSITION2 |
5658 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5659 bufferDescription.dwBufferBytes = dsBufferSize;
5660 bufferDescription.lpwfxFormat = &waveFormat;
5662 // Try to create the secondary DS buffer. If that doesn't work,
5663 // try to use software mixing. Otherwise, there's a problem.
5664 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5665 if ( FAILED( result ) ) {
5666 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5667 DSBCAPS_GLOBALFOCUS |
5668 DSBCAPS_GETCURRENTPOSITION2 |
5669 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5670 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5671 if ( FAILED( result ) ) {
5672 output->Release();
5673 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5674 errorText_ = errorStream_.str();
5679 // Get the buffer size ... might be different from what we specified.
5681 dsbcaps.dwSize = sizeof( DSBCAPS );
5682 result = buffer->GetCaps( &dsbcaps );
5683 if ( FAILED( result ) ) {
5684 output->Release();
5685 buffer->Release();
5686 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5687 errorText_ = errorStream_.str();
5691 dsBufferSize = dsbcaps.dwBufferBytes;
5693 // Lock the DS buffer
// Lock/Zero/Unlock below start playback from silence rather than stale data.
5696 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5697 if ( FAILED( result ) ) {
5698 output->Release();
5699 buffer->Release();
5700 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5701 errorText_ = errorStream_.str();
5705 // Zero the DS buffer
5706 ZeroMemory( audioPtr, dataLen );
5708 // Unlock the DS buffer
5709 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5710 if ( FAILED( result ) ) {
5711 output->Release();
5712 buffer->Release();
5713 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5714 errorText_ = errorStream_.str();
5718 ohandle = (void *) output;
5719 bhandle = (void *) buffer;
// --- INPUT path: mirrors the OUTPUT path using the capture interfaces. ---
5722 if ( mode == INPUT ) {
5724 LPDIRECTSOUNDCAPTURE input;
5725 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5726 if ( FAILED( result ) ) {
5727 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5728 errorText_ = errorStream_.str();
5733 inCaps.dwSize = sizeof( inCaps );
5734 result = input->GetCaps( &inCaps );
5735 if ( FAILED( result ) ) {
5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5738 errorText_ = errorStream_.str();
5742 // Check channel information.
5743 if ( inCaps.dwChannels < channels + firstChannel ) {
5744 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5748 // Check format information. Use 16-bit format unless user
5749 // requests 8-bit.
5750 DWORD deviceFormats;
5751 if ( channels + firstChannel == 2 ) {
5752 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5753 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5754 waveFormat.wBitsPerSample = 8;
5755 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5757 else { // assume 16-bit is supported
5758 waveFormat.wBitsPerSample = 16;
5759 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5762 else { // channel == 1
5763 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5764 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5765 waveFormat.wBitsPerSample = 8;
5766 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5768 else { // assume 16-bit is supported
5769 waveFormat.wBitsPerSample = 16;
5770 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5773 stream_.userFormat = format;
5775 // Update wave format structure and buffer information.
5776 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5777 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5778 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5780 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5781 while ( dsPointerLeadTime * 2U > dsBufferSize )
5782 dsBufferSize *= 2;
5784 // Setup the secondary DS buffer description.
5785 DSCBUFFERDESC bufferDescription;
5786 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5787 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5788 bufferDescription.dwFlags = 0;
5789 bufferDescription.dwReserved = 0;
5790 bufferDescription.dwBufferBytes = dsBufferSize;
5791 bufferDescription.lpwfxFormat = &waveFormat;
5793 // Create the capture buffer.
5794 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5795 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5796 if ( FAILED( result ) ) {
5798 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5799 errorText_ = errorStream_.str();
5803 // Get the buffer size ... might be different from what we specified.
5804 DSCBCAPS dscbcaps;
5805 dscbcaps.dwSize = sizeof( DSCBCAPS );
5806 result = buffer->GetCaps( &dscbcaps );
5807 if ( FAILED( result ) ) {
5809 buffer->Release();
5810 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5811 errorText_ = errorStream_.str();
5815 dsBufferSize = dscbcaps.dwBufferBytes;
5817 // NOTE: We could have a problem here if this is a duplex stream
5818 // and the play and capture hardware buffer sizes are different
5819 // (I'm actually not sure if that is a problem or not).
5820 // Currently, we are not verifying that.
5822 // Lock the capture buffer
5825 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5826 if ( FAILED( result ) ) {
5828 buffer->Release();
5829 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5830 errorText_ = errorStream_.str();
5834 // Zero the buffer
5835 ZeroMemory( audioPtr, dataLen );
5837 // Unlock the buffer
5838 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5839 if ( FAILED( result ) ) {
5841 buffer->Release();
5842 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5843 errorText_ = errorStream_.str();
5847 ohandle = (void *) input;
5848 bhandle = (void *) buffer;
// --- Common tail: record stream parameters shared by both directions. ---
5851 // Set various stream parameters
5852 DsHandle *handle = 0;
5853 stream_.nDeviceChannels[mode] = channels + firstChannel;
5854 stream_.nUserChannels[mode] = channels;
5855 stream_.bufferSize = *bufferSize;
5856 stream_.channelOffset[mode] = firstChannel;
5857 stream_.deviceInterleaved[mode] = true;
5858 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5859 else stream_.userInterleaved = true;
5861 // Set flag for buffer conversion
// Conversion is needed when channel count, sample format, or interleaving
// differs between the user-facing view and the device.
5862 stream_.doConvertBuffer[mode] = false;
5863 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5864 stream_.doConvertBuffer[mode] = true;
5865 if (stream_.userFormat != stream_.deviceFormat[mode])
5866 stream_.doConvertBuffer[mode] = true;
5867 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5868 stream_.nUserChannels[mode] > 1 )
5869 stream_.doConvertBuffer[mode] = true;
5871 // Allocate necessary internal buffers
5872 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5873 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5874 if ( stream_.userBuffer[mode] == NULL ) {
5875 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5879 if ( stream_.doConvertBuffer[mode] ) {
5881 bool makeBuffer = true;
5882 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5883 if ( mode == INPUT ) {
// In duplex, reuse the output-side device buffer when it is already big enough.
5884 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5885 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5886 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5890 if ( makeBuffer ) {
5891 bufferBytes *= *bufferSize;
5892 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5893 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5894 if ( stream_.deviceBuffer == NULL ) {
5895 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5901 // Allocate our DsHandle structures for the stream.
5902 if ( stream_.apiHandle == 0 ) {
5904 handle = new DsHandle;
5906 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but a DsHandle is allocated —
// looks like a copy/paste slip inherited from the ASIO backend; the string
// is runtime behavior, so it is left unchanged here.
5907 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5911 // Create a manual-reset event.
5912 handle->condition = CreateEvent( NULL, // no security
5913 TRUE, // manual-reset
5914 FALSE, // non-signaled initially
5915 NULL ); // unnamed
5916 stream_.apiHandle = (void *) handle;
5919 handle = (DsHandle *) stream_.apiHandle;
5920 handle->id[mode] = ohandle;
5921 handle->buffer[mode] = bhandle;
5922 handle->dsBufferSize[mode] = dsBufferSize;
5923 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5925 stream_.device[mode] = device;
5926 stream_.state = STREAM_STOPPED;
5927 if ( stream_.mode == OUTPUT && mode == INPUT )
5928 // We had already set up an output stream.
5929 stream_.mode = DUPLEX;
5931 stream_.mode = mode;
5932 stream_.nBuffers = nBuffers;
5933 stream_.sampleRate = sampleRate;
5935 // Setup the buffer conversion information structure.
5936 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5938 // Setup the callback thread.
5939 if ( stream_.callbackInfo.isRunning == false ) {
5940 unsigned threadId;
5941 stream_.callbackInfo.isRunning = true;
5942 stream_.callbackInfo.object = (void *) this;
5943 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5944 &stream_.callbackInfo, 0, &threadId );
5945 if ( stream_.callbackInfo.thread == 0 ) {
5946 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5950 // Boost DS thread priority
5951 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// --- Error-unwind section (presumably the `error:` label in the canonical
// source, missing from this extraction): release COM objects, the condition
// event, and heap buffers, then mark the stream closed. ---
5957 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5958 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5959 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5960 if ( buffer ) buffer->Release();
5961 object->Release();
5963 if ( handle->buffer[1] ) {
5964 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5965 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5966 if ( buffer ) buffer->Release();
5967 object->Release();
5969 CloseHandle( handle->condition );
5971 stream_.apiHandle = 0;
5974 for ( int i=0; i<2; i++ ) {
5975 if ( stream_.userBuffer[i] ) {
5976 free( stream_.userBuffer[i] );
5977 stream_.userBuffer[i] = 0;
5981 if ( stream_.deviceBuffer ) {
5982 free( stream_.deviceBuffer );
5983 stream_.deviceBuffer = 0;
5986 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop the callback thread, release the DirectSound
// COM objects and buffers, free internal buffers, and reset stream state.
// Issues a WARNING (not an error) if no stream is open.
// NOTE(review): lossy extraction — closing braces and some lines are missing
// between the numbered lines; code left byte-identical, comments only.
5990 void RtApiDs :: closeStream()
5992 if ( stream_.state == STREAM_CLOSED ) {
5993 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5994 error( RtAudioError::WARNING );
5998 // Stop the callback thread.
// isRunning = false signals the thread loop to exit; we then join and close it.
5999 stream_.callbackInfo.isRunning = false;
6000 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6001 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6003 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side COM objects (index 0 = output).
6005 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6006 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6007 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6010 buffer->Release();
6012 object->Release();
// Release capture-side COM objects (index 1 = input).
6014 if ( handle->buffer[1] ) {
6015 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6016 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6019 buffer->Release();
6021 object->Release();
6023 CloseHandle( handle->condition );
6025 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6028 for ( int i=0; i<2; i++ ) {
6029 if ( stream_.userBuffer[i] ) {
6030 free( stream_.userBuffer[i] );
6031 stream_.userBuffer[i] = 0;
6035 if ( stream_.deviceBuffer ) {
6036 free( stream_.deviceBuffer );
6037 stream_.deviceBuffer = 0;
6040 stream_.mode = UNINITIALIZED;
6041 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: begin looping playback and/or capture on the
// DirectSound buffers and mark the stream RUNNING.  Issues a WARNING if the
// stream is already running; raises SYSTEM_ERROR if a buffer fails to start.
// NOTE(review): lossy extraction — closing braces/goto lines are missing
// between the numbered lines; code left byte-identical, comments only.
6044 void RtApiDs :: startStream()
6047 if ( stream_.state == STREAM_RUNNING ) {
6048 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6049 error( RtAudioError::WARNING );
6053 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6055 // Increase scheduler frequency on lesser windows (a side-effect of
6056 // increasing timer accuracy). On greater windows (Win2K or later),
6057 // this is already in effect.
6058 timeBeginPeriod( 1 );
6060 buffersRolling = false;
6061 duplexPrerollBytes = 0;
6063 if ( stream_.mode == DUPLEX ) {
6064 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6065 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6068 HRESULT result = 0;
// Kick off looping playback on the output buffer (OUTPUT or DUPLEX).
6069 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6071 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6072 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6073 if ( FAILED( result ) ) {
6074 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6075 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer (INPUT or DUPLEX).
6080 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6082 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6083 result = buffer->Start( DSCBSTART_LOOPING );
6084 if ( FAILED( result ) ) {
6085 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6086 errorText_ = errorStream_.str();
// Reset drain bookkeeping so a previous stop/drain cycle cannot leak state in.
6091 handle->drainCounter = 0;
6092 handle->internalDrain = false;
6093 ResetEvent( handle->condition );
6094 stream_.state = STREAM_RUNNING;
6097 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream.  For output, first lets the callback drain pending
// audio (drainCounter/condition handshake), then stops each DirectSound
// buffer, zeroes it so a restart does not replay stale data, and rewinds the
// internal buffer pointers.  Issues a WARNING if already stopped; raises
// SYSTEM_ERROR if any DS call fails.
// NOTE(review): lossy extraction — closing braces and some declarations
// (audioPtr, dataLen) are missing between the numbered lines; code left
// byte-identical, comments only.
6100 void RtApiDs :: stopStream()
6103 if ( stream_.state == STREAM_STOPPED ) {
6104 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6105 error( RtAudioError::WARNING );
6109 HRESULT result = 0;
6112 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6113 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a graceful drain and wait for the callback thread to signal it.
6114 if ( handle->drainCounter == 0 ) {
6115 handle->drainCounter = 2;
6116 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6119 stream_.state = STREAM_STOPPED;
// Serialize with the callback thread before touching the DS buffers.
6121 MUTEX_LOCK( &stream_.mutex );
6123 // Stop the buffer and clear memory
6124 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6125 result = buffer->Stop();
6126 if ( FAILED( result ) ) {
6127 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6128 errorText_ = errorStream_.str();
6132 // Lock the buffer and clear it so that if we start to play again,
6133 // we won't have old data playing.
6134 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6135 if ( FAILED( result ) ) {
6136 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6137 errorText_ = errorStream_.str();
6141 // Zero the DS buffer
6142 ZeroMemory( audioPtr, dataLen );
6144 // Unlock the DS buffer
6145 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6146 if ( FAILED( result ) ) {
6147 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6148 errorText_ = errorStream_.str();
6152 // If we start playing again, we must begin at beginning of buffer.
6153 handle->bufferPointer[0] = 0;
6156 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6157 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6161 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for pure-INPUT streams.
6163 if ( stream_.mode != DUPLEX )
6164 MUTEX_LOCK( &stream_.mutex );
6166 result = buffer->Stop();
6167 if ( FAILED( result ) ) {
6168 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6169 errorText_ = errorStream_.str();
6173 // Lock the buffer and clear it so that if we start to play again,
6174 // we won't have old data playing.
6175 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6176 if ( FAILED( result ) ) {
6177 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6178 errorText_ = errorStream_.str();
6182 // Zero the DS buffer
6183 ZeroMemory( audioPtr, dataLen );
6185 // Unlock the DS buffer
6186 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6187 if ( FAILED( result ) ) {
6188 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6189 errorText_ = errorStream_.str();
6193 // If we start recording again, we must begin at beginning of buffer.
6194 handle->bufferPointer[1] = 0;
6198 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6199 MUTEX_UNLOCK( &stream_.mutex );
6201 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the running stream without waiting for output to drain: setting
// drainCounter to 2 tells the callback machinery to shut down immediately.
// Issues a WARNING if the stream is already stopped.
// NOTE(review): lossy extraction — the canonical version presumably also
// calls stopStream() after this (lines are missing here); code left
// byte-identical, comments only.
6204 void RtApiDs :: abortStream()
6207 if ( stream_.state == STREAM_STOPPED ) {
6208 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6209 error( RtAudioError::WARNING );
6213 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6214 handle->drainCounter = 2;
\r
6219 void RtApiDs :: callbackEvent()
\r
6221 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6222 Sleep( 50 ); // sleep 50 milliseconds
\r
6226 if ( stream_.state == STREAM_CLOSED ) {
\r
6227 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6228 error( RtAudioError::WARNING );
\r
6232 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6233 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6235 // Check if we were draining the stream and signal is finished.
\r
6236 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6238 stream_.state = STREAM_STOPPING;
\r
6239 if ( handle->internalDrain == false )
\r
6240 SetEvent( handle->condition );
\r
6246 // Invoke user callback to get fresh output data UNLESS we are
\r
6247 // draining stream.
\r
6248 if ( handle->drainCounter == 0 ) {
\r
6249 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6250 double streamTime = getStreamTime();
\r
6251 RtAudioStreamStatus status = 0;
\r
6252 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6253 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6254 handle->xrun[0] = false;
\r
6256 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6257 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6258 handle->xrun[1] = false;
\r
6260 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6261 stream_.bufferSize, streamTime, status, info->userData );
\r
6262 if ( cbReturnValue == 2 ) {
\r
6263 stream_.state = STREAM_STOPPING;
\r
6264 handle->drainCounter = 2;
\r
6268 else if ( cbReturnValue == 1 ) {
\r
6269 handle->drainCounter = 1;
\r
6270 handle->internalDrain = true;
\r
6275 DWORD currentWritePointer, safeWritePointer;
\r
6276 DWORD currentReadPointer, safeReadPointer;
\r
6277 UINT nextWritePointer;
\r
6279 LPVOID buffer1 = NULL;
\r
6280 LPVOID buffer2 = NULL;
\r
6281 DWORD bufferSize1 = 0;
\r
6282 DWORD bufferSize2 = 0;
\r
6287 MUTEX_LOCK( &stream_.mutex );
\r
6288 if ( stream_.state == STREAM_STOPPED ) {
\r
6289 MUTEX_UNLOCK( &stream_.mutex );
\r
6293 if ( buffersRolling == false ) {
\r
6294 if ( stream_.mode == DUPLEX ) {
\r
6295 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6297 // It takes a while for the devices to get rolling. As a result,
\r
6298 // there's no guarantee that the capture and write device pointers
\r
6299 // will move in lockstep. Wait here for both devices to start
\r
6300 // rolling, and then set our buffer pointers accordingly.
\r
6301 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6302 // bytes later than the write buffer.
\r
6304 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6305 // take place between the two GetCurrentPosition calls... but I'm
\r
6306 // really not sure how to solve the problem. Temporarily boost to
\r
6307 // Realtime priority, maybe; but I'm not sure what priority the
\r
6308 // DirectSound service threads run at. We *should* be roughly
\r
6309 // within a ms or so of correct.
\r
6311 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6312 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6314 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6316 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6317 if ( FAILED( result ) ) {
\r
6318 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6319 errorText_ = errorStream_.str();
\r
6320 MUTEX_UNLOCK( &stream_.mutex );
\r
6321 error( RtAudioError::SYSTEM_ERROR );
\r
6324 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6325 if ( FAILED( result ) ) {
\r
6326 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6327 errorText_ = errorStream_.str();
\r
6328 MUTEX_UNLOCK( &stream_.mutex );
\r
6329 error( RtAudioError::SYSTEM_ERROR );
\r
6333 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6334 if ( FAILED( result ) ) {
\r
6335 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6336 errorText_ = errorStream_.str();
\r
6337 MUTEX_UNLOCK( &stream_.mutex );
\r
6338 error( RtAudioError::SYSTEM_ERROR );
\r
6341 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6342 if ( FAILED( result ) ) {
\r
6343 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6344 errorText_ = errorStream_.str();
\r
6345 MUTEX_UNLOCK( &stream_.mutex );
\r
6346 error( RtAudioError::SYSTEM_ERROR );
\r
6349 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6353 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6355 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6356 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6357 handle->bufferPointer[1] = safeReadPointer;
\r
6359 else if ( stream_.mode == OUTPUT ) {
\r
6361 // Set the proper nextWritePosition after initial startup.
\r
6362 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6363 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6364 if ( FAILED( result ) ) {
\r
6365 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6366 errorText_ = errorStream_.str();
\r
6367 MUTEX_UNLOCK( &stream_.mutex );
\r
6368 error( RtAudioError::SYSTEM_ERROR );
\r
6371 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6372 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6375 buffersRolling = true;
\r
6378 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6380 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6382 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6383 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6384 bufferBytes *= formatBytes( stream_.userFormat );
\r
6385 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6388 // Setup parameters and do buffer conversion if necessary.
\r
6389 if ( stream_.doConvertBuffer[0] ) {
\r
6390 buffer = stream_.deviceBuffer;
\r
6391 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6392 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6393 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6396 buffer = stream_.userBuffer[0];
\r
6397 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6398 bufferBytes *= formatBytes( stream_.userFormat );
\r
6401 // No byte swapping necessary in DirectSound implementation.
\r
6403 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6404 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6406 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6407 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6409 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6410 nextWritePointer = handle->bufferPointer[0];
\r
6412 DWORD endWrite, leadPointer;
\r
6414 // Find out where the read and "safe write" pointers are.
\r
6415 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6416 if ( FAILED( result ) ) {
\r
6417 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6418 errorText_ = errorStream_.str();
\r
6419 error( RtAudioError::SYSTEM_ERROR );
\r
6423 // We will copy our output buffer into the region between
\r
6424 // safeWritePointer and leadPointer. If leadPointer is not
\r
6425 // beyond the next endWrite position, wait until it is.
\r
6426 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6427 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6428 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6429 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6430 endWrite = nextWritePointer + bufferBytes;
\r
6432 // Check whether the entire write region is behind the play pointer.
\r
6433 if ( leadPointer >= endWrite ) break;
\r
6435 // If we are here, then we must wait until the leadPointer advances
\r
6436 // beyond the end of our next write region. We use the
\r
6437 // Sleep() function to suspend operation until that happens.
\r
6438 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6439 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6440 if ( millis < 1.0 ) millis = 1.0;
\r
6441 Sleep( (DWORD) millis );
\r
6444 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6445 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6446 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6447 handle->xrun[0] = true;
\r
6448 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6449 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6450 handle->bufferPointer[0] = nextWritePointer;
\r
6451 endWrite = nextWritePointer + bufferBytes;
\r
6454 // Lock free space in the buffer
\r
6455 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6456 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6457 if ( FAILED( result ) ) {
\r
6458 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6459 errorText_ = errorStream_.str();
\r
6460 MUTEX_UNLOCK( &stream_.mutex );
\r
6461 error( RtAudioError::SYSTEM_ERROR );
\r
6465 // Copy our buffer into the DS buffer
\r
6466 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6467 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6469 // Update our buffer offset and unlock sound buffer
\r
6470 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6471 if ( FAILED( result ) ) {
\r
6472 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6473 errorText_ = errorStream_.str();
\r
6474 MUTEX_UNLOCK( &stream_.mutex );
\r
6475 error( RtAudioError::SYSTEM_ERROR );
\r
6478 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6479 handle->bufferPointer[0] = nextWritePointer;
\r
6482 // Don't bother draining input
\r
6483 if ( handle->drainCounter ) {
\r
6484 handle->drainCounter++;
\r
6488 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6490 // Setup parameters.
\r
6491 if ( stream_.doConvertBuffer[1] ) {
\r
6492 buffer = stream_.deviceBuffer;
\r
6493 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6494 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6497 buffer = stream_.userBuffer[1];
\r
6498 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6499 bufferBytes *= formatBytes( stream_.userFormat );
\r
6502 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6503 long nextReadPointer = handle->bufferPointer[1];
\r
6504 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6506 // Find out where the write and "safe read" pointers are.
\r
6507 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6508 if ( FAILED( result ) ) {
\r
6509 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6510 errorText_ = errorStream_.str();
\r
6511 MUTEX_UNLOCK( &stream_.mutex );
\r
6512 error( RtAudioError::SYSTEM_ERROR );
\r
6516 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6517 DWORD endRead = nextReadPointer + bufferBytes;
\r
6519 // Handling depends on whether we are INPUT or DUPLEX.
\r
6520 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6521 // then a wait here will drag the write pointers into the forbidden zone.
\r
6523 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6524 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6525 // practical way to sync up the read and write pointers reliably, given the
\r
6526 // the very complex relationship between phase and increment of the read and write
\r
6529 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6530 // provide a pre-roll period of 0.5 seconds in which we return
\r
6531 // zeros from the read buffer while the pointers sync up.
\r
6533 if ( stream_.mode == DUPLEX ) {
\r
6534 if ( safeReadPointer < endRead ) {
\r
6535 if ( duplexPrerollBytes <= 0 ) {
\r
6536 // Pre-roll time over. Be more agressive.
\r
6537 int adjustment = endRead-safeReadPointer;
\r
6539 handle->xrun[1] = true;
\r
6541 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6542 // and perform fine adjustments later.
\r
6543 // - small adjustments: back off by twice as much.
\r
6544 if ( adjustment >= 2*bufferBytes )
\r
6545 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6547 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6549 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6553 // In pre=roll time. Just do it.
\r
6554 nextReadPointer = safeReadPointer - bufferBytes;
\r
6555 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6557 endRead = nextReadPointer + bufferBytes;
\r
6560 else { // mode == INPUT
\r
6561 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6562 // See comments for playback.
\r
6563 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6564 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6565 if ( millis < 1.0 ) millis = 1.0;
\r
6566 Sleep( (DWORD) millis );
\r
6568 // Wake up and find out where we are now.
\r
6569 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6570 if ( FAILED( result ) ) {
\r
6571 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6572 errorText_ = errorStream_.str();
\r
6573 MUTEX_UNLOCK( &stream_.mutex );
\r
6574 error( RtAudioError::SYSTEM_ERROR );
\r
6578 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6582 // Lock free space in the buffer
\r
6583 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6584 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6585 if ( FAILED( result ) ) {
\r
6586 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6587 errorText_ = errorStream_.str();
\r
6588 MUTEX_UNLOCK( &stream_.mutex );
\r
6589 error( RtAudioError::SYSTEM_ERROR );
\r
6593 if ( duplexPrerollBytes <= 0 ) {
\r
6594 // Copy our buffer into the DS buffer
\r
6595 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6596 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6599 memset( buffer, 0, bufferSize1 );
\r
6600 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6601 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6604 // Update our buffer offset and unlock sound buffer
\r
6605 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6606 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6607 if ( FAILED( result ) ) {
\r
6608 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6609 errorText_ = errorStream_.str();
\r
6610 MUTEX_UNLOCK( &stream_.mutex );
\r
6611 error( RtAudioError::SYSTEM_ERROR );
\r
6614 handle->bufferPointer[1] = nextReadPointer;
\r
6616 // No byte swapping necessary in DirectSound implementation.
\r
6618 // If necessary, convert 8-bit data from unsigned to signed.
\r
6619 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6620 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6622 // Do buffer conversion if necessary.
\r
6623 if ( stream_.doConvertBuffer[1] )
\r
6624 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6628 MUTEX_UNLOCK( &stream_.mutex );
\r
6629 RtApi::tickStreamTime();
\r
6632 // Definitions for utility functions and callbacks
\r
6633 // specific to the DirectSound implementation.
\r
6635 static unsigned __stdcall callbackHandler( void *ptr )
\r
6637 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6638 RtApiDs *object = (RtApiDs *) info->object;
\r
6639 bool* isRunning = &info->isRunning;
\r
6641 while ( *isRunning == true ) {
\r
6642 object->callbackEvent();
\r
6645 _endthreadex( 0 );
\r
6649 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6650 LPCTSTR description,
\r
6651 LPCTSTR /*module*/,
\r
6652 LPVOID lpContext )
\r
6654 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6655 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6658 bool validDevice = false;
\r
6659 if ( probeInfo.isInput == true ) {
\r
6661 LPDIRECTSOUNDCAPTURE object;
\r
6663 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6664 if ( hr != DS_OK ) return TRUE;
\r
6666 caps.dwSize = sizeof(caps);
\r
6667 hr = object->GetCaps( &caps );
\r
6668 if ( hr == DS_OK ) {
\r
6669 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6670 validDevice = true;
\r
6672 object->Release();
\r
6676 LPDIRECTSOUND object;
\r
6677 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6678 if ( hr != DS_OK ) return TRUE;
\r
6680 caps.dwSize = sizeof(caps);
\r
6681 hr = object->GetCaps( &caps );
\r
6682 if ( hr == DS_OK ) {
\r
6683 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6684 validDevice = true;
\r
6686 object->Release();
\r
6689 // If good device, then save its name and guid.
\r
6690 std::string name = convertCharPointerToStdString( description );
\r
6691 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6692 if ( lpguid == NULL )
\r
6693 name = "Default Device";
\r
6694 if ( validDevice ) {
\r
6695 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6696 if ( dsDevices[i].name == name ) {
\r
6697 dsDevices[i].found = true;
\r
6698 if ( probeInfo.isInput ) {
\r
6699 dsDevices[i].id[1] = lpguid;
\r
6700 dsDevices[i].validId[1] = true;
\r
6703 dsDevices[i].id[0] = lpguid;
\r
6704 dsDevices[i].validId[0] = true;
\r
6711 device.name = name;
\r
6712 device.found = true;
\r
6713 if ( probeInfo.isInput ) {
\r
6714 device.id[1] = lpguid;
\r
6715 device.validId[1] = true;
\r
6718 device.id[0] = lpguid;
\r
6719 device.validId[0] = true;
\r
6721 dsDevices.push_back( device );
\r
6727 static const char* getErrorString( int code )
\r
6731 case DSERR_ALLOCATED:
\r
6732 return "Already allocated";
\r
6734 case DSERR_CONTROLUNAVAIL:
\r
6735 return "Control unavailable";
\r
6737 case DSERR_INVALIDPARAM:
\r
6738 return "Invalid parameter";
\r
6740 case DSERR_INVALIDCALL:
\r
6741 return "Invalid call";
\r
6743 case DSERR_GENERIC:
\r
6744 return "Generic error";
\r
6746 case DSERR_PRIOLEVELNEEDED:
\r
6747 return "Priority level needed";
\r
6749 case DSERR_OUTOFMEMORY:
\r
6750 return "Out of memory";
\r
6752 case DSERR_BADFORMAT:
\r
6753 return "The sample rate or the channel format is not supported";
\r
6755 case DSERR_UNSUPPORTED:
\r
6756 return "Not supported";
\r
6758 case DSERR_NODRIVER:
\r
6759 return "No driver";
\r
6761 case DSERR_ALREADYINITIALIZED:
\r
6762 return "Already initialized";
\r
6764 case DSERR_NOAGGREGATION:
\r
6765 return "No aggregation";
\r
6767 case DSERR_BUFFERLOST:
\r
6768 return "Buffer lost";
\r
6770 case DSERR_OTHERAPPHASPRIO:
\r
6771 return "Another application already has priority";
\r
6773 case DSERR_UNINITIALIZED:
\r
6774 return "Uninitialized";
\r
6777 return "DirectSound unknown error";
\r
6780 //******************** End of __WINDOWS_DS__ *********************//
\r
6784 #if defined(__LINUX_ALSA__)
\r
6786 #include <alsa/asoundlib.h>
\r
6787 #include <unistd.h>
\r
6789 // A structure to hold various information related to the ALSA API
\r
6790 // implementation.
\r
6791 struct AlsaHandle {
\r
6792 snd_pcm_t *handles[2];
\r
6793 bool synchronized;
\r
6795 pthread_cond_t runnable_cv;
\r
6799 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6802 static void *alsaCallbackHandler( void * ptr );
\r
6804 RtApiAlsa :: RtApiAlsa()
\r
6806 // Nothing to do here.
\r
6809 RtApiAlsa :: ~RtApiAlsa()
\r
6811 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6814 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6816 unsigned nDevices = 0;
\r
6817 int result, subdevice, card;
\r
6819 snd_ctl_t *handle;
\r
6821 // Count cards and devices
\r
6823 snd_card_next( &card );
\r
6824 while ( card >= 0 ) {
\r
6825 sprintf( name, "hw:%d", card );
\r
6826 result = snd_ctl_open( &handle, name, 0 );
\r
6827 if ( result < 0 ) {
\r
6828 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6829 errorText_ = errorStream_.str();
\r
6830 error( RtAudioError::WARNING );
\r
6835 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6836 if ( result < 0 ) {
\r
6837 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6838 errorText_ = errorStream_.str();
\r
6839 error( RtAudioError::WARNING );
\r
6842 if ( subdevice < 0 )
\r
6847 snd_ctl_close( handle );
\r
6848 snd_card_next( &card );
\r
6851 result = snd_ctl_open( &handle, "default", 0 );
\r
6852 if (result == 0) {
\r
6854 snd_ctl_close( handle );
\r
6860 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6862 RtAudio::DeviceInfo info;
\r
6863 info.probed = false;
\r
6865 unsigned nDevices = 0;
\r
6866 int result, subdevice, card;
\r
6868 snd_ctl_t *chandle;
\r
6870 // Count cards and devices
\r
6873 snd_card_next( &card );
\r
6874 while ( card >= 0 ) {
\r
6875 sprintf( name, "hw:%d", card );
\r
6876 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6877 if ( result < 0 ) {
\r
6878 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6879 errorText_ = errorStream_.str();
\r
6880 error( RtAudioError::WARNING );
\r
6885 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6886 if ( result < 0 ) {
\r
6887 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6888 errorText_ = errorStream_.str();
\r
6889 error( RtAudioError::WARNING );
\r
6892 if ( subdevice < 0 ) break;
\r
6893 if ( nDevices == device ) {
\r
6894 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6900 snd_ctl_close( chandle );
\r
6901 snd_card_next( &card );
\r
6904 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6905 if ( result == 0 ) {
\r
6906 if ( nDevices == device ) {
\r
6907 strcpy( name, "default" );
\r
6913 if ( nDevices == 0 ) {
\r
6914 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6915 error( RtAudioError::INVALID_USE );
\r
6919 if ( device >= nDevices ) {
\r
6920 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6921 error( RtAudioError::INVALID_USE );
\r
6927 // If a stream is already open, we cannot probe the stream devices.
\r
6928 // Thus, use the saved results.
\r
6929 if ( stream_.state != STREAM_CLOSED &&
\r
6930 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6931 snd_ctl_close( chandle );
\r
6932 if ( device >= devices_.size() ) {
\r
6933 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6934 error( RtAudioError::WARNING );
\r
6937 return devices_[ device ];
\r
6940 int openMode = SND_PCM_ASYNC;
\r
6941 snd_pcm_stream_t stream;
\r
6942 snd_pcm_info_t *pcminfo;
\r
6943 snd_pcm_info_alloca( &pcminfo );
\r
6944 snd_pcm_t *phandle;
\r
6945 snd_pcm_hw_params_t *params;
\r
6946 snd_pcm_hw_params_alloca( ¶ms );
\r
6948 // First try for playback unless default device (which has subdev -1)
\r
6949 stream = SND_PCM_STREAM_PLAYBACK;
\r
6950 snd_pcm_info_set_stream( pcminfo, stream );
\r
6951 if ( subdevice != -1 ) {
\r
6952 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6953 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6955 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6956 if ( result < 0 ) {
\r
6957 // Device probably doesn't support playback.
\r
6958 goto captureProbe;
\r
6962 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6963 if ( result < 0 ) {
\r
6964 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6965 errorText_ = errorStream_.str();
\r
6966 error( RtAudioError::WARNING );
\r
6967 goto captureProbe;
\r
6970 // The device is open ... fill the parameter structure.
\r
6971 result = snd_pcm_hw_params_any( phandle, params );
\r
6972 if ( result < 0 ) {
\r
6973 snd_pcm_close( phandle );
\r
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6975 errorText_ = errorStream_.str();
\r
6976 error( RtAudioError::WARNING );
\r
6977 goto captureProbe;
\r
6980 // Get output channel information.
\r
6981 unsigned int value;
\r
6982 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6983 if ( result < 0 ) {
\r
6984 snd_pcm_close( phandle );
\r
6985 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6986 errorText_ = errorStream_.str();
\r
6987 error( RtAudioError::WARNING );
\r
6988 goto captureProbe;
\r
6990 info.outputChannels = value;
\r
6991 snd_pcm_close( phandle );
\r
6994 stream = SND_PCM_STREAM_CAPTURE;
\r
6995 snd_pcm_info_set_stream( pcminfo, stream );
\r
6997 // Now try for capture unless default device (with subdev = -1)
\r
6998 if ( subdevice != -1 ) {
\r
6999 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7000 snd_ctl_close( chandle );
\r
7001 if ( result < 0 ) {
\r
7002 // Device probably doesn't support capture.
\r
7003 if ( info.outputChannels == 0 ) return info;
\r
7004 goto probeParameters;
\r
7008 snd_ctl_close( chandle );
\r
7010 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7011 if ( result < 0 ) {
\r
7012 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7013 errorText_ = errorStream_.str();
\r
7014 error( RtAudioError::WARNING );
\r
7015 if ( info.outputChannels == 0 ) return info;
\r
7016 goto probeParameters;
\r
7019 // The device is open ... fill the parameter structure.
\r
7020 result = snd_pcm_hw_params_any( phandle, params );
\r
7021 if ( result < 0 ) {
\r
7022 snd_pcm_close( phandle );
\r
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7024 errorText_ = errorStream_.str();
\r
7025 error( RtAudioError::WARNING );
\r
7026 if ( info.outputChannels == 0 ) return info;
\r
7027 goto probeParameters;
\r
7030 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7031 if ( result < 0 ) {
\r
7032 snd_pcm_close( phandle );
\r
7033 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7034 errorText_ = errorStream_.str();
\r
7035 error( RtAudioError::WARNING );
\r
7036 if ( info.outputChannels == 0 ) return info;
\r
7037 goto probeParameters;
\r
7039 info.inputChannels = value;
\r
7040 snd_pcm_close( phandle );
\r
7042 // If device opens for both playback and capture, we determine the channels.
\r
7043 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7044 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7046 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7047 if ( device == 0 && info.outputChannels > 0 )
\r
7048 info.isDefaultOutput = true;
\r
7049 if ( device == 0 && info.inputChannels > 0 )
\r
7050 info.isDefaultInput = true;
\r
7053 // At this point, we just need to figure out the supported data
\r
7054 // formats and sample rates. We'll proceed by opening the device in
\r
7055 // the direction with the maximum number of channels, or playback if
\r
7056 // they are equal. This might limit our sample rate options, but so
\r
7059 if ( info.outputChannels >= info.inputChannels )
\r
7060 stream = SND_PCM_STREAM_PLAYBACK;
\r
7062 stream = SND_PCM_STREAM_CAPTURE;
\r
7063 snd_pcm_info_set_stream( pcminfo, stream );
\r
7065 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7066 if ( result < 0 ) {
\r
7067 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7068 errorText_ = errorStream_.str();
\r
7069 error( RtAudioError::WARNING );
\r
7073 // The device is open ... fill the parameter structure.
\r
7074 result = snd_pcm_hw_params_any( phandle, params );
\r
7075 if ( result < 0 ) {
\r
7076 snd_pcm_close( phandle );
\r
7077 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7078 errorText_ = errorStream_.str();
\r
7079 error( RtAudioError::WARNING );
\r
7083 // Test our discrete set of sample rate values.
\r
7084 info.sampleRates.clear();
\r
7085 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7086 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7087 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7089 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7090 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7093 if ( info.sampleRates.size() == 0 ) {
\r
7094 snd_pcm_close( phandle );
\r
7095 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7096 errorText_ = errorStream_.str();
\r
7097 error( RtAudioError::WARNING );
\r
7101 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7102 snd_pcm_format_t format;
\r
7103 info.nativeFormats = 0;
\r
7104 format = SND_PCM_FORMAT_S8;
\r
7105 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7106 info.nativeFormats |= RTAUDIO_SINT8;
\r
7107 format = SND_PCM_FORMAT_S16;
\r
7108 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7109 info.nativeFormats |= RTAUDIO_SINT16;
\r
7110 format = SND_PCM_FORMAT_S24;
\r
7111 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7112 info.nativeFormats |= RTAUDIO_SINT24;
\r
7113 format = SND_PCM_FORMAT_S32;
\r
7114 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7115 info.nativeFormats |= RTAUDIO_SINT32;
\r
7116 format = SND_PCM_FORMAT_FLOAT;
\r
7117 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7118 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7119 format = SND_PCM_FORMAT_FLOAT64;
\r
7120 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7121 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7123 // Check that we have at least one supported format
\r
7124 if ( info.nativeFormats == 0 ) {
\r
7125 snd_pcm_close( phandle );
\r
7126 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7127 errorText_ = errorStream_.str();
\r
7128 error( RtAudioError::WARNING );
\r
7132 // Get the device name
\r
7134 result = snd_card_get_name( card, &cardname );
\r
7135 if ( result >= 0 ) {
\r
7136 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7141 // That's all ... close the device and return
\r
7142 snd_pcm_close( phandle );
\r
7143 info.probed = true;
\r
7147 void RtApiAlsa :: saveDeviceInfo( void )
\r
7151 unsigned int nDevices = getDeviceCount();
\r
7152 devices_.resize( nDevices );
\r
7153 for ( unsigned int i=0; i<nDevices; i++ )
\r
7154 devices_[i] = getDeviceInfo( i );
\r
7157 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7158 unsigned int firstChannel, unsigned int sampleRate,
\r
7159 RtAudioFormat format, unsigned int *bufferSize,
\r
7160 RtAudio::StreamOptions *options )
\r
7163 #if defined(__RTAUDIO_DEBUG__)
\r
7164 snd_output_t *out;
\r
7165 snd_output_stdio_attach(&out, stderr, 0);
\r
7168 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7170 unsigned nDevices = 0;
\r
7171 int result, subdevice, card;
\r
7173 snd_ctl_t *chandle;
\r
7175 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7176 snprintf(name, sizeof(name), "%s", "default");
\r
7178 // Count cards and devices
\r
7180 snd_card_next( &card );
\r
7181 while ( card >= 0 ) {
\r
7182 sprintf( name, "hw:%d", card );
\r
7183 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7184 if ( result < 0 ) {
\r
7185 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7186 errorText_ = errorStream_.str();
\r
7191 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7192 if ( result < 0 ) break;
\r
7193 if ( subdevice < 0 ) break;
\r
7194 if ( nDevices == device ) {
\r
7195 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7196 snd_ctl_close( chandle );
\r
7201 snd_ctl_close( chandle );
\r
7202 snd_card_next( &card );
\r
7205 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7206 if ( result == 0 ) {
\r
7207 if ( nDevices == device ) {
\r
7208 strcpy( name, "default" );
\r
7214 if ( nDevices == 0 ) {
\r
7215 // This should not happen because a check is made before this function is called.
\r
7216 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7220 if ( device >= nDevices ) {
\r
7221 // This should not happen because a check is made before this function is called.
\r
7222 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7229 // The getDeviceInfo() function will not work for a device that is
\r
7230 // already open. Thus, we'll probe the system before opening a
\r
7231 // stream and save the results for use by getDeviceInfo().
\r
7232 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7233 this->saveDeviceInfo();
\r
7235 snd_pcm_stream_t stream;
\r
7236 if ( mode == OUTPUT )
\r
7237 stream = SND_PCM_STREAM_PLAYBACK;
\r
7239 stream = SND_PCM_STREAM_CAPTURE;
\r
7241 snd_pcm_t *phandle;
\r
7242 int openMode = SND_PCM_ASYNC;
\r
7243 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7244 if ( result < 0 ) {
\r
7245 if ( mode == OUTPUT )
\r
7246 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7248 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7249 errorText_ = errorStream_.str();
\r
7253 // Fill the parameter structure.
\r
7254 snd_pcm_hw_params_t *hw_params;
\r
7255 snd_pcm_hw_params_alloca( &hw_params );
\r
7256 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7257 if ( result < 0 ) {
\r
7258 snd_pcm_close( phandle );
\r
7259 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7260 errorText_ = errorStream_.str();
\r
7264 #if defined(__RTAUDIO_DEBUG__)
\r
7265 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7266 snd_pcm_hw_params_dump( hw_params, out );
\r
7269 // Set access ... check user preference.
\r
7270 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7271 stream_.userInterleaved = false;
\r
7272 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7273 if ( result < 0 ) {
\r
7274 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7275 stream_.deviceInterleaved[mode] = true;
\r
7278 stream_.deviceInterleaved[mode] = false;
\r
7281 stream_.userInterleaved = true;
\r
7282 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7283 if ( result < 0 ) {
\r
7284 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7285 stream_.deviceInterleaved[mode] = false;
\r
7288 stream_.deviceInterleaved[mode] = true;
\r
7291 if ( result < 0 ) {
\r
7292 snd_pcm_close( phandle );
\r
7293 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7294 errorText_ = errorStream_.str();
\r
7298 // Determine how to set the device format.
\r
7299 stream_.userFormat = format;
\r
7300 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7302 if ( format == RTAUDIO_SINT8 )
\r
7303 deviceFormat = SND_PCM_FORMAT_S8;
\r
7304 else if ( format == RTAUDIO_SINT16 )
\r
7305 deviceFormat = SND_PCM_FORMAT_S16;
\r
7306 else if ( format == RTAUDIO_SINT24 )
\r
7307 deviceFormat = SND_PCM_FORMAT_S24;
\r
7308 else if ( format == RTAUDIO_SINT32 )
\r
7309 deviceFormat = SND_PCM_FORMAT_S32;
\r
7310 else if ( format == RTAUDIO_FLOAT32 )
\r
7311 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7312 else if ( format == RTAUDIO_FLOAT64 )
\r
7313 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7315 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7316 stream_.deviceFormat[mode] = format;
\r
7320 // The user requested format is not natively supported by the device.
\r
7321 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7322 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7323 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7327 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7328 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7329 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7333 deviceFormat = SND_PCM_FORMAT_S32;
\r
7334 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7335 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7339 deviceFormat = SND_PCM_FORMAT_S24;
\r
7340 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7341 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7345 deviceFormat = SND_PCM_FORMAT_S16;
\r
7346 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7347 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7351 deviceFormat = SND_PCM_FORMAT_S8;
\r
7352 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7353 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7357 // If we get here, no supported format was found.
\r
7358 snd_pcm_close( phandle );
\r
7359 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7360 errorText_ = errorStream_.str();
\r
7364 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7365 if ( result < 0 ) {
\r
7366 snd_pcm_close( phandle );
\r
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7368 errorText_ = errorStream_.str();
\r
7372 // Determine whether byte-swaping is necessary.
\r
7373 stream_.doByteSwap[mode] = false;
\r
7374 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7375 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7376 if ( result == 0 )
\r
7377 stream_.doByteSwap[mode] = true;
\r
7378 else if (result < 0) {
\r
7379 snd_pcm_close( phandle );
\r
7380 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7381 errorText_ = errorStream_.str();
\r
7386 // Set the sample rate.
\r
7387 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7388 if ( result < 0 ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7391 errorText_ = errorStream_.str();
\r
7395 // Determine the number of channels for this device. We support a possible
\r
7396 // minimum device channel number > than the value requested by the user.
\r
7397 stream_.nUserChannels[mode] = channels;
\r
7398 unsigned int value;
\r
7399 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7400 unsigned int deviceChannels = value;
\r
7401 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7402 snd_pcm_close( phandle );
\r
7403 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7404 errorText_ = errorStream_.str();
\r
7408 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7409 if ( result < 0 ) {
\r
7410 snd_pcm_close( phandle );
\r
7411 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7412 errorText_ = errorStream_.str();
\r
7415 deviceChannels = value;
\r
7416 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7417 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7419 // Set the device channels.
\r
7420 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7421 if ( result < 0 ) {
\r
7422 snd_pcm_close( phandle );
\r
7423 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7424 errorText_ = errorStream_.str();
\r
7428 // Set the buffer (or period) size.
\r
7430 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7431 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7432 if ( result < 0 ) {
\r
7433 snd_pcm_close( phandle );
\r
7434 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7435 errorText_ = errorStream_.str();
\r
7438 *bufferSize = periodSize;
\r
7440 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7441 unsigned int periods = 0;
\r
7442 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7443 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7444 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7445 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7446 if ( result < 0 ) {
\r
7447 snd_pcm_close( phandle );
\r
7448 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7449 errorText_ = errorStream_.str();
\r
7453 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7454 // MUST be the same in both directions!
\r
7455 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7456 snd_pcm_close( phandle );
\r
7457 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7458 errorText_ = errorStream_.str();
\r
7462 stream_.bufferSize = *bufferSize;
\r
7464 // Install the hardware configuration
\r
7465 result = snd_pcm_hw_params( phandle, hw_params );
\r
7466 if ( result < 0 ) {
\r
7467 snd_pcm_close( phandle );
\r
7468 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7469 errorText_ = errorStream_.str();
\r
7473 #if defined(__RTAUDIO_DEBUG__)
\r
7474 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7475 snd_pcm_hw_params_dump( hw_params, out );
\r
7478 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7479 snd_pcm_sw_params_t *sw_params = NULL;
\r
7480 snd_pcm_sw_params_alloca( &sw_params );
\r
7481 snd_pcm_sw_params_current( phandle, sw_params );
\r
7482 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7483 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7484 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7486 // The following two settings were suggested by Theo Veenker
\r
7487 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7488 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7490 // here are two options for a fix
\r
7491 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7492 snd_pcm_uframes_t val;
\r
7493 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7494 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7496 result = snd_pcm_sw_params( phandle, sw_params );
\r
7497 if ( result < 0 ) {
\r
7498 snd_pcm_close( phandle );
\r
7499 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7500 errorText_ = errorStream_.str();
\r
7504 #if defined(__RTAUDIO_DEBUG__)
\r
7505 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7506 snd_pcm_sw_params_dump( sw_params, out );
\r
7509 // Set flags for buffer conversion
\r
7510 stream_.doConvertBuffer[mode] = false;
\r
7511 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7512 stream_.doConvertBuffer[mode] = true;
\r
7513 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7514 stream_.doConvertBuffer[mode] = true;
\r
7515 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7516 stream_.nUserChannels[mode] > 1 )
\r
7517 stream_.doConvertBuffer[mode] = true;
\r
7519 // Allocate the ApiHandle if necessary and then save.
\r
7520 AlsaHandle *apiInfo = 0;
\r
7521 if ( stream_.apiHandle == 0 ) {
\r
7523 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7525 catch ( std::bad_alloc& ) {
\r
7526 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7530 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7531 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7535 stream_.apiHandle = (void *) apiInfo;
\r
7536 apiInfo->handles[0] = 0;
\r
7537 apiInfo->handles[1] = 0;
\r
7540 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7542 apiInfo->handles[mode] = phandle;
\r
7545 // Allocate necessary internal buffers.
\r
7546 unsigned long bufferBytes;
\r
7547 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7548 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7549 if ( stream_.userBuffer[mode] == NULL ) {
\r
7550 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7554 if ( stream_.doConvertBuffer[mode] ) {
\r
7556 bool makeBuffer = true;
\r
7557 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7558 if ( mode == INPUT ) {
\r
7559 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7560 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7561 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7565 if ( makeBuffer ) {
\r
7566 bufferBytes *= *bufferSize;
\r
7567 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7568 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7569 if ( stream_.deviceBuffer == NULL ) {
\r
7570 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7576 stream_.sampleRate = sampleRate;
\r
7577 stream_.nBuffers = periods;
\r
7578 stream_.device[mode] = device;
\r
7579 stream_.state = STREAM_STOPPED;
\r
7581 // Setup the buffer conversion information structure.
\r
7582 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7584 // Setup thread if necessary.
\r
7585 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7586 // We had already set up an output stream.
\r
7587 stream_.mode = DUPLEX;
\r
7588 // Link the streams if possible.
\r
7589 apiInfo->synchronized = false;
\r
7590 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7591 apiInfo->synchronized = true;
\r
7593 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7594 error( RtAudioError::WARNING );
\r
7598 stream_.mode = mode;
\r
7600 // Setup callback thread.
\r
7601 stream_.callbackInfo.object = (void *) this;
\r
7603 // Set the thread attributes for joinable and realtime scheduling
\r
7604 // priority (optional). The higher priority will only take affect
\r
7605 // if the program is run as root or suid. Note, under Linux
\r
7606 // processes with CAP_SYS_NICE privilege, a user can change
\r
7607 // scheduling policy and priority (thus need not be root). See
\r
7608 // POSIX "capabilities".
\r
7609 pthread_attr_t attr;
\r
7610 pthread_attr_init( &attr );
\r
7611 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7613 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7614 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7615 // We previously attempted to increase the audio callback priority
\r
7616 // to SCHED_RR here via the attributes. However, while no errors
\r
7617 // were reported in doing so, it did not work. So, now this is
\r
7618 // done in the alsaCallbackHandler function.
\r
7619 stream_.callbackInfo.doRealtime = true;
\r
7620 int priority = options->priority;
\r
7621 int min = sched_get_priority_min( SCHED_RR );
\r
7622 int max = sched_get_priority_max( SCHED_RR );
\r
7623 if ( priority < min ) priority = min;
\r
7624 else if ( priority > max ) priority = max;
\r
7625 stream_.callbackInfo.priority = priority;
\r
7629 stream_.callbackInfo.isRunning = true;
\r
7630 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7631 pthread_attr_destroy( &attr );
\r
7633 stream_.callbackInfo.isRunning = false;
\r
7634 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7643 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7644 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7645 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7647 stream_.apiHandle = 0;
\r
7650 if ( phandle) snd_pcm_close( phandle );
\r
7652 for ( int i=0; i<2; i++ ) {
\r
7653 if ( stream_.userBuffer[i] ) {
\r
7654 free( stream_.userBuffer[i] );
\r
7655 stream_.userBuffer[i] = 0;
\r
7659 if ( stream_.deviceBuffer ) {
\r
7660 free( stream_.deviceBuffer );
\r
7661 stream_.deviceBuffer = 0;
\r
7664 stream_.state = STREAM_CLOSED;
\r
// Close an open ALSA stream: stop the callback thread (waking it first if the
// stream is stopped and the thread is blocked on runnable_cv), drop any pcm
// data still queued, close both pcm handles, and free the user/device buffers.
// Emits a WARNING (not an error) if no stream is open.
// NOTE(review): this listing is missing several original source lines
// (closing braces, the `delete apiInfo;` cleanup, etc.) lost in extraction.
7668 void RtApiAlsa :: closeStream()
\r
7670 if ( stream_.state == STREAM_CLOSED ) {
\r
7671 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7672 error( RtAudioError::WARNING );
\r
7676 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Tell the callback thread to exit its loop, then wake it if it is parked.
7677 stream_.callbackInfo.isRunning = false;
\r
7678 MUTEX_LOCK( &stream_.mutex );
\r
7679 if ( stream_.state == STREAM_STOPPED ) {
\r
7680 apiInfo->runnable = true;
\r
7681 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7683 MUTEX_UNLOCK( &stream_.mutex );
\r
// Wait for the callback thread to terminate before tearing anything down.
7684 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7686 if ( stream_.state == STREAM_RUNNING ) {
\r
7687 stream_.state = STREAM_STOPPED;
\r
// snd_pcm_drop discards pending frames immediately (no drain on close).
7688 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7689 snd_pcm_drop( apiInfo->handles[0] );
\r
7690 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7691 snd_pcm_drop( apiInfo->handles[1] );
\r
7695 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
// handles[0] = playback, handles[1] = capture; either may be unopened (0).
7696 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7697 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7699 stream_.apiHandle = 0;
\r
// Release per-direction user buffers and the shared device buffer.
7702 for ( int i=0; i<2; i++ ) {
\r
7703 if ( stream_.userBuffer[i] ) {
\r
7704 free( stream_.userBuffer[i] );
\r
7705 stream_.userBuffer[i] = 0;
\r
7709 if ( stream_.deviceBuffer ) {
\r
7710 free( stream_.deviceBuffer );
\r
7711 stream_.deviceBuffer = 0;
\r
7714 stream_.mode = UNINITIALIZED;
\r
7715 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: prepare each pcm handle that is not already in the
// PREPARED state, mark the stream RUNNING, and wake the parked callback
// thread via runnable_cv. Warns (and returns) if already running.
// NOTE(review): several original lines (braces, goto/unlock labels) are
// missing from this extraction.
7718 void RtApiAlsa :: startStream()
\r
7720 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7723 if ( stream_.state == STREAM_RUNNING ) {
\r
7724 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7725 error( RtAudioError::WARNING );
\r
7729 MUTEX_LOCK( &stream_.mutex );
\r
7732 snd_pcm_state_t state;
\r
7733 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7734 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Prepare the playback handle if needed.
7735 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7736 state = snd_pcm_state( handle[0] );
\r
7737 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7738 result = snd_pcm_prepare( handle[0] );
\r
7739 if ( result < 0 ) {
\r
7740 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7741 errorText_ = errorStream_.str();
\r
// Prepare the capture handle, but only when it is not linked to (and thus
// started together with) the playback handle.
7747 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7748 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7749 state = snd_pcm_state( handle[1] );
\r
7750 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7751 result = snd_pcm_prepare( handle[1] );
\r
7752 if ( result < 0 ) {
\r
7753 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7754 errorText_ = errorStream_.str();
\r
7760 stream_.state = STREAM_RUNNING;
\r
// Release the callback thread blocked in callbackEvent().
7763 apiInfo->runnable = true;
\r
7764 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7765 MUTEX_UNLOCK( &stream_.mutex );
\r
7767 if ( result >= 0 ) return;
\r
7768 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully: drain (finish playing) buffered output
// unless the handles are linked, drop capture data, and park the callback
// thread. Warns (and returns) if the stream is already stopped.
// NOTE(review): several original lines (braces, goto/unlock labels) are
// missing from this extraction.
7771 void RtApiAlsa :: stopStream()
\r
7774 if ( stream_.state == STREAM_STOPPED ) {
\r
7775 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7776 error( RtAudioError::WARNING );
\r
// State is flipped before taking the mutex so callbackEvent sees it.
7780 stream_.state = STREAM_STOPPED;
\r
7781 MUTEX_LOCK( &stream_.mutex );
\r
7784 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7785 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7786 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Linked (synchronized) handles: drop immediately; otherwise drain so
// already-queued output frames are played out.
7787 if ( apiInfo->synchronized )
\r
7788 result = snd_pcm_drop( handle[0] );
\r
7790 result = snd_pcm_drain( handle[0] );
\r
7791 if ( result < 0 ) {
\r
7792 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7793 errorText_ = errorStream_.str();
\r
7798 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7799 result = snd_pcm_drop( handle[1] );
\r
7800 if ( result < 0 ) {
\r
7801 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7802 errorText_ = errorStream_.str();
\r
7808 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7809 MUTEX_UNLOCK( &stream_.mutex );
\r
7811 if ( result >= 0 ) return;
\r
7812 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream: identical to stopStream() except output is
// dropped unconditionally (snd_pcm_drop) instead of drained, discarding any
// frames still queued for playback.
// NOTE(review): several original lines (braces, goto/unlock labels) are
// missing from this extraction.
7815 void RtApiAlsa :: abortStream()
\r
7818 if ( stream_.state == STREAM_STOPPED ) {
\r
7819 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7820 error( RtAudioError::WARNING );
\r
7824 stream_.state = STREAM_STOPPED;
\r
7825 MUTEX_LOCK( &stream_.mutex );
\r
7828 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7829 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7830 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7831 result = snd_pcm_drop( handle[0] );
\r
7832 if ( result < 0 ) {
\r
7833 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7834 errorText_ = errorStream_.str();
\r
7839 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7840 result = snd_pcm_drop( handle[1] );
\r
7841 if ( result < 0 ) {
\r
7842 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7843 errorText_ = errorStream_.str();
\r
7849 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7850 MUTEX_UNLOCK( &stream_.mutex );
\r
7852 if ( result >= 0 ) return;
\r
7853 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the audio callback loop, run on the callback thread:
//   1. If the stream is STOPPED, park on runnable_cv until woken.
//   2. Invoke the user callback with the current buffers / xrun status.
//   3. Read capture frames (readi/readn per interleaving), byte-swap and
//      convert formats as flagged, then write playback frames likewise.
//   4. -EPIPE from read/write means an xrun: re-prepare the handle, flag
//      the xrun for the next callback, and continue with a WARNING.
// NOTE(review): many original lines (braces, else keywords, unlock label,
// abortStream/stopStream dispatch) are missing from this extraction.
7856 void RtApiAlsa :: callbackEvent()
\r
7858 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Park while stopped; re-check state after waking (spurious wakeups and
// close() both signal this condition variable).
7859 if ( stream_.state == STREAM_STOPPED ) {
\r
7860 MUTEX_LOCK( &stream_.mutex );
\r
7861 while ( !apiInfo->runnable )
\r
7862 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7864 if ( stream_.state != STREAM_RUNNING ) {
\r
7865 MUTEX_UNLOCK( &stream_.mutex );
\r
7868 MUTEX_UNLOCK( &stream_.mutex );
\r
7871 if ( stream_.state == STREAM_CLOSED ) {
\r
7872 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7873 error( RtAudioError::WARNING );
\r
// Report any xrun recorded by the previous cycle to the user callback,
// then clear the flag. xrun[0] = output underflow, xrun[1] = input overflow.
7877 int doStopStream = 0;
\r
7878 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7879 double streamTime = getStreamTime();
\r
7880 RtAudioStreamStatus status = 0;
\r
7881 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7882 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7883 apiInfo->xrun[0] = false;
\r
7885 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7886 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7887 apiInfo->xrun[1] = false;
\r
7889 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7890 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
// Return value 2 requests an abort (handled in code missing from this
// listing); 1 requests a graceful stop at the end of this cycle.
7892 if ( doStopStream == 2 ) {
\r
7897 MUTEX_LOCK( &stream_.mutex );
\r
7899 // The state might change while waiting on a mutex.
\r
7900 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7905 snd_pcm_t **handle;
\r
7906 snd_pcm_sframes_t frames;
\r
7907 RtAudioFormat format;
\r
7908 handle = (snd_pcm_t **) apiInfo->handles;
\r
// ---- Capture side -------------------------------------------------------
7910 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7912 // Setup parameters.
\r
// Read into deviceBuffer when a format/channel/interleave conversion is
// pending; otherwise read straight into the user's buffer.
7913 if ( stream_.doConvertBuffer[1] ) {
\r
7914 buffer = stream_.deviceBuffer;
\r
7915 channels = stream_.nDeviceChannels[1];
\r
7916 format = stream_.deviceFormat[1];
\r
7919 buffer = stream_.userBuffer[1];
\r
7920 channels = stream_.nUserChannels[1];
\r
7921 format = stream_.userFormat;
\r
7924 // Read samples from device in interleaved/non-interleaved format.
\r
7925 if ( stream_.deviceInterleaved[1] )
\r
7926 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
// Non-interleaved: build one pointer per channel into the flat buffer.
7928 void *bufs[channels];
\r
7929 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7930 for ( int i=0; i<channels; i++ )
\r
7931 bufs[i] = (void *) (buffer + (i * offset));
\r
7932 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7935 if ( result < (int) stream_.bufferSize ) {
\r
7936 // Either an error or overrun occured.
\r
7937 if ( result == -EPIPE ) {
\r
7938 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7939 if ( state == SND_PCM_STATE_XRUN ) {
\r
7940 apiInfo->xrun[1] = true;
\r
// Recover from the overrun and keep going; the flag above surfaces it
// to the user on the next callback.
7941 result = snd_pcm_prepare( handle[1] );
\r
7942 if ( result < 0 ) {
\r
7943 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7944 errorText_ = errorStream_.str();
\r
7948 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7949 errorText_ = errorStream_.str();
\r
7953 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7954 errorText_ = errorStream_.str();
\r
7956 error( RtAudioError::WARNING );
\r
7960 // Do byte swapping if necessary.
\r
7961 if ( stream_.doByteSwap[1] )
\r
7962 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7964 // Do buffer conversion if necessary.
\r
7965 if ( stream_.doConvertBuffer[1] )
\r
7966 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7968 // Check stream latency
\r
7969 result = snd_pcm_delay( handle[1], &frames );
\r
7970 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
// ---- Playback side ------------------------------------------------------
7975 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7977 // Setup parameters and do buffer conversion if necessary.
\r
7978 if ( stream_.doConvertBuffer[0] ) {
\r
7979 buffer = stream_.deviceBuffer;
\r
7980 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7981 channels = stream_.nDeviceChannels[0];
\r
7982 format = stream_.deviceFormat[0];
\r
7985 buffer = stream_.userBuffer[0];
\r
7986 channels = stream_.nUserChannels[0];
\r
7987 format = stream_.userFormat;
\r
7990 // Do byte swapping if necessary.
\r
7991 if ( stream_.doByteSwap[0] )
\r
7992 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7994 // Write samples to device in interleaved/non-interleaved format.
\r
7995 if ( stream_.deviceInterleaved[0] )
\r
7996 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7998 void *bufs[channels];
\r
7999 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8000 for ( int i=0; i<channels; i++ )
\r
8001 bufs[i] = (void *) (buffer + (i * offset));
\r
8002 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8005 if ( result < (int) stream_.bufferSize ) {
\r
8006 // Either an error or underrun occured.
\r
8007 if ( result == -EPIPE ) {
\r
8008 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8009 if ( state == SND_PCM_STATE_XRUN ) {
\r
8010 apiInfo->xrun[0] = true;
\r
8011 result = snd_pcm_prepare( handle[0] );
\r
8012 if ( result < 0 ) {
\r
8013 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8014 errorText_ = errorStream_.str();
\r
8018 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8019 errorText_ = errorStream_.str();
\r
8023 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8024 errorText_ = errorStream_.str();
\r
8026 error( RtAudioError::WARNING );
\r
8030 // Check stream latency
\r
8031 result = snd_pcm_delay( handle[0], &frames );
\r
8032 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8036 MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream clock; honor a deferred stop request from the callback.
8038 RtApi::tickStreamTime();
\r
8039 if ( doStopStream == 1 ) this->stopStream();
\r
8042 static void *alsaCallbackHandler( void *ptr )
\r
8044 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8045 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8046 bool *isRunning = &info->isRunning;
\r
8048 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8049 if ( &info->doRealtime ) {
\r
8050 pthread_t tID = pthread_self(); // ID of this thread
\r
8051 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8052 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8056 while ( *isRunning == true ) {
\r
8057 pthread_testcancel();
\r
8058 object->callbackEvent();
\r
8061 pthread_exit( NULL );
\r
8064 //******************** End of __LINUX_ALSA__ *********************//
\r
8067 #if defined(__LINUX_PULSE__)
\r
8069 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8070 // and Tristan Matthews.
\r
8072 #include <pulse/error.h>
\r
8073 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio back-end advertises; the list is
// zero-terminated so it can be walked with a simple `*sr` loop
// (see RtApiPulse::getDeviceInfo).
8076 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8077 44100, 48000, 96000, 0};
\r
// Pairs an RtAudio sample format with its PulseAudio equivalent.
8079 struct rtaudio_pa_format_mapping_t {
\r
8080 RtAudioFormat rtaudio_format;
\r
8081 pa_sample_format_t pa_format;
\r
// RtAudio <-> PulseAudio format table, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8084 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8085 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8086 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8087 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8088 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio back-end, stored in stream_.apiHandle.
// NOTE(review): member declarations between s_play and runnable_cv (the
// capture connection, thread id, runnable flag initialized by the ctor) are
// missing from this extraction.
8090 struct PulseAudioHandle {
\r
8091 pa_simple *s_play; // playback connection (pa_simple API)
\r
8094 pthread_cond_t runnable_cv; // wakes the callback thread when runnable
\r
8096 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: ensure an open stream is torn down before the object dies.
// NOTE(review): the statement guarded by this condition (presumably
// closeStream()) is missing from this extraction -- confirm against upstream.
8099 RtApiPulse::~RtApiPulse()
\r
8101 if ( stream_.state != STREAM_CLOSED )
\r
// Device count for the PulseAudio back-end.
// NOTE(review): the function body is missing from this extraction.
8105 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Return a fixed DeviceInfo for the single virtual "PulseAudio" device:
// stereo in/out/duplex, default for both directions, with the rates from
// SUPPORTED_SAMPLERATES and the three formats the back-end maps to PA.
// The device index is ignored (there is only one device).
8110 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8112 RtAudio::DeviceInfo info;
\r
8113 info.probed = true;
\r
8114 info.name = "PulseAudio";
\r
8115 info.outputChannels = 2;
\r
8116 info.inputChannels = 2;
\r
8117 info.duplexChannels = 2;
\r
8118 info.isDefaultOutput = true;
\r
8119 info.isDefaultInput = true;
\r
// SUPPORTED_SAMPLERATES is zero-terminated.
8121 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8122 info.sampleRates.push_back( *sr );
\r
8124 info.preferredSampleRate = 48000;
\r
8125 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Entry point for the PulseAudio callback thread: repeatedly invoke
// RtApiPulse::callbackEvent() until isRunning is cleared by closeStream(),
// testing for cancellation between cycles.
8130 static void *pulseaudio_callback( void * user )
\r
8132 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8133 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
// volatile: flag is written from another thread (closeStream).
8134 volatile bool *isRunning = &cbi->isRunning;
\r
8136 while ( *isRunning ) {
\r
8137 pthread_testcancel();
\r
8138 context->callbackEvent();
\r
8141 pthread_exit( NULL );
\r
// Close a PulseAudio stream: stop and join the callback thread (waking it
// if parked), flush and free the pa_simple connections, destroy the handle,
// and free the user buffers.
// NOTE(review): several original lines (braces, the `if ( pah->s_rec )`
// guard, delete of pah, deviceBuffer cleanup) are missing from this
// extraction.
8144 void RtApiPulse::closeStream( void )
\r
8146 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// Signal the callback thread to exit, waking it if blocked on runnable_cv.
8148 stream_.callbackInfo.isRunning = false;
\r
8150 MUTEX_LOCK( &stream_.mutex );
\r
8151 if ( stream_.state == STREAM_STOPPED ) {
\r
8152 pah->runnable = true;
\r
8153 pthread_cond_signal( &pah->runnable_cv );
\r
8155 MUTEX_UNLOCK( &stream_.mutex );
\r
8157 pthread_join( pah->thread, 0 );
\r
// Flush unplayed output before freeing the playback connection.
8158 if ( pah->s_play ) {
\r
8159 pa_simple_flush( pah->s_play, NULL );
\r
8160 pa_simple_free( pah->s_play );
\r
8163 pa_simple_free( pah->s_rec );
\r
8165 pthread_cond_destroy( &pah->runnable_cv );
\r
8167 stream_.apiHandle = 0;
\r
// Free per-direction user buffers.
8170 if ( stream_.userBuffer[0] ) {
\r
8171 free( stream_.userBuffer[0] );
\r
8172 stream_.userBuffer[0] = 0;
\r
8174 if ( stream_.userBuffer[1] ) {
\r
8175 free( stream_.userBuffer[1] );
\r
8176 stream_.userBuffer[1] = 0;
\r
8179 stream_.state = STREAM_CLOSED;
\r
8180 stream_.mode = UNINITIALIZED;
\r
8183 void RtApiPulse::callbackEvent( void )
\r
8185 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8187 if ( stream_.state == STREAM_STOPPED ) {
\r
8188 MUTEX_LOCK( &stream_.mutex );
\r
8189 while ( !pah->runnable )
\r
8190 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8192 if ( stream_.state != STREAM_RUNNING ) {
\r
8193 MUTEX_UNLOCK( &stream_.mutex );
\r
8196 MUTEX_UNLOCK( &stream_.mutex );
\r
8199 if ( stream_.state == STREAM_CLOSED ) {
\r
8200 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8201 "this shouldn't happen!";
\r
8202 error( RtAudioError::WARNING );
\r
8206 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8207 double streamTime = getStreamTime();
\r
8208 RtAudioStreamStatus status = 0;
\r
8209 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8210 stream_.bufferSize, streamTime, status,
\r
8211 stream_.callbackInfo.userData );
\r
8213 if ( doStopStream == 2 ) {
\r
8218 MUTEX_LOCK( &stream_.mutex );
\r
8219 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8220 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8222 if ( stream_.state != STREAM_RUNNING )
\r
8227 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8228 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8229 convertBuffer( stream_.deviceBuffer,
\r
8230 stream_.userBuffer[OUTPUT],
\r
8231 stream_.convertInfo[OUTPUT] );
\r
8232 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8233 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8235 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8236 formatBytes( stream_.userFormat );
\r
8238 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8239 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8240 pa_strerror( pa_error ) << ".";
\r
8241 errorText_ = errorStream_.str();
\r
8242 error( RtAudioError::WARNING );
\r
8246 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8247 if ( stream_.doConvertBuffer[INPUT] )
\r
8248 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8249 formatBytes( stream_.deviceFormat[INPUT] );
\r
8251 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8252 formatBytes( stream_.userFormat );
\r
8254 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8255 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8256 pa_strerror( pa_error ) << ".";
\r
8257 errorText_ = errorStream_.str();
\r
8258 error( RtAudioError::WARNING );
\r
8260 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8261 convertBuffer( stream_.userBuffer[INPUT],
\r
8262 stream_.deviceBuffer,
\r
8263 stream_.convertInfo[INPUT] );
\r
8268 MUTEX_UNLOCK( &stream_.mutex );
\r
8269 RtApi::tickStreamTime();
\r
8271 if ( doStopStream == 1 )
\r
8275 void RtApiPulse::startStream( void )
\r
8277 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8279 if ( stream_.state == STREAM_CLOSED ) {
\r
8280 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8281 error( RtAudioError::INVALID_USE );
\r
8284 if ( stream_.state == STREAM_RUNNING ) {
\r
8285 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8286 error( RtAudioError::WARNING );
\r
8290 MUTEX_LOCK( &stream_.mutex );
\r
8292 stream_.state = STREAM_RUNNING;
\r
8294 pah->runnable = true;
\r
8295 pthread_cond_signal( &pah->runnable_cv );
\r
8296 MUTEX_UNLOCK( &stream_.mutex );
\r
8299 void RtApiPulse::stopStream( void )
\r
8301 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8303 if ( stream_.state == STREAM_CLOSED ) {
\r
8304 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8305 error( RtAudioError::INVALID_USE );
\r
8308 if ( stream_.state == STREAM_STOPPED ) {
\r
8309 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8310 error( RtAudioError::WARNING );
\r
8314 stream_.state = STREAM_STOPPED;
\r
8315 MUTEX_LOCK( &stream_.mutex );
\r
8317 if ( pah && pah->s_play ) {
\r
8319 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8320 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8321 pa_strerror( pa_error ) << ".";
\r
8322 errorText_ = errorStream_.str();
\r
8323 MUTEX_UNLOCK( &stream_.mutex );
\r
8324 error( RtAudioError::SYSTEM_ERROR );
\r
8329 stream_.state = STREAM_STOPPED;
\r
8330 MUTEX_UNLOCK( &stream_.mutex );
\r
8333 void RtApiPulse::abortStream( void )
\r
8335 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8337 if ( stream_.state == STREAM_CLOSED ) {
\r
8338 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8339 error( RtAudioError::INVALID_USE );
\r
8342 if ( stream_.state == STREAM_STOPPED ) {
\r
8343 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8344 error( RtAudioError::WARNING );
\r
8348 stream_.state = STREAM_STOPPED;
\r
8349 MUTEX_LOCK( &stream_.mutex );
\r
8351 if ( pah && pah->s_play ) {
\r
8353 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8354 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8355 pa_strerror( pa_error ) << ".";
\r
8356 errorText_ = errorStream_.str();
\r
8357 MUTEX_UNLOCK( &stream_.mutex );
\r
8358 error( RtAudioError::SYSTEM_ERROR );
\r
8363 stream_.state = STREAM_STOPPED;
\r
8364 MUTEX_UNLOCK( &stream_.mutex );
\r
8367 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8368 unsigned int channels, unsigned int firstChannel,
\r
8369 unsigned int sampleRate, RtAudioFormat format,
\r
8370 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8372 PulseAudioHandle *pah = 0;
\r
8373 unsigned long bufferBytes = 0;
\r
8374 pa_sample_spec ss;
\r
8376 if ( device != 0 ) return false;
\r
8377 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8378 if ( channels != 1 && channels != 2 ) {
\r
8379 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8382 ss.channels = channels;
\r
8384 if ( firstChannel != 0 ) return false;
\r
8386 bool sr_found = false;
\r
8387 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8388 if ( sampleRate == *sr ) {
\r
8390 stream_.sampleRate = sampleRate;
\r
8391 ss.rate = sampleRate;
\r
8395 if ( !sr_found ) {
\r
8396 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8400 bool sf_found = 0;
\r
8401 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8402 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8403 if ( format == sf->rtaudio_format ) {
\r
8405 stream_.userFormat = sf->rtaudio_format;
\r
8406 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8407 ss.format = sf->pa_format;
\r
8411 if ( !sf_found ) { // Use internal data format conversion.
\r
8412 stream_.userFormat = format;
\r
8413 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8414 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8417 // Set other stream parameters.
\r
8418 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8419 else stream_.userInterleaved = true;
\r
8420 stream_.deviceInterleaved[mode] = true;
\r
8421 stream_.nBuffers = 1;
\r
8422 stream_.doByteSwap[mode] = false;
\r
8423 stream_.nUserChannels[mode] = channels;
\r
8424 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8425 stream_.channelOffset[mode] = 0;
\r
8426 std::string streamName = "RtAudio";
\r
8428 // Set flags for buffer conversion.
\r
8429 stream_.doConvertBuffer[mode] = false;
\r
8430 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8431 stream_.doConvertBuffer[mode] = true;
\r
8432 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8433 stream_.doConvertBuffer[mode] = true;
\r
8435 // Allocate necessary internal buffers.
\r
8436 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8437 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8438 if ( stream_.userBuffer[mode] == NULL ) {
\r
8439 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8442 stream_.bufferSize = *bufferSize;
\r
8444 if ( stream_.doConvertBuffer[mode] ) {
\r
8446 bool makeBuffer = true;
\r
8447 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8448 if ( mode == INPUT ) {
\r
8449 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8450 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8451 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8455 if ( makeBuffer ) {
\r
8456 bufferBytes *= *bufferSize;
\r
8457 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8458 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8459 if ( stream_.deviceBuffer == NULL ) {
\r
8460 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8466 stream_.device[mode] = device;
\r
8468 // Setup the buffer conversion information structure.
\r
8469 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8471 if ( !stream_.apiHandle ) {
\r
8472 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8474 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8478 stream_.apiHandle = pah;
\r
8479 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8480 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8484 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8487 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8490 pa_buffer_attr buffer_attr;
\r
8491 buffer_attr.fragsize = bufferBytes;
\r
8492 buffer_attr.maxlength = -1;
\r
8494 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8495 if ( !pah->s_rec ) {
\r
8496 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8501 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8502 if ( !pah->s_play ) {
\r
8503 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8511 if ( stream_.mode == UNINITIALIZED )
\r
8512 stream_.mode = mode;
\r
8513 else if ( stream_.mode == mode )
\r
8516 stream_.mode = DUPLEX;
\r
8518 if ( !stream_.callbackInfo.isRunning ) {
\r
8519 stream_.callbackInfo.object = this;
\r
8520 stream_.callbackInfo.isRunning = true;
\r
8521 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8522 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8527 stream_.state = STREAM_STOPPED;
\r
8531 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8532 pthread_cond_destroy( &pah->runnable_cv );
\r
8534 stream_.apiHandle = 0;
\r
8537 for ( int i=0; i<2; i++ ) {
\r
8538 if ( stream_.userBuffer[i] ) {
\r
8539 free( stream_.userBuffer[i] );
\r
8540 stream_.userBuffer[i] = 0;
\r
8544 if ( stream_.deviceBuffer ) {
\r
8545 free( stream_.deviceBuffer );
\r
8546 stream_.deviceBuffer = 0;
\r
8552 //******************** End of __LINUX_PULSE__ *********************//
\r
8555 #if defined(__LINUX_OSS__)
\r
8557 #include <unistd.h>
\r
8558 #include <sys/ioctl.h>
\r
8559 #include <unistd.h>
\r
8560 #include <fcntl.h>
\r
8561 #include <sys/soundcard.h>
\r
8562 #include <errno.h>
\r
8565 static void *ossCallbackHandler(void * ptr);
\r
8567 // A structure to hold various information related to the OSS API
\r
8568 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];                 // device ids (playback, capture)
  bool xrun[2];              // over/underrun flags per direction
  bool triggered;            // whether output has been triggered
  pthread_cond_t runnable;   // signaled to (re)start the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8579 RtApiOss :: RtApiOss()
\r
8581 // Nothing to do here.
\r
8584 RtApiOss :: ~RtApiOss()
\r
8586 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8589 unsigned int RtApiOss :: getDeviceCount( void )
\r
8591 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8592 if ( mixerfd == -1 ) {
\r
8593 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8594 error( RtAudioError::WARNING );
\r
8598 oss_sysinfo sysinfo;
\r
8599 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8601 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8602 error( RtAudioError::WARNING );
\r
8607 return sysinfo.numaudios;
\r
8610 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8612 RtAudio::DeviceInfo info;
\r
8613 info.probed = false;
\r
8615 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8616 if ( mixerfd == -1 ) {
\r
8617 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8618 error( RtAudioError::WARNING );
\r
8622 oss_sysinfo sysinfo;
\r
8623 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8624 if ( result == -1 ) {
\r
8626 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8627 error( RtAudioError::WARNING );
\r
8631 unsigned nDevices = sysinfo.numaudios;
\r
8632 if ( nDevices == 0 ) {
\r
8634 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8635 error( RtAudioError::INVALID_USE );
\r
8639 if ( device >= nDevices ) {
\r
8641 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8642 error( RtAudioError::INVALID_USE );
\r
8646 oss_audioinfo ainfo;
\r
8647 ainfo.dev = device;
\r
8648 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8650 if ( result == -1 ) {
\r
8651 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8652 errorText_ = errorStream_.str();
\r
8653 error( RtAudioError::WARNING );
\r
8658 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8659 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8660 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8661 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8662 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8665 // Probe data formats ... do for input
\r
8666 unsigned long mask = ainfo.iformats;
\r
8667 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8668 info.nativeFormats |= RTAUDIO_SINT16;
\r
8669 if ( mask & AFMT_S8 )
\r
8670 info.nativeFormats |= RTAUDIO_SINT8;
\r
8671 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8672 info.nativeFormats |= RTAUDIO_SINT32;
\r
8673 if ( mask & AFMT_FLOAT )
\r
8674 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8675 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8676 info.nativeFormats |= RTAUDIO_SINT24;
\r
8678 // Check that we have at least one supported format
\r
8679 if ( info.nativeFormats == 0 ) {
\r
8680 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8681 errorText_ = errorStream_.str();
\r
8682 error( RtAudioError::WARNING );
\r
8686 // Probe the supported sample rates.
\r
8687 info.sampleRates.clear();
\r
8688 if ( ainfo.nrates ) {
\r
8689 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8690 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8691 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8692 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8694 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8695 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8703 // Check min and max rate values;
\r
8704 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8705 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8706 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8708 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8709 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8714 if ( info.sampleRates.size() == 0 ) {
\r
8715 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8716 errorText_ = errorStream_.str();
\r
8717 error( RtAudioError::WARNING );
\r
8720 info.probed = true;
\r
8721 info.name = ainfo.name;
\r
8728 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8729 unsigned int firstChannel, unsigned int sampleRate,
\r
8730 RtAudioFormat format, unsigned int *bufferSize,
\r
8731 RtAudio::StreamOptions *options )
\r
8733 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8734 if ( mixerfd == -1 ) {
\r
8735 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8739 oss_sysinfo sysinfo;
\r
8740 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8741 if ( result == -1 ) {
\r
8743 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8747 unsigned nDevices = sysinfo.numaudios;
\r
8748 if ( nDevices == 0 ) {
\r
8749 // This should not happen because a check is made before this function is called.
\r
8751 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8755 if ( device >= nDevices ) {
\r
8756 // This should not happen because a check is made before this function is called.
\r
8758 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8762 oss_audioinfo ainfo;
\r
8763 ainfo.dev = device;
\r
8764 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8766 if ( result == -1 ) {
\r
8767 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8768 errorText_ = errorStream_.str();
\r
8772 // Check if device supports input or output
\r
8773 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8774 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8775 if ( mode == OUTPUT )
\r
8776 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8778 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8779 errorText_ = errorStream_.str();
\r
8784 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8785 if ( mode == OUTPUT )
\r
8786 flags |= O_WRONLY;
\r
8787 else { // mode == INPUT
\r
8788 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8789 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8790 close( handle->id[0] );
\r
8791 handle->id[0] = 0;
\r
8792 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8793 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8794 errorText_ = errorStream_.str();
\r
8797 // Check that the number previously set channels is the same.
\r
8798 if ( stream_.nUserChannels[0] != channels ) {
\r
8799 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8800 errorText_ = errorStream_.str();
\r
8806 flags |= O_RDONLY;
\r
8809 // Set exclusive access if specified.
\r
8810 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8812 // Try to open the device.
\r
8814 fd = open( ainfo.devnode, flags, 0 );
\r
8816 if ( errno == EBUSY )
\r
8817 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8819 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8820 errorText_ = errorStream_.str();
\r
8824 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8826 if ( flags | O_RDWR ) {
\r
8827 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8828 if ( result == -1) {
\r
8829 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8830 errorText_ = errorStream_.str();
\r
8836 // Check the device channel support.
\r
8837 stream_.nUserChannels[mode] = channels;
\r
8838 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8840 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8841 errorText_ = errorStream_.str();
\r
8845 // Set the number of channels.
\r
8846 int deviceChannels = channels + firstChannel;
\r
8847 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8848 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8850 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8851 errorText_ = errorStream_.str();
\r
8854 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8856 // Get the data format mask
\r
8858 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8859 if ( result == -1 ) {
\r
8861 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8862 errorText_ = errorStream_.str();
\r
8866 // Determine how to set the device format.
\r
8867 stream_.userFormat = format;
\r
8868 int deviceFormat = -1;
\r
8869 stream_.doByteSwap[mode] = false;
\r
8870 if ( format == RTAUDIO_SINT8 ) {
\r
8871 if ( mask & AFMT_S8 ) {
\r
8872 deviceFormat = AFMT_S8;
\r
8873 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8876 else if ( format == RTAUDIO_SINT16 ) {
\r
8877 if ( mask & AFMT_S16_NE ) {
\r
8878 deviceFormat = AFMT_S16_NE;
\r
8879 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8881 else if ( mask & AFMT_S16_OE ) {
\r
8882 deviceFormat = AFMT_S16_OE;
\r
8883 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8884 stream_.doByteSwap[mode] = true;
\r
8887 else if ( format == RTAUDIO_SINT24 ) {
\r
8888 if ( mask & AFMT_S24_NE ) {
\r
8889 deviceFormat = AFMT_S24_NE;
\r
8890 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8892 else if ( mask & AFMT_S24_OE ) {
\r
8893 deviceFormat = AFMT_S24_OE;
\r
8894 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8895 stream_.doByteSwap[mode] = true;
\r
8898 else if ( format == RTAUDIO_SINT32 ) {
\r
8899 if ( mask & AFMT_S32_NE ) {
\r
8900 deviceFormat = AFMT_S32_NE;
\r
8901 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8903 else if ( mask & AFMT_S32_OE ) {
\r
8904 deviceFormat = AFMT_S32_OE;
\r
8905 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8906 stream_.doByteSwap[mode] = true;
\r
8910 if ( deviceFormat == -1 ) {
\r
8911 // The user requested format is not natively supported by the device.
\r
8912 if ( mask & AFMT_S16_NE ) {
\r
8913 deviceFormat = AFMT_S16_NE;
\r
8914 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8916 else if ( mask & AFMT_S32_NE ) {
\r
8917 deviceFormat = AFMT_S32_NE;
\r
8918 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8920 else if ( mask & AFMT_S24_NE ) {
\r
8921 deviceFormat = AFMT_S24_NE;
\r
8922 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8924 else if ( mask & AFMT_S16_OE ) {
\r
8925 deviceFormat = AFMT_S16_OE;
\r
8926 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8927 stream_.doByteSwap[mode] = true;
\r
8929 else if ( mask & AFMT_S32_OE ) {
\r
8930 deviceFormat = AFMT_S32_OE;
\r
8931 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8932 stream_.doByteSwap[mode] = true;
\r
8934 else if ( mask & AFMT_S24_OE ) {
\r
8935 deviceFormat = AFMT_S24_OE;
\r
8936 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8937 stream_.doByteSwap[mode] = true;
\r
8939 else if ( mask & AFMT_S8) {
\r
8940 deviceFormat = AFMT_S8;
\r
8941 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8946 // This really shouldn't happen ...
\r
8948 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8949 errorText_ = errorStream_.str();
\r
8953 // Set the data format.
\r
8954 int temp = deviceFormat;
\r
8955 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8956 if ( result == -1 || deviceFormat != temp ) {
\r
8958 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8959 errorText_ = errorStream_.str();
\r
8963 // Attempt to set the buffer size. According to OSS, the minimum
\r
8964 // number of buffers is two. The supposed minimum buffer size is 16
\r
8965 // bytes, so that will be our lower bound. The argument to this
\r
8966 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8967 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8968 // We'll check the actual value used near the end of the setup
\r
8970 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8971 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8973 if ( options ) buffers = options->numberOfBuffers;
\r
8974 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8975 if ( buffers < 2 ) buffers = 3;
\r
8976 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8977 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8978 if ( result == -1 ) {
\r
8980 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8981 errorText_ = errorStream_.str();
\r
8984 stream_.nBuffers = buffers;
\r
8986 // Save buffer size (in sample frames).
\r
8987 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8988 stream_.bufferSize = *bufferSize;
\r
8990 // Set the sample rate.
\r
8991 int srate = sampleRate;
\r
8992 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8993 if ( result == -1 ) {
\r
8995 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8996 errorText_ = errorStream_.str();
\r
9000 // Verify the sample rate setup worked.
\r
9001 if ( abs( srate - sampleRate ) > 100 ) {
\r
9003 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9004 errorText_ = errorStream_.str();
\r
9007 stream_.sampleRate = sampleRate;
\r
9009 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9010 // We're doing duplex setup here.
\r
9011 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9012 stream_.nDeviceChannels[0] = deviceChannels;
\r
9015 // Set interleaving parameters.
\r
9016 stream_.userInterleaved = true;
\r
9017 stream_.deviceInterleaved[mode] = true;
\r
9018 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9019 stream_.userInterleaved = false;
\r
9021 // Set flags for buffer conversion
\r
9022 stream_.doConvertBuffer[mode] = false;
\r
9023 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9024 stream_.doConvertBuffer[mode] = true;
\r
9025 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9026 stream_.doConvertBuffer[mode] = true;
\r
9027 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9028 stream_.nUserChannels[mode] > 1 )
\r
9029 stream_.doConvertBuffer[mode] = true;
\r
9031 // Allocate the stream handles if necessary and then save.
\r
9032 if ( stream_.apiHandle == 0 ) {
\r
9034 handle = new OssHandle;
\r
9036 catch ( std::bad_alloc& ) {
\r
9037 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9041 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9042 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9046 stream_.apiHandle = (void *) handle;
\r
9049 handle = (OssHandle *) stream_.apiHandle;
\r
9051 handle->id[mode] = fd;
\r
9053 // Allocate necessary internal buffers.
\r
9054 unsigned long bufferBytes;
\r
9055 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9056 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9057 if ( stream_.userBuffer[mode] == NULL ) {
\r
9058 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9062 if ( stream_.doConvertBuffer[mode] ) {
\r
9064 bool makeBuffer = true;
\r
9065 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9066 if ( mode == INPUT ) {
\r
9067 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9068 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9069 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9073 if ( makeBuffer ) {
\r
9074 bufferBytes *= *bufferSize;
\r
9075 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9076 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9077 if ( stream_.deviceBuffer == NULL ) {
\r
9078 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9084 stream_.device[mode] = device;
\r
9085 stream_.state = STREAM_STOPPED;
\r
9087 // Setup the buffer conversion information structure.
\r
9088 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9090 // Setup thread if necessary.
\r
9091 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9092 // We had already set up an output stream.
\r
9093 stream_.mode = DUPLEX;
\r
9094 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9097 stream_.mode = mode;
\r
9099 // Setup callback thread.
\r
9100 stream_.callbackInfo.object = (void *) this;
\r
9102 // Set the thread attributes for joinable and realtime scheduling
\r
9103 // priority. The higher priority will only take affect if the
\r
9104 // program is run as root or suid.
\r
9105 pthread_attr_t attr;
\r
9106 pthread_attr_init( &attr );
\r
9107 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9108 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9109 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9110 struct sched_param param;
\r
9111 int priority = options->priority;
\r
9112 int min = sched_get_priority_min( SCHED_RR );
\r
9113 int max = sched_get_priority_max( SCHED_RR );
\r
9114 if ( priority < min ) priority = min;
\r
9115 else if ( priority > max ) priority = max;
\r
9116 param.sched_priority = priority;
\r
9117 pthread_attr_setschedparam( &attr, ¶m );
\r
9118 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9121 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9123 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9126 stream_.callbackInfo.isRunning = true;
\r
9127 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9128 pthread_attr_destroy( &attr );
\r
9130 stream_.callbackInfo.isRunning = false;
\r
9131 errorText_ = "RtApiOss::error creating callback thread!";
\r
9140 pthread_cond_destroy( &handle->runnable );
\r
9141 if ( handle->id[0] ) close( handle->id[0] );
\r
9142 if ( handle->id[1] ) close( handle->id[1] );
\r
9144 stream_.apiHandle = 0;
\r
9147 for ( int i=0; i<2; i++ ) {
\r
9148 if ( stream_.userBuffer[i] ) {
\r
9149 free( stream_.userBuffer[i] );
\r
9150 stream_.userBuffer[i] = 0;
\r
9154 if ( stream_.deviceBuffer ) {
\r
9155 free( stream_.deviceBuffer );
\r
9156 stream_.deviceBuffer = 0;
\r
9162 void RtApiOss :: closeStream()
\r
9164 if ( stream_.state == STREAM_CLOSED ) {
\r
9165 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9166 error( RtAudioError::WARNING );
\r
9170 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9171 stream_.callbackInfo.isRunning = false;
\r
9172 MUTEX_LOCK( &stream_.mutex );
\r
9173 if ( stream_.state == STREAM_STOPPED )
\r
9174 pthread_cond_signal( &handle->runnable );
\r
9175 MUTEX_UNLOCK( &stream_.mutex );
\r
9176 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9178 if ( stream_.state == STREAM_RUNNING ) {
\r
9179 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9180 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9182 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9183 stream_.state = STREAM_STOPPED;
\r
9187 pthread_cond_destroy( &handle->runnable );
\r
9188 if ( handle->id[0] ) close( handle->id[0] );
\r
9189 if ( handle->id[1] ) close( handle->id[1] );
\r
9191 stream_.apiHandle = 0;
\r
9194 for ( int i=0; i<2; i++ ) {
\r
9195 if ( stream_.userBuffer[i] ) {
\r
9196 free( stream_.userBuffer[i] );
\r
9197 stream_.userBuffer[i] = 0;
\r
9201 if ( stream_.deviceBuffer ) {
\r
9202 free( stream_.deviceBuffer );
\r
9203 stream_.deviceBuffer = 0;
\r
9206 stream_.mode = UNINITIALIZED;
\r
9207 stream_.state = STREAM_CLOSED;
\r
9210 void RtApiOss :: startStream()
\r
9213 if ( stream_.state == STREAM_RUNNING ) {
\r
9214 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9215 error( RtAudioError::WARNING );
\r
9219 MUTEX_LOCK( &stream_.mutex );
\r
9221 stream_.state = STREAM_RUNNING;
\r
9223 // No need to do anything else here ... OSS automatically starts
\r
9224 // when fed samples.
\r
9226 MUTEX_UNLOCK( &stream_.mutex );
\r
9228 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9229 pthread_cond_signal( &handle->runnable );
\r
9232 void RtApiOss :: stopStream()
\r
9235 if ( stream_.state == STREAM_STOPPED ) {
\r
9236 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9237 error( RtAudioError::WARNING );
\r
9241 MUTEX_LOCK( &stream_.mutex );
\r
9243 // The state might change while waiting on a mutex.
\r
9244 if ( stream_.state == STREAM_STOPPED ) {
\r
9245 MUTEX_UNLOCK( &stream_.mutex );
\r
9250 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9251 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9253 // Flush the output with zeros a few times.
\r
9256 RtAudioFormat format;
\r
9258 if ( stream_.doConvertBuffer[0] ) {
\r
9259 buffer = stream_.deviceBuffer;
\r
9260 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9261 format = stream_.deviceFormat[0];
\r
9264 buffer = stream_.userBuffer[0];
\r
9265 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9266 format = stream_.userFormat;
\r
9269 memset( buffer, 0, samples * formatBytes(format) );
\r
9270 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9271 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9272 if ( result == -1 ) {
\r
9273 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9274 error( RtAudioError::WARNING );
\r
9278 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9279 if ( result == -1 ) {
\r
9280 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9281 errorText_ = errorStream_.str();
\r
9284 handle->triggered = false;
\r
9287 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9288 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9289 if ( result == -1 ) {
\r
9290 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9291 errorText_ = errorStream_.str();
\r
9297 stream_.state = STREAM_STOPPED;
\r
9298 MUTEX_UNLOCK( &stream_.mutex );
\r
9300 if ( result != -1 ) return;
\r
9301 error( RtAudioError::SYSTEM_ERROR );
\r
9304 void RtApiOss :: abortStream()
\r
9307 if ( stream_.state == STREAM_STOPPED ) {
\r
9308 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9309 error( RtAudioError::WARNING );
\r
9313 MUTEX_LOCK( &stream_.mutex );
\r
9315 // The state might change while waiting on a mutex.
\r
9316 if ( stream_.state == STREAM_STOPPED ) {
\r
9317 MUTEX_UNLOCK( &stream_.mutex );
\r
9322 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9323 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9324 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9325 if ( result == -1 ) {
\r
9326 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9327 errorText_ = errorStream_.str();
\r
9330 handle->triggered = false;
\r
9333 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9334 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9335 if ( result == -1 ) {
\r
9336 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9337 errorText_ = errorStream_.str();
\r
9343 stream_.state = STREAM_STOPPED;
\r
9344 MUTEX_UNLOCK( &stream_.mutex );
\r
9346 if ( result != -1 ) return;
\r
9347 error( RtAudioError::SYSTEM_ERROR );
\r
9350 void RtApiOss :: callbackEvent()
\r
9352 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9353 if ( stream_.state == STREAM_STOPPED ) {
\r
9354 MUTEX_LOCK( &stream_.mutex );
\r
9355 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9356 if ( stream_.state != STREAM_RUNNING ) {
\r
9357 MUTEX_UNLOCK( &stream_.mutex );
\r
9360 MUTEX_UNLOCK( &stream_.mutex );
\r
9363 if ( stream_.state == STREAM_CLOSED ) {
\r
9364 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9365 error( RtAudioError::WARNING );
\r
9369 // Invoke user callback to get fresh output data.
\r
9370 int doStopStream = 0;
\r
9371 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9372 double streamTime = getStreamTime();
\r
9373 RtAudioStreamStatus status = 0;
\r
9374 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9375 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9376 handle->xrun[0] = false;
\r
9378 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9379 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9380 handle->xrun[1] = false;
\r
9382 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9383 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9384 if ( doStopStream == 2 ) {
\r
9385 this->abortStream();
\r
9389 MUTEX_LOCK( &stream_.mutex );
\r
9391 // The state might change while waiting on a mutex.
\r
9392 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9397 RtAudioFormat format;
\r
9399 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9401 // Setup parameters and do buffer conversion if necessary.
\r
9402 if ( stream_.doConvertBuffer[0] ) {
\r
9403 buffer = stream_.deviceBuffer;
\r
9404 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9405 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9406 format = stream_.deviceFormat[0];
\r
9409 buffer = stream_.userBuffer[0];
\r
9410 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9411 format = stream_.userFormat;
\r
9414 // Do byte swapping if necessary.
\r
9415 if ( stream_.doByteSwap[0] )
\r
9416 byteSwapBuffer( buffer, samples, format );
\r
9418 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9420 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9421 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9422 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9423 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9424 handle->triggered = true;
\r
9427 // Write samples to device.
\r
9428 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9430 if ( result == -1 ) {
\r
9431 // We'll assume this is an underrun, though there isn't a
\r
9432 // specific means for determining that.
\r
9433 handle->xrun[0] = true;
\r
9434 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9435 error( RtAudioError::WARNING );
\r
9436 // Continue on to input section.
\r
9440 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9442 // Setup parameters.
\r
9443 if ( stream_.doConvertBuffer[1] ) {
\r
9444 buffer = stream_.deviceBuffer;
\r
9445 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9446 format = stream_.deviceFormat[1];
\r
9449 buffer = stream_.userBuffer[1];
\r
9450 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9451 format = stream_.userFormat;
\r
9454 // Read samples from device.
\r
9455 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9457 if ( result == -1 ) {
\r
9458 // We'll assume this is an overrun, though there isn't a
\r
9459 // specific means for determining that.
\r
9460 handle->xrun[1] = true;
\r
9461 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9462 error( RtAudioError::WARNING );
\r
9466 // Do byte swapping if necessary.
\r
9467 if ( stream_.doByteSwap[1] )
\r
9468 byteSwapBuffer( buffer, samples, format );
\r
9470 // Do buffer conversion if necessary.
\r
9471 if ( stream_.doConvertBuffer[1] )
\r
9472 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9476 MUTEX_UNLOCK( &stream_.mutex );
\r
9478 RtApi::tickStreamTime();
\r
9479 if ( doStopStream == 1 ) this->stopStream();
\r
9482 static void *ossCallbackHandler( void *ptr )
\r
9484 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9485 RtApiOss *object = (RtApiOss *) info->object;
\r
9486 bool *isRunning = &info->isRunning;
\r
9488 while ( *isRunning == true ) {
\r
9489 pthread_testcancel();
\r
9490 object->callbackEvent();
\r
9493 pthread_exit( NULL );
\r
9496 //******************** End of __LINUX_OSS__ *********************//
\r
9500 // *************************************************** //
\r
9502 // Protected common (OS-independent) RtAudio methods.
\r
9504 // *************************************************** //
\r
9506 // This method can be modified to control the behavior of error
\r
9507 // message printing.
\r
9508 void RtApi :: error( RtAudioError::Type type )
\r
9510 errorStream_.str(""); // clear the ostringstream
\r
9512 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9513 if ( errorCallback ) {
\r
9514 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9516 if ( firstErrorOccurred_ )
\r
9519 firstErrorOccurred_ = true;
\r
9520 const std::string errorMessage = errorText_;
\r
9522 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9523 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9527 errorCallback( type, errorMessage );
\r
9528 firstErrorOccurred_ = false;
\r
9532 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9533 std::cerr << '\n' << errorText_ << "\n\n";
\r
9534 else if ( type != RtAudioError::WARNING )
\r
9535 throw( RtAudioError( errorText_, type ) );
\r
9538 void RtApi :: verifyStream()
\r
9540 if ( stream_.state == STREAM_CLOSED ) {
\r
9541 errorText_ = "RtApi:: a stream is not open!";
\r
9542 error( RtAudioError::INVALID_USE );
\r
9546 void RtApi :: clearStreamInfo()
\r
9548 stream_.mode = UNINITIALIZED;
\r
9549 stream_.state = STREAM_CLOSED;
\r
9550 stream_.sampleRate = 0;
\r
9551 stream_.bufferSize = 0;
\r
9552 stream_.nBuffers = 0;
\r
9553 stream_.userFormat = 0;
\r
9554 stream_.userInterleaved = true;
\r
9555 stream_.streamTime = 0.0;
\r
9556 stream_.apiHandle = 0;
\r
9557 stream_.deviceBuffer = 0;
\r
9558 stream_.callbackInfo.callback = 0;
\r
9559 stream_.callbackInfo.userData = 0;
\r
9560 stream_.callbackInfo.isRunning = false;
\r
9561 stream_.callbackInfo.errorCallback = 0;
\r
9562 for ( int i=0; i<2; i++ ) {
\r
9563 stream_.device[i] = 11111;
\r
9564 stream_.doConvertBuffer[i] = false;
\r
9565 stream_.deviceInterleaved[i] = true;
\r
9566 stream_.doByteSwap[i] = false;
\r
9567 stream_.nUserChannels[i] = 0;
\r
9568 stream_.nDeviceChannels[i] = 0;
\r
9569 stream_.channelOffset[i] = 0;
\r
9570 stream_.deviceFormat[i] = 0;
\r
9571 stream_.latency[i] = 0;
\r
9572 stream_.userBuffer[i] = 0;
\r
9573 stream_.convertInfo[i].channels = 0;
\r
9574 stream_.convertInfo[i].inJump = 0;
\r
9575 stream_.convertInfo[i].outJump = 0;
\r
9576 stream_.convertInfo[i].inFormat = 0;
\r
9577 stream_.convertInfo[i].outFormat = 0;
\r
9578 stream_.convertInfo[i].inOffset.clear();
\r
9579 stream_.convertInfo[i].outOffset.clear();
\r
9583 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9585 if ( format == RTAUDIO_SINT16 )
\r
9587 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9589 else if ( format == RTAUDIO_FLOAT64 )
\r
9591 else if ( format == RTAUDIO_SINT24 )
\r
9593 else if ( format == RTAUDIO_SINT8 )
\r
9596 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9597 error( RtAudioError::WARNING );
\r
9602 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9604 if ( mode == INPUT ) { // convert device to user buffer
\r
9605 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9606 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9607 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9608 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9610 else { // convert user to device buffer
\r
9611 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9612 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9613 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9614 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9617 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9618 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9620 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9622 // Set up the interleave/deinterleave offsets.
\r
9623 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9624 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9625 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9626 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9627 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9628 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9629 stream_.convertInfo[mode].inJump = 1;
\r
9633 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9634 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9635 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9636 stream_.convertInfo[mode].outJump = 1;
\r
9640 else { // no (de)interleaving
\r
9641 if ( stream_.userInterleaved ) {
\r
9642 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9643 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9644 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9648 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9649 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9650 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9651 stream_.convertInfo[mode].inJump = 1;
\r
9652 stream_.convertInfo[mode].outJump = 1;
\r
9657 // Add channel offset.
\r
9658 if ( firstChannel > 0 ) {
\r
9659 if ( stream_.deviceInterleaved[mode] ) {
\r
9660 if ( mode == OUTPUT ) {
\r
9661 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9662 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9665 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9666 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9670 if ( mode == OUTPUT ) {
\r
9671 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9672 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9675 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9676 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9682 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9684 // This function does format conversion, input/output channel compensation, and
\r
9685 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9686 // the lower three bytes of a 32-bit integer.
\r
9688 // Clear our device buffer when in/out duplex device channels are different
\r
9689 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9690 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9691 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9694 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9696 Float64 *out = (Float64 *)outBuffer;
\r
9698 if (info.inFormat == RTAUDIO_SINT8) {
\r
9699 signed char *in = (signed char *)inBuffer;
\r
9700 scale = 1.0 / 127.5;
\r
9701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9702 for (j=0; j<info.channels; j++) {
\r
9703 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9704 out[info.outOffset[j]] += 0.5;
\r
9705 out[info.outOffset[j]] *= scale;
\r
9707 in += info.inJump;
\r
9708 out += info.outJump;
\r
9711 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9712 Int16 *in = (Int16 *)inBuffer;
\r
9713 scale = 1.0 / 32767.5;
\r
9714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9715 for (j=0; j<info.channels; j++) {
\r
9716 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9717 out[info.outOffset[j]] += 0.5;
\r
9718 out[info.outOffset[j]] *= scale;
\r
9720 in += info.inJump;
\r
9721 out += info.outJump;
\r
9724 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9725 Int24 *in = (Int24 *)inBuffer;
\r
9726 scale = 1.0 / 8388607.5;
\r
9727 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9728 for (j=0; j<info.channels; j++) {
\r
9729 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9730 out[info.outOffset[j]] += 0.5;
\r
9731 out[info.outOffset[j]] *= scale;
\r
9733 in += info.inJump;
\r
9734 out += info.outJump;
\r
9737 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9738 Int32 *in = (Int32 *)inBuffer;
\r
9739 scale = 1.0 / 2147483647.5;
\r
9740 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9741 for (j=0; j<info.channels; j++) {
\r
9742 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9743 out[info.outOffset[j]] += 0.5;
\r
9744 out[info.outOffset[j]] *= scale;
\r
9746 in += info.inJump;
\r
9747 out += info.outJump;
\r
9750 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9751 Float32 *in = (Float32 *)inBuffer;
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9756 in += info.inJump;
\r
9757 out += info.outJump;
\r
9760 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9761 // Channel compensation and/or (de)interleaving only.
\r
9762 Float64 *in = (Float64 *)inBuffer;
\r
9763 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9764 for (j=0; j<info.channels; j++) {
\r
9765 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9767 in += info.inJump;
\r
9768 out += info.outJump;
\r
9772 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9774 Float32 *out = (Float32 *)outBuffer;
\r
9776 if (info.inFormat == RTAUDIO_SINT8) {
\r
9777 signed char *in = (signed char *)inBuffer;
\r
9778 scale = (Float32) ( 1.0 / 127.5 );
\r
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9780 for (j=0; j<info.channels; j++) {
\r
9781 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9782 out[info.outOffset[j]] += 0.5;
\r
9783 out[info.outOffset[j]] *= scale;
\r
9785 in += info.inJump;
\r
9786 out += info.outJump;
\r
9789 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9790 Int16 *in = (Int16 *)inBuffer;
\r
9791 scale = (Float32) ( 1.0 / 32767.5 );
\r
9792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9793 for (j=0; j<info.channels; j++) {
\r
9794 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9795 out[info.outOffset[j]] += 0.5;
\r
9796 out[info.outOffset[j]] *= scale;
\r
9798 in += info.inJump;
\r
9799 out += info.outJump;
\r
9802 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9803 Int24 *in = (Int24 *)inBuffer;
\r
9804 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9805 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9806 for (j=0; j<info.channels; j++) {
\r
9807 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9808 out[info.outOffset[j]] += 0.5;
\r
9809 out[info.outOffset[j]] *= scale;
\r
9811 in += info.inJump;
\r
9812 out += info.outJump;
\r
9815 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9816 Int32 *in = (Int32 *)inBuffer;
\r
9817 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9821 out[info.outOffset[j]] += 0.5;
\r
9822 out[info.outOffset[j]] *= scale;
\r
9824 in += info.inJump;
\r
9825 out += info.outJump;
\r
9828 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9829 // Channel compensation and/or (de)interleaving only.
\r
9830 Float32 *in = (Float32 *)inBuffer;
\r
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9832 for (j=0; j<info.channels; j++) {
\r
9833 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9835 in += info.inJump;
\r
9836 out += info.outJump;
\r
9839 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9840 Float64 *in = (Float64 *)inBuffer;
\r
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9842 for (j=0; j<info.channels; j++) {
\r
9843 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9845 in += info.inJump;
\r
9846 out += info.outJump;
\r
9850 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9851 Int32 *out = (Int32 *)outBuffer;
\r
9852 if (info.inFormat == RTAUDIO_SINT8) {
\r
9853 signed char *in = (signed char *)inBuffer;
\r
9854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9855 for (j=0; j<info.channels; j++) {
\r
9856 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9857 out[info.outOffset[j]] <<= 24;
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9864 Int16 *in = (Int16 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9868 out[info.outOffset[j]] <<= 16;
\r
9870 in += info.inJump;
\r
9871 out += info.outJump;
\r
9874 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9875 Int24 *in = (Int24 *)inBuffer;
\r
9876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9877 for (j=0; j<info.channels; j++) {
\r
9878 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9879 out[info.outOffset[j]] <<= 8;
\r
9881 in += info.inJump;
\r
9882 out += info.outJump;
\r
9885 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9886 // Channel compensation and/or (de)interleaving only.
\r
9887 Int32 *in = (Int32 *)inBuffer;
\r
9888 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9889 for (j=0; j<info.channels; j++) {
\r
9890 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9892 in += info.inJump;
\r
9893 out += info.outJump;
\r
9896 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9897 Float32 *in = (Float32 *)inBuffer;
\r
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9899 for (j=0; j<info.channels; j++) {
\r
9900 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9902 in += info.inJump;
\r
9903 out += info.outJump;
\r
9906 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9907 Float64 *in = (Float64 *)inBuffer;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9912 in += info.inJump;
\r
9913 out += info.outJump;
\r
9917 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9918 Int24 *out = (Int24 *)outBuffer;
\r
9919 if (info.inFormat == RTAUDIO_SINT8) {
\r
9920 signed char *in = (signed char *)inBuffer;
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9924 //out[info.outOffset[j]] <<= 16;
\r
9926 in += info.inJump;
\r
9927 out += info.outJump;
\r
9930 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9931 Int16 *in = (Int16 *)inBuffer;
\r
9932 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9933 for (j=0; j<info.channels; j++) {
\r
9934 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9935 //out[info.outOffset[j]] <<= 8;
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9941 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9942 // Channel compensation and/or (de)interleaving only.
\r
9943 Int24 *in = (Int24 *)inBuffer;
\r
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9945 for (j=0; j<info.channels; j++) {
\r
9946 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9953 Int32 *in = (Int32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9957 //out[info.outOffset[j]] >>= 8;
\r
9959 in += info.inJump;
\r
9960 out += info.outJump;
\r
9963 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9964 Float32 *in = (Float32 *)inBuffer;
\r
9965 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9966 for (j=0; j<info.channels; j++) {
\r
9967 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9969 in += info.inJump;
\r
9970 out += info.outJump;
\r
9973 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9974 Float64 *in = (Float64 *)inBuffer;
\r
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9976 for (j=0; j<info.channels; j++) {
\r
9977 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9979 in += info.inJump;
\r
9980 out += info.outJump;
\r
9984 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9985 Int16 *out = (Int16 *)outBuffer;
\r
9986 if (info.inFormat == RTAUDIO_SINT8) {
\r
9987 signed char *in = (signed char *)inBuffer;
\r
9988 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9989 for (j=0; j<info.channels; j++) {
\r
9990 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9991 out[info.outOffset[j]] <<= 8;
\r
9993 in += info.inJump;
\r
9994 out += info.outJump;
\r
9997 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9998 // Channel compensation and/or (de)interleaving only.
\r
9999 Int16 *in = (Int16 *)inBuffer;
\r
10000 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10001 for (j=0; j<info.channels; j++) {
\r
10002 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10004 in += info.inJump;
\r
10005 out += info.outJump;
\r
10008 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10009 Int24 *in = (Int24 *)inBuffer;
\r
10010 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10011 for (j=0; j<info.channels; j++) {
\r
10012 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10014 in += info.inJump;
\r
10015 out += info.outJump;
\r
10018 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10019 Int32 *in = (Int32 *)inBuffer;
\r
10020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10021 for (j=0; j<info.channels; j++) {
\r
10022 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10024 in += info.inJump;
\r
10025 out += info.outJump;
\r
10028 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10029 Float32 *in = (Float32 *)inBuffer;
\r
10030 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10031 for (j=0; j<info.channels; j++) {
\r
10032 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10034 in += info.inJump;
\r
10035 out += info.outJump;
\r
10038 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10039 Float64 *in = (Float64 *)inBuffer;
\r
10040 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10041 for (j=0; j<info.channels; j++) {
\r
10042 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10044 in += info.inJump;
\r
10045 out += info.outJump;
\r
10049 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10050 signed char *out = (signed char *)outBuffer;
\r
10051 if (info.inFormat == RTAUDIO_SINT8) {
\r
10052 // Channel compensation and/or (de)interleaving only.
\r
10053 signed char *in = (signed char *)inBuffer;
\r
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10055 for (j=0; j<info.channels; j++) {
\r
10056 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10058 in += info.inJump;
\r
10059 out += info.outJump;
\r
10062 if (info.inFormat == RTAUDIO_SINT16) {
\r
10063 Int16 *in = (Int16 *)inBuffer;
\r
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10065 for (j=0; j<info.channels; j++) {
\r
10066 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10068 in += info.inJump;
\r
10069 out += info.outJump;
\r
10072 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10073 Int24 *in = (Int24 *)inBuffer;
\r
10074 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10075 for (j=0; j<info.channels; j++) {
\r
10076 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10078 in += info.inJump;
\r
10079 out += info.outJump;
\r
10082 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10083 Int32 *in = (Int32 *)inBuffer;
\r
10084 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10085 for (j=0; j<info.channels; j++) {
\r
10086 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10088 in += info.inJump;
\r
10089 out += info.outJump;
\r
10092 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10093 Float32 *in = (Float32 *)inBuffer;
\r
10094 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10095 for (j=0; j<info.channels; j++) {
\r
10096 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10098 in += info.inJump;
\r
10099 out += info.outJump;
\r
10102 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10103 Float64 *in = (Float64 *)inBuffer;
\r
10104 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10105 for (j=0; j<info.channels; j++) {
\r
10106 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10108 in += info.inJump;
\r
10109 out += info.outJump;
\r
10115 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10116 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10117 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10119 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10121 register char val;
\r
10122 register char *ptr;
\r
10125 if ( format == RTAUDIO_SINT16 ) {
\r
10126 for ( unsigned int i=0; i<samples; i++ ) {
\r
10127 // Swap 1st and 2nd bytes.
\r
10129 *(ptr) = *(ptr+1);
\r
10132 // Increment 2 bytes.
\r
10136 else if ( format == RTAUDIO_SINT32 ||
\r
10137 format == RTAUDIO_FLOAT32 ) {
\r
10138 for ( unsigned int i=0; i<samples; i++ ) {
\r
10139 // Swap 1st and 4th bytes.
\r
10141 *(ptr) = *(ptr+3);
\r
10144 // Swap 2nd and 3rd bytes.
\r
10147 *(ptr) = *(ptr+1);
\r
10150 // Increment 3 more bytes.
\r
10154 else if ( format == RTAUDIO_SINT24 ) {
\r
10155 for ( unsigned int i=0; i<samples; i++ ) {
\r
10156 // Swap 1st and 3rd bytes.
\r
10158 *(ptr) = *(ptr+2);
\r
10161 // Increment 2 more bytes.
\r
10165 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10166 for ( unsigned int i=0; i<samples; i++ ) {
\r
10167 // Swap 1st and 8th bytes
\r
10169 *(ptr) = *(ptr+7);
\r
10172 // Swap 2nd and 7th bytes
\r
10175 *(ptr) = *(ptr+5);
\r
10178 // Swap 3rd and 6th bytes
\r
10181 *(ptr) = *(ptr+3);
\r
10184 // Swap 4th and 5th bytes
\r
10187 *(ptr) = *(ptr+1);
\r
10190 // Increment 5 more bytes.
\r
10196 // Indentation settings for Vim and Emacs
\r
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r