1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Win32 critical sections on Windows,
// pthread mutexes on Linux/Mac/Jack builds, no-op dummies otherwise.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

  // Pass a narrow (already multi-byte) string through unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) string to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer length (includes the terminator).
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1411 kAudioObjectPropertyScopeGlobal,
\r
1412 kAudioObjectPropertyElementMaster };
\r
1414 property.mSelector = kAudioDeviceProcessorOverload;
\r
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1418 error( RtAudioError::WARNING );
\r
1421 if ( stream_.state == STREAM_RUNNING )
\r
1422 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1434 kAudioObjectPropertyScopeGlobal,
\r
1435 kAudioObjectPropertyElementMaster };
\r
1437 property.mSelector = kAudioDeviceProcessorOverload;
\r
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1441 error( RtAudioError::WARNING );
\r
1444 if ( stream_.state == STREAM_RUNNING )
\r
1445 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1454 for ( int i=0; i<2; i++ ) {
\r
1455 if ( stream_.userBuffer[i] ) {
\r
1456 free( stream_.userBuffer[i] );
\r
1457 stream_.userBuffer[i] = 0;
\r
1461 if ( stream_.deviceBuffer ) {
\r
1462 free( stream_.deviceBuffer );
\r
1463 stream_.deviceBuffer = 0;
\r
1466 // Destroy pthread condition variable.
\r
1467 pthread_cond_destroy( &handle->condition );
\r
1469 stream_.apiHandle = 0;
\r
1471 stream_.mode = UNINITIALIZED;
\r
1472 stream_.state = STREAM_CLOSED;
\r
1475 void RtApiCore :: startStream( void )
\r
1478 if ( stream_.state == STREAM_RUNNING ) {
\r
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1480 error( RtAudioError::WARNING );
\r
1484 OSStatus result = noErr;
\r
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1489 if ( result != noErr ) {
\r
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1491 errorText_ = errorStream_.str();
\r
1496 if ( stream_.mode == INPUT ||
\r
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1500 if ( result != noErr ) {
\r
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1502 errorText_ = errorStream_.str();
\r
1507 handle->drainCounter = 0;
\r
1508 handle->internalDrain = false;
\r
1509 stream_.state = STREAM_RUNNING;
\r
1512 if ( result == noErr ) return;
\r
1513 error( RtAudioError::SYSTEM_ERROR );
\r
1516 void RtApiCore :: stopStream( void )
\r
1519 if ( stream_.state == STREAM_STOPPED ) {
\r
1520 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1521 error( RtAudioError::WARNING );
\r
1525 OSStatus result = noErr;
\r
1526 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1529 if ( handle->drainCounter == 0 ) {
\r
1530 handle->drainCounter = 2;
\r
1531 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1534 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1535 if ( result != noErr ) {
\r
1536 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1537 errorText_ = errorStream_.str();
\r
1542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1544 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1545 if ( result != noErr ) {
\r
1546 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1547 errorText_ = errorStream_.str();
\r
1552 stream_.state = STREAM_STOPPED;
\r
1555 if ( result == noErr ) return;
\r
1556 error( RtAudioError::SYSTEM_ERROR );
\r
1559 void RtApiCore :: abortStream( void )
\r
1562 if ( stream_.state == STREAM_STOPPED ) {
\r
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1564 error( RtAudioError::WARNING );
\r
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1569 handle->drainCounter = 2;
\r
1574 // This function will be called by a spawned thread when the user
\r
1575 // callback function signals that the stream should be stopped or
\r
1576 // aborted. It is better to handle it this way because the
\r
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1578 // function is called.
\r
1579 static void *coreStopStream( void *ptr )
\r
1581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1582 RtApiCore *object = (RtApiCore *) info->object;
\r
1584 object->stopStream();
\r
1585 pthread_exit( NULL );
\r
1588 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1589 const AudioBufferList *inBufferList,
\r
1590 const AudioBufferList *outBufferList )
\r
1592 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1593 if ( stream_.state == STREAM_CLOSED ) {
\r
1594 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1595 error( RtAudioError::WARNING );
\r
1599 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1602 // Check if we were draining the stream and signal is finished.
\r
1603 if ( handle->drainCounter > 3 ) {
\r
1604 ThreadHandle threadId;
\r
1606 stream_.state = STREAM_STOPPING;
\r
1607 if ( handle->internalDrain == true )
\r
1608 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1609 else // external call to stopStream()
\r
1610 pthread_cond_signal( &handle->condition );
\r
1614 AudioDeviceID outputDevice = handle->id[0];
\r
1616 // Invoke user callback to get fresh output data UNLESS we are
\r
1617 // draining stream or duplex mode AND the input/output devices are
\r
1618 // different AND this function is called for the input device.
\r
1619 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1620 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1621 double streamTime = getStreamTime();
\r
1622 RtAudioStreamStatus status = 0;
\r
1623 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1624 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1625 handle->xrun[0] = false;
\r
1627 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1628 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1629 handle->xrun[1] = false;
\r
1632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1633 stream_.bufferSize, streamTime, status, info->userData );
\r
1634 if ( cbReturnValue == 2 ) {
\r
1635 stream_.state = STREAM_STOPPING;
\r
1636 handle->drainCounter = 2;
\r
1640 else if ( cbReturnValue == 1 ) {
\r
1641 handle->drainCounter = 1;
\r
1642 handle->internalDrain = true;
\r
1646 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1648 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1650 if ( handle->nStreams[0] == 1 ) {
\r
1651 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1653 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1655 else { // fill multiple streams with zeros
\r
1656 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1657 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1659 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1663 else if ( handle->nStreams[0] == 1 ) {
\r
1664 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1665 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1666 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1668 else { // copy from user buffer
\r
1669 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0],
\r
1671 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1674 else { // fill multiple streams
\r
1675 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1676 if ( stream_.doConvertBuffer[0] ) {
\r
1677 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1678 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1681 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1682 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1683 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1684 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1685 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1688 else { // fill multiple multi-channel streams with interleaved data
\r
1689 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1690 Float32 *out, *in;
\r
1692 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1693 UInt32 inChannels = stream_.nUserChannels[0];
\r
1694 if ( stream_.doConvertBuffer[0] ) {
\r
1695 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1696 inChannels = stream_.nDeviceChannels[0];
\r
1699 if ( inInterleaved ) inOffset = 1;
\r
1700 else inOffset = stream_.bufferSize;
\r
1702 channelsLeft = inChannels;
\r
1703 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1705 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1706 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1709 // Account for possible channel offset in first stream
\r
1710 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1711 streamChannels -= stream_.channelOffset[0];
\r
1712 outJump = stream_.channelOffset[0];
\r
1716 // Account for possible unfilled channels at end of the last stream
\r
1717 if ( streamChannels > channelsLeft ) {
\r
1718 outJump = streamChannels - channelsLeft;
\r
1719 streamChannels = channelsLeft;
\r
1722 // Determine input buffer offsets and skips
\r
1723 if ( inInterleaved ) {
\r
1724 inJump = inChannels;
\r
1725 in += inChannels - channelsLeft;
\r
1729 in += (inChannels - channelsLeft) * inOffset;
\r
1732 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1733 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1734 *out++ = in[j*inOffset];
\r
1739 channelsLeft -= streamChannels;
\r
1745 // Don't bother draining input
\r
1746 if ( handle->drainCounter ) {
\r
1747 handle->drainCounter++;
\r
1751 AudioDeviceID inputDevice;
\r
1752 inputDevice = handle->id[1];
\r
1753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1755 if ( handle->nStreams[1] == 1 ) {
\r
1756 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1757 convertBuffer( stream_.userBuffer[1],
\r
1758 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1759 stream_.convertInfo[1] );
\r
1761 else { // copy to user buffer
\r
1762 memcpy( stream_.userBuffer[1],
\r
1763 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1767 else { // read from multiple streams
\r
1768 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1769 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1771 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1772 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1773 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1774 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1775 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1778 else { // read from multiple multi-channel streams
\r
1779 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1780 Float32 *out, *in;
\r
1782 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1783 UInt32 outChannels = stream_.nUserChannels[1];
\r
1784 if ( stream_.doConvertBuffer[1] ) {
\r
1785 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1786 outChannels = stream_.nDeviceChannels[1];
\r
1789 if ( outInterleaved ) outOffset = 1;
\r
1790 else outOffset = stream_.bufferSize;
\r
1792 channelsLeft = outChannels;
\r
1793 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1795 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1796 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1799 // Account for possible channel offset in first stream
\r
1800 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1801 streamChannels -= stream_.channelOffset[1];
\r
1802 inJump = stream_.channelOffset[1];
\r
1806 // Account for possible unread channels at end of the last stream
\r
1807 if ( streamChannels > channelsLeft ) {
\r
1808 inJump = streamChannels - channelsLeft;
\r
1809 streamChannels = channelsLeft;
\r
1812 // Determine output buffer offsets and skips
\r
1813 if ( outInterleaved ) {
\r
1814 outJump = outChannels;
\r
1815 out += outChannels - channelsLeft;
\r
1819 out += (outChannels - channelsLeft) * outOffset;
\r
1822 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1823 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1824 out[j*outOffset] = *in++;
\r
1829 channelsLeft -= streamChannels;
\r
1833 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1834 convertBuffer( stream_.userBuffer[1],
\r
1835 stream_.deviceBuffer,
\r
1836 stream_.convertInfo[1] );
\r
1842 //MUTEX_UNLOCK( &stream_.mutex );
\r
1844 RtApi::tickStreamTime();
\r
1848 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1852 case kAudioHardwareNotRunningError:
\r
1853 return "kAudioHardwareNotRunningError";
\r
1855 case kAudioHardwareUnspecifiedError:
\r
1856 return "kAudioHardwareUnspecifiedError";
\r
1858 case kAudioHardwareUnknownPropertyError:
\r
1859 return "kAudioHardwareUnknownPropertyError";
\r
1861 case kAudioHardwareBadPropertySizeError:
\r
1862 return "kAudioHardwareBadPropertySizeError";
\r
1864 case kAudioHardwareIllegalOperationError:
\r
1865 return "kAudioHardwareIllegalOperationError";
\r
1867 case kAudioHardwareBadObjectError:
\r
1868 return "kAudioHardwareBadObjectError";
\r
1870 case kAudioHardwareBadDeviceError:
\r
1871 return "kAudioHardwareBadDeviceError";
\r
1873 case kAudioHardwareBadStreamError:
\r
1874 return "kAudioHardwareBadStreamError";
\r
1876 case kAudioHardwareUnsupportedOperationError:
\r
1877 return "kAudioHardwareUnsupportedOperationError";
\r
1879 case kAudioDeviceUnsupportedFormatError:
\r
1880 return "kAudioDeviceUnsupportedFormatError";
\r
1882 case kAudioDevicePermissionsError:
\r
1883 return "kAudioDevicePermissionsError";
\r
1886 return "CoreAudio unknown error";
\r
1890 //******************** End of __MACOSX_CORE__ *********************//
\r
1893 #if defined(__UNIX_JACK__)
\r
1895 // JACK is a low-latency audio server, originally written for the
\r
1896 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1897 // connect a number of different applications to an audio device, as
\r
1898 // well as allowing them to share audio between themselves.
\r
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1901 // have ports connected to the server. The JACK server is typically
\r
1902 // started in a terminal as follows:
\r
1904 // .jackd -d alsa -d hw:0
\r
1906 // or through an interface program such as qjackctl. Many of the
\r
1907 // parameters normally set for a stream are fixed by the JACK server
\r
1908 // and can be specified when the JACK server is started. In
\r
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1914 // frames, and number of buffers = 4. Once the server is running, it
\r
1915 // is not possible to override these values. If the values are not
\r
1916 // specified in the command-line, the JACK server uses default values.
\r
1918 // The JACK server does not have to be running when an instance of
\r
1919 // RtApiJack is created, though the function getDeviceCount() will
\r
1920 // report 0 devices found until JACK has been started. When no
\r
1921 // devices are available (i.e., the JACK server is not running), a
\r
1922 // stream cannot be opened.
\r
1924 #include <jack/jack.h>
\r
1925 #include <unistd.h>
\r
1928 // A structure to hold various information related to the Jack API
\r
1929 // implementation.
\r
1930 struct JackHandle {
\r
1931 jack_client_t *client;
\r
1932 jack_port_t **ports[2];
\r
1933 std::string deviceName[2];
\r
1935 pthread_cond_t condition;
\r
1936 int drainCounter; // Tracks callback counts when draining
\r
1937 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() in
// non-debug builds so that JACK's internal error reporting stays quiet.
// Fix: removed the stray ';' after the function body -- it formed an
// extra empty declaration and triggers -Wextra-semi style warnings.
static void jackSilentError( const char * ) {}
\r
1945 RtApiJack :: RtApiJack()
\r
1947 // Nothing to do here.
\r
1948 #if !defined(__RTAUDIO_DEBUG__)
\r
1949 // Turn off Jack's internal error reporting.
\r
1950 jack_set_error_function( &jackSilentError );
\r
1954 RtApiJack :: ~RtApiJack()
\r
1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1959 unsigned int RtApiJack :: getDeviceCount( void )
\r
1961 // See if we can become a jack client.
\r
1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1963 jack_status_t *status = NULL;
\r
1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1965 if ( client == 0 ) return 0;
\r
1967 const char **ports;
\r
1968 std::string port, previousPort;
\r
1969 unsigned int nChannels = 0, nDevices = 0;
\r
1970 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1972 // Parse the port names up to the first colon (:).
\r
1973 size_t iColon = 0;
\r
1975 port = (char *) ports[ nChannels ];
\r
1976 iColon = port.find(":");
\r
1977 if ( iColon != std::string::npos ) {
\r
1978 port = port.substr( 0, iColon + 1 );
\r
1979 if ( port != previousPort ) {
\r
1981 previousPort = port;
\r
1984 } while ( ports[++nChannels] );
\r
1988 jack_client_close( client );
\r
1992 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1994 RtAudio::DeviceInfo info;
\r
1995 info.probed = false;
\r
1997 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1998 jack_status_t *status = NULL;
\r
1999 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2000 if ( client == 0 ) {
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 const char **ports;
\r
2007 std::string port, previousPort;
\r
2008 unsigned int nPorts = 0, nDevices = 0;
\r
2009 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2011 // Parse the port names up to the first colon (:).
\r
2012 size_t iColon = 0;
\r
2014 port = (char *) ports[ nPorts ];
\r
2015 iColon = port.find(":");
\r
2016 if ( iColon != std::string::npos ) {
\r
2017 port = port.substr( 0, iColon );
\r
2018 if ( port != previousPort ) {
\r
2019 if ( nDevices == device ) info.name = port;
\r
2021 previousPort = port;
\r
2024 } while ( ports[++nPorts] );
\r
2028 if ( device >= nDevices ) {
\r
2029 jack_client_close( client );
\r
2030 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2031 error( RtAudioError::INVALID_USE );
\r
2035 // Get the current jack server sample rate.
\r
2036 info.sampleRates.clear();
\r
2038 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2039 info.sampleRates.push_back( info.preferredSampleRate );
\r
2041 // Count the available ports containing the client name as device
\r
2042 // channels. Jack "input ports" equal RtAudio output channels.
\r
2043 unsigned int nChannels = 0;
\r
2044 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2046 while ( ports[ nChannels ] ) nChannels++;
\r
2048 info.outputChannels = nChannels;
\r
2051 // Jack "output ports" equal RtAudio input channels.
\r
2053 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2055 while ( ports[ nChannels ] ) nChannels++;
\r
2057 info.inputChannels = nChannels;
\r
2060 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2061 jack_client_close(client);
\r
2062 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2063 error( RtAudioError::WARNING );
\r
2067 // If device opens for both playback and capture, we determine the channels.
\r
2068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2071 // Jack always uses 32-bit floats.
\r
2072 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2074 // Jack doesn't provide default devices so we'll use the first available one.
\r
2075 if ( device == 0 && info.outputChannels > 0 )
\r
2076 info.isDefaultOutput = true;
\r
2077 if ( device == 0 && info.inputChannels > 0 )
\r
2078 info.isDefaultInput = true;
\r
2080 jack_client_close(client);
\r
2081 info.probed = true;
\r
2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2087 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2089 RtApiJack *object = (RtApiJack *) info->object;
\r
2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2095 // This function will be called by a spawned thread when the Jack
\r
2096 // server signals that it is shutting down. It is necessary to handle
\r
2097 // it this way because the jackShutdown() function must return before
\r
2098 // the jack_deactivate() function (in closeStream()) will return.
\r
2099 static void *jackCloseStream( void *ptr )
\r
2101 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2102 RtApiJack *object = (RtApiJack *) info->object;
\r
2104 object->closeStream();
\r
2106 pthread_exit( NULL );
\r
2108 static void jackShutdown( void *infoPointer )
\r
2110 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2111 RtApiJack *object = (RtApiJack *) info->object;
\r
2113 // Check current stream state. If stopped, then we'll assume this
\r
2114 // was called as a result of a call to RtApiJack::stopStream (the
\r
2115 // deactivation of a client handle causes this function to be called).
\r
2116 // If not, we'll assume the Jack server is shutting down or some
\r
2117 // other problem occurred and we should close the stream.
\r
2118 if ( object->isStreamRunning() == false ) return;
\r
2120 ThreadHandle threadId;
\r
2121 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2122 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2125 static int jackXrun( void *infoPointer )
\r
2127 JackHandle *handle = (JackHandle *) infoPointer;
\r
2129 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2130 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2136 unsigned int firstChannel, unsigned int sampleRate,
\r
2137 RtAudioFormat format, unsigned int *bufferSize,
\r
2138 RtAudio::StreamOptions *options )
\r
2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2142 // Look for jack server and try to become a client (only do once per stream).
\r
2143 jack_client_t *client = 0;
\r
2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2146 jack_status_t *status = NULL;
\r
2147 if ( options && !options->streamName.empty() )
\r
2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2150 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2151 if ( client == 0 ) {
\r
2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2153 error( RtAudioError::WARNING );
\r
2158 // The handle must have been created on an earlier pass.
\r
2159 client = handle->client;
\r
2162 const char **ports;
\r
2163 std::string port, previousPort, deviceName;
\r
2164 unsigned int nPorts = 0, nDevices = 0;
\r
2165 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2167 // Parse the port names up to the first colon (:).
\r
2168 size_t iColon = 0;
\r
2170 port = (char *) ports[ nPorts ];
\r
2171 iColon = port.find(":");
\r
2172 if ( iColon != std::string::npos ) {
\r
2173 port = port.substr( 0, iColon );
\r
2174 if ( port != previousPort ) {
\r
2175 if ( nDevices == device ) deviceName = port;
\r
2177 previousPort = port;
\r
2180 } while ( ports[++nPorts] );
\r
2184 if ( device >= nDevices ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2189 // Count the available ports containing the client name as device
\r
2190 // channels. Jack "input ports" equal RtAudio output channels.
\r
2191 unsigned int nChannels = 0;
\r
2192 unsigned long flag = JackPortIsInput;
\r
2193 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2196 while ( ports[ nChannels ] ) nChannels++;
\r
2200 // Compare the jack ports for specified client to the requested number of channels.
\r
2201 if ( nChannels < (channels + firstChannel) ) {
\r
2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2203 errorText_ = errorStream_.str();
\r
2207 // Check the jack server sample rate.
\r
2208 unsigned int jackRate = jack_get_sample_rate( client );
\r
2209 if ( sampleRate != jackRate ) {
\r
2210 jack_client_close( client );
\r
2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2212 errorText_ = errorStream_.str();
\r
2215 stream_.sampleRate = jackRate;
\r
2217 // Get the latency of the JACK port.
\r
2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2219 if ( ports[ firstChannel ] ) {
\r
2220 // Added by Ge Wang
\r
2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2222 // the range (usually the min and max are equal)
\r
2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2224 // get the latency range
\r
2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2226 // be optimistic, use the min!
\r
2227 stream_.latency[mode] = latrange.min;
\r
2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2232 // The jack server always uses 32-bit floating-point data.
\r
2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2234 stream_.userFormat = format;
\r
2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2237 else stream_.userInterleaved = true;
\r
2239 // Jack always uses non-interleaved buffers.
\r
2240 stream_.deviceInterleaved[mode] = false;
\r
2242 // Jack always provides host byte-ordered data.
\r
2243 stream_.doByteSwap[mode] = false;
\r
2245 // Get the buffer size. The buffer size and number of buffers
\r
2246 // (periods) is set when the jack server is started.
\r
2247 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2248 *bufferSize = stream_.bufferSize;
\r
2250 stream_.nDeviceChannels[mode] = channels;
\r
2251 stream_.nUserChannels[mode] = channels;
\r
2253 // Set flags for buffer conversion.
\r
2254 stream_.doConvertBuffer[mode] = false;
\r
2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2256 stream_.doConvertBuffer[mode] = true;
\r
2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2258 stream_.nUserChannels[mode] > 1 )
\r
2259 stream_.doConvertBuffer[mode] = true;
\r
2261 // Allocate our JackHandle structure for the stream.
\r
2262 if ( handle == 0 ) {
\r
2264 handle = new JackHandle;
\r
2266 catch ( std::bad_alloc& ) {
\r
2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2271 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2275 stream_.apiHandle = (void *) handle;
\r
2276 handle->client = client;
\r
2278 handle->deviceName[mode] = deviceName;
\r
2280 // Allocate necessary internal buffers.
\r
2281 unsigned long bufferBytes;
\r
2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2284 if ( stream_.userBuffer[mode] == NULL ) {
\r
2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2289 if ( stream_.doConvertBuffer[mode] ) {
\r
2291 bool makeBuffer = true;
\r
2292 if ( mode == OUTPUT )
\r
2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2294 else { // mode == INPUT
\r
2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2298 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2302 if ( makeBuffer ) {
\r
2303 bufferBytes *= *bufferSize;
\r
2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2306 if ( stream_.deviceBuffer == NULL ) {
\r
2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2313 // Allocate memory for the Jack ports (channels) identifiers.
\r
2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2315 if ( handle->ports[mode] == NULL ) {
\r
2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2320 stream_.device[mode] = device;
\r
2321 stream_.channelOffset[mode] = firstChannel;
\r
2322 stream_.state = STREAM_STOPPED;
\r
2323 stream_.callbackInfo.object = (void *) this;
\r
2325 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2326 // We had already set up the stream for output.
\r
2327 stream_.mode = DUPLEX;
\r
2329 stream_.mode = mode;
\r
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2335 // Register our ports.
\r
2337 if ( mode == OUTPUT ) {
\r
2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2339 snprintf( label, 64, "outport %d", i );
\r
2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2346 snprintf( label, 64, "inport %d", i );
\r
2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2352 // Setup the buffer conversion information structure. We don't use
\r
2353 // buffers to do channel offsets, so we override that parameter
\r
2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2361 pthread_cond_destroy( &handle->condition );
\r
2362 jack_client_close( handle->client );
\r
2364 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2365 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2368 stream_.apiHandle = 0;
\r
2371 for ( int i=0; i<2; i++ ) {
\r
2372 if ( stream_.userBuffer[i] ) {
\r
2373 free( stream_.userBuffer[i] );
\r
2374 stream_.userBuffer[i] = 0;
\r
2378 if ( stream_.deviceBuffer ) {
\r
2379 free( stream_.deviceBuffer );
\r
2380 stream_.deviceBuffer = 0;
\r
2386 void RtApiJack :: closeStream( void )
\r
2388 if ( stream_.state == STREAM_CLOSED ) {
\r
2389 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2390 error( RtAudioError::WARNING );
\r
2394 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2397 if ( stream_.state == STREAM_RUNNING )
\r
2398 jack_deactivate( handle->client );
\r
2400 jack_client_close( handle->client );
\r
2404 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2405 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2406 pthread_cond_destroy( &handle->condition );
\r
2408 stream_.apiHandle = 0;
\r
2411 for ( int i=0; i<2; i++ ) {
\r
2412 if ( stream_.userBuffer[i] ) {
\r
2413 free( stream_.userBuffer[i] );
\r
2414 stream_.userBuffer[i] = 0;
\r
2418 if ( stream_.deviceBuffer ) {
\r
2419 free( stream_.deviceBuffer );
\r
2420 stream_.deviceBuffer = 0;
\r
2423 stream_.mode = UNINITIALIZED;
\r
2424 stream_.state = STREAM_CLOSED;
\r
2427 void RtApiJack :: startStream( void )
\r
2430 if ( stream_.state == STREAM_RUNNING ) {
\r
2431 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2432 error( RtAudioError::WARNING );
\r
2436 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2437 int result = jack_activate( handle->client );
\r
2439 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2443 const char **ports;
\r
2445 // Get the list of available ports.
\r
2446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2454 // Now make the port connections. Since RtAudio wasn't designed to
\r
2455 // allow the user to select particular channels of a device, we'll
\r
2456 // just open the first "nChannels" ports with offset.
\r
2457 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2459 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2460 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2463 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2472 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2473 if ( ports == NULL) {
\r
2474 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2478 // Now make the port connections. See note above.
\r
2479 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2481 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2482 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2485 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2492 handle->drainCounter = 0;
\r
2493 handle->internalDrain = false;
\r
2494 stream_.state = STREAM_RUNNING;
\r
2497 if ( result == 0 ) return;
\r
2498 error( RtAudioError::SYSTEM_ERROR );
\r
2501 void RtApiJack :: stopStream( void )
\r
2504 if ( stream_.state == STREAM_STOPPED ) {
\r
2505 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2506 error( RtAudioError::WARNING );
\r
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2513 if ( handle->drainCounter == 0 ) {
\r
2514 handle->drainCounter = 2;
\r
2515 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2519 jack_deactivate( handle->client );
\r
2520 stream_.state = STREAM_STOPPED;
\r
2523 void RtApiJack :: abortStream( void )
\r
2526 if ( stream_.state == STREAM_STOPPED ) {
\r
2527 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2528 error( RtAudioError::WARNING );
\r
2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2533 handle->drainCounter = 2;
\r
2538 // This function will be called by a spawned thread when the user
\r
2539 // callback function signals that the stream should be stopped or
\r
2540 // aborted. It is necessary to handle it this way because the
\r
2541 // callbackEvent() function must return before the jack_deactivate()
\r
2542 // function will return.
\r
2543 static void *jackStopStream( void *ptr )
\r
2545 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2546 RtApiJack *object = (RtApiJack *) info->object;
\r
2548 object->stopStream();
\r
2549 pthread_exit( NULL );
\r
2552 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2554 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2555 if ( stream_.state == STREAM_CLOSED ) {
\r
2556 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2557 error( RtAudioError::WARNING );
\r
2560 if ( stream_.bufferSize != nframes ) {
\r
2561 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2562 error( RtAudioError::WARNING );
\r
2566 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2567 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2569 // Check if we were draining the stream and signal is finished.
\r
2570 if ( handle->drainCounter > 3 ) {
\r
2571 ThreadHandle threadId;
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 if ( handle->internalDrain == true )
\r
2575 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2577 pthread_cond_signal( &handle->condition );
\r
2581 // Invoke user callback first, to get fresh output data.
\r
2582 if ( handle->drainCounter == 0 ) {
\r
2583 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2584 double streamTime = getStreamTime();
\r
2585 RtAudioStreamStatus status = 0;
\r
2586 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2587 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2588 handle->xrun[0] = false;
\r
2590 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2591 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2592 handle->xrun[1] = false;
\r
2594 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2595 stream_.bufferSize, streamTime, status, info->userData );
\r
2596 if ( cbReturnValue == 2 ) {
\r
2597 stream_.state = STREAM_STOPPING;
\r
2598 handle->drainCounter = 2;
\r
2600 pthread_create( &id, NULL, jackStopStream, info );
\r
2603 else if ( cbReturnValue == 1 ) {
\r
2604 handle->drainCounter = 1;
\r
2605 handle->internalDrain = true;
\r
2609 jack_default_audio_sample_t *jackbuffer;
\r
2610 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2611 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2615 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2616 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2617 memset( jackbuffer, 0, bufferBytes );
\r
2621 else if ( stream_.doConvertBuffer[0] ) {
\r
2623 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2627 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2630 else { // no buffer conversion
\r
2631 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2633 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2638 // Don't bother draining input
\r
2639 if ( handle->drainCounter ) {
\r
2640 handle->drainCounter++;
\r
2644 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2646 if ( stream_.doConvertBuffer[1] ) {
\r
2647 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2648 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2649 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2651 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2653 else { // no buffer conversion
\r
2654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2655 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2656 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2662 RtApi::tickStreamTime();
\r
2665 //******************** End of __UNIX_JACK__ *********************//
\r
2668 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2670 // The ASIO API is designed around a callback scheme, so this
\r
2671 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2672 // Jack. The primary constraint with ASIO is that it only allows
\r
2673 // access to a single driver at a time. Thus, it is not possible to
\r
2674 // have more than one simultaneous RtAudio stream.
\r
2676 // This implementation also requires a number of external ASIO files
\r
2677 // and a few global variables. The ASIO callback scheme does not
\r
2678 // allow for the passing of user data, so we must create a global
\r
2679 // pointer to our callbackInfo structure.
\r
2681 // On unix systems, we make use of a pthread condition variable.
\r
2682 // Since there is no equivalent in Windows, I hacked something based
\r
2683 // on information found in
\r
2684 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2686 #include "asiosys.h"
\r
2688 #include "iasiothiscallresolver.h"
\r
2689 #include "asiodrivers.h"
\r
2692 static AsioDrivers drivers;
\r
2693 static ASIOCallbacks asioCallbacks;
\r
2694 static ASIODriverInfo driverInfo;
\r
2695 static CallbackInfo *asioCallbackInfo;
\r
2696 static bool asioXRun;
\r
2698 struct AsioHandle {
\r
2699 int drainCounter; // Tracks callback counts when draining
\r
2700 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2701 ASIOBufferInfo *bufferInfos;
\r
2705 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2708 // Function declarations (definitions at end of section)
\r
2709 static const char* getAsioErrorString( ASIOError result );
\r
2710 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2711 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2713 RtApiAsio :: RtApiAsio()
\r
2715 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2716 // CoInitialize beforehand, but it must be for appartment threading
\r
2717 // (in which case, CoInitilialize will return S_FALSE here).
\r
2718 coInitialized_ = false;
\r
2719 HRESULT hr = CoInitialize( NULL );
\r
2720 if ( FAILED(hr) ) {
\r
2721 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2722 error( RtAudioError::WARNING );
\r
2724 coInitialized_ = true;
\r
2726 drivers.removeCurrentDriver();
\r
2727 driverInfo.asioVersion = 2;
\r
2729 // See note in DirectSound implementation about GetDesktopWindow().
\r
2730 driverInfo.sysRef = GetForegroundWindow();
\r
2733 RtApiAsio :: ~RtApiAsio()
\r
2735 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2736 if ( coInitialized_ ) CoUninitialize();
\r
2739 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2741 return (unsigned int) drivers.asioGetNumDev();
\r
2744 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2746 RtAudio::DeviceInfo info;
\r
2747 info.probed = false;
\r
2750 unsigned int nDevices = getDeviceCount();
\r
2751 if ( nDevices == 0 ) {
\r
2752 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2753 error( RtAudioError::INVALID_USE );
\r
2757 if ( device >= nDevices ) {
\r
2758 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2759 error( RtAudioError::INVALID_USE );
\r
2763 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2764 if ( stream_.state != STREAM_CLOSED ) {
\r
2765 if ( device >= devices_.size() ) {
\r
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2767 error( RtAudioError::WARNING );
\r
2770 return devices_[ device ];
\r
2773 char driverName[32];
\r
2774 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2775 if ( result != ASE_OK ) {
\r
2776 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2777 errorText_ = errorStream_.str();
\r
2778 error( RtAudioError::WARNING );
\r
2782 info.name = driverName;
\r
2784 if ( !drivers.loadDriver( driverName ) ) {
\r
2785 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2786 errorText_ = errorStream_.str();
\r
2787 error( RtAudioError::WARNING );
\r
2791 result = ASIOInit( &driverInfo );
\r
2792 if ( result != ASE_OK ) {
\r
2793 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2794 errorText_ = errorStream_.str();
\r
2795 error( RtAudioError::WARNING );
\r
2799 // Determine the device channel information.
\r
2800 long inputChannels, outputChannels;
\r
2801 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2802 if ( result != ASE_OK ) {
\r
2803 drivers.removeCurrentDriver();
\r
2804 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2806 error( RtAudioError::WARNING );
\r
2810 info.outputChannels = outputChannels;
\r
2811 info.inputChannels = inputChannels;
\r
2812 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2813 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2815 // Determine the supported sample rates.
\r
2816 info.sampleRates.clear();
\r
2817 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2818 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2819 if ( result == ASE_OK ) {
\r
2820 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2822 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2823 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2827 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2828 ASIOChannelInfo channelInfo;
\r
2829 channelInfo.channel = 0;
\r
2830 channelInfo.isInput = true;
\r
2831 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2832 result = ASIOGetChannelInfo( &channelInfo );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2837 error( RtAudioError::WARNING );
\r
2841 info.nativeFormats = 0;
\r
2842 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2843 info.nativeFormats |= RTAUDIO_SINT16;
\r
2844 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2845 info.nativeFormats |= RTAUDIO_SINT32;
\r
2846 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2847 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2848 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2849 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2850 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2851 info.nativeFormats |= RTAUDIO_SINT24;
\r
2853 if ( info.outputChannels > 0 )
\r
2854 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2855 if ( info.inputChannels > 0 )
\r
2856 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2858 info.probed = true;
\r
2859 drivers.removeCurrentDriver();
\r
2863 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2865 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2866 object->callbackEvent( index );
\r
2869 void RtApiAsio :: saveDeviceInfo( void )
\r
2873 unsigned int nDevices = getDeviceCount();
\r
2874 devices_.resize( nDevices );
\r
2875 for ( unsigned int i=0; i<nDevices; i++ )
\r
2876 devices_[i] = getDeviceInfo( i );
\r
2879 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2880 unsigned int firstChannel, unsigned int sampleRate,
\r
2881 RtAudioFormat format, unsigned int *bufferSize,
\r
2882 RtAudio::StreamOptions *options )
\r
2883 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2885 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2887 // For ASIO, a duplex stream MUST use the same driver.
\r
2888 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2889 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2893 char driverName[32];
\r
2894 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2895 if ( result != ASE_OK ) {
\r
2896 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2897 errorText_ = errorStream_.str();
\r
2901 // Only load the driver once for duplex stream.
\r
2902 if ( !isDuplexInput ) {
\r
2903 // The getDeviceInfo() function will not work when a stream is open
\r
2904 // because ASIO does not allow multiple devices to run at the same
\r
2905 // time. Thus, we'll probe the system before opening a stream and
\r
2906 // save the results for use by getDeviceInfo().
\r
2907 this->saveDeviceInfo();
\r
2909 if ( !drivers.loadDriver( driverName ) ) {
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2915 result = ASIOInit( &driverInfo );
\r
2916 if ( result != ASE_OK ) {
\r
2917 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2918 errorText_ = errorStream_.str();
\r
2923 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2924 bool buffersAllocated = false;
\r
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2926 unsigned int nChannels;
\r
2929 // Check the device channel count.
\r
2930 long inputChannels, outputChannels;
\r
2931 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2932 if ( result != ASE_OK ) {
\r
2933 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2934 errorText_ = errorStream_.str();
\r
2938 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2939 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2940 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2941 errorText_ = errorStream_.str();
\r
2944 stream_.nDeviceChannels[mode] = channels;
\r
2945 stream_.nUserChannels[mode] = channels;
\r
2946 stream_.channelOffset[mode] = firstChannel;
\r
2948 // Verify the sample rate is supported.
\r
2949 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2950 if ( result != ASE_OK ) {
\r
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2952 errorText_ = errorStream_.str();
\r
2956 // Get the current sample rate
\r
2957 ASIOSampleRate currentRate;
\r
2958 result = ASIOGetSampleRate( ¤tRate );
\r
2959 if ( result != ASE_OK ) {
\r
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2961 errorText_ = errorStream_.str();
\r
2965 // Set the sample rate only if necessary
\r
2966 if ( currentRate != sampleRate ) {
\r
2967 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2968 if ( result != ASE_OK ) {
\r
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2970 errorText_ = errorStream_.str();
\r
2975 // Determine the driver data type.
\r
2976 ASIOChannelInfo channelInfo;
\r
2977 channelInfo.channel = 0;
\r
2978 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2979 else channelInfo.isInput = true;
\r
2980 result = ASIOGetChannelInfo( &channelInfo );
\r
2981 if ( result != ASE_OK ) {
\r
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2983 errorText_ = errorStream_.str();
\r
2987 // Assuming WINDOWS host is always little-endian.
\r
2988 stream_.doByteSwap[mode] = false;
\r
2989 stream_.userFormat = format;
\r
2990 stream_.deviceFormat[mode] = 0;
\r
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2993 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2995 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2997 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3001 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3005 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3009 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3012 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3014 errorText_ = errorStream_.str();
\r
3018 // Set the buffer size. For a duplex stream, this will end up
\r
3019 // setting the buffer size based on the input constraints, which
\r
3021 long minSize, maxSize, preferSize, granularity;
\r
3022 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3023 if ( result != ASE_OK ) {
\r
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3025 errorText_ = errorStream_.str();
\r
3029 if ( isDuplexInput ) {
\r
3030 // When this is the duplex input (output was opened before), then we have to use the same
\r
3031 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3032 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3033 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3034 // to the "bufferSize" param as usual to set up processing buffers.
\r
3036 *bufferSize = stream_.bufferSize;
\r
3039 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3040 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity == -1 ) {
\r
3043 // Make sure bufferSize is a power of two.
\r
3044 int log2_of_min_size = 0;
\r
3045 int log2_of_max_size = 0;
\r
3047 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3048 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3049 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3052 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3053 int min_delta_num = log2_of_min_size;
\r
3055 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3056 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3057 if (current_delta < min_delta) {
\r
3058 min_delta = current_delta;
\r
3059 min_delta_num = i;
\r
3063 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3064 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3065 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3067 else if ( granularity != 0 ) {
\r
3068 // Set to an even multiple of granularity, rounding up.
\r
3069 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3074 // we don't use it anymore, see above!
\r
3075 // Just left it here for the case...
\r
3076 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3077 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3082 stream_.bufferSize = *bufferSize;
\r
3083 stream_.nBuffers = 2;
\r
3085 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3086 else stream_.userInterleaved = true;
\r
3088 // ASIO always uses non-interleaved buffers.
\r
3089 stream_.deviceInterleaved[mode] = false;
\r
3091 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3092 if ( handle == 0 ) {
\r
3094 handle = new AsioHandle;
\r
3096 catch ( std::bad_alloc& ) {
\r
3097 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3100 handle->bufferInfos = 0;
\r
3102 // Create a manual-reset event.
\r
3103 handle->condition = CreateEvent( NULL, // no security
\r
3104 TRUE, // manual-reset
\r
3105 FALSE, // non-signaled initially
\r
3106 NULL ); // unnamed
\r
3107 stream_.apiHandle = (void *) handle;
\r
3110 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3111 // and output separately, we'll have to dispose of previously
\r
3112 // created output buffers for a duplex stream.
\r
3113 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3114 ASIODisposeBuffers();
\r
3115 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3118 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3120 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3121 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3122 if ( handle->bufferInfos == NULL ) {
\r
3123 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3124 errorText_ = errorStream_.str();
\r
3128 ASIOBufferInfo *infos;
\r
3129 infos = handle->bufferInfos;
\r
3130 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3131 infos->isInput = ASIOFalse;
\r
3132 infos->channelNum = i + stream_.channelOffset[0];
\r
3133 infos->buffers[0] = infos->buffers[1] = 0;
\r
3135 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3136 infos->isInput = ASIOTrue;
\r
3137 infos->channelNum = i + stream_.channelOffset[1];
\r
3138 infos->buffers[0] = infos->buffers[1] = 0;
\r
3141 // prepare for callbacks
\r
3142 stream_.sampleRate = sampleRate;
\r
3143 stream_.device[mode] = device;
\r
3144 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3146 // store this class instance before registering callbacks, that are going to use it
\r
3147 asioCallbackInfo = &stream_.callbackInfo;
\r
3148 stream_.callbackInfo.object = (void *) this;
\r
3150 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3151 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3152 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3153 asioCallbacks.asioMessage = &asioMessages;
\r
3154 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3155 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3156 if ( result != ASE_OK ) {
\r
3157 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3158 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3159 // in that case, let's be naïve and try that instead
\r
3160 *bufferSize = preferSize;
\r
3161 stream_.bufferSize = *bufferSize;
\r
3162 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3165 if ( result != ASE_OK ) {
\r
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3167 errorText_ = errorStream_.str();
\r
3170 buffersAllocated = true;
\r
3171 stream_.state = STREAM_STOPPED;
\r
3173 // Set flags for buffer conversion.
\r
3174 stream_.doConvertBuffer[mode] = false;
\r
3175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3176 stream_.doConvertBuffer[mode] = true;
\r
3177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3178 stream_.nUserChannels[mode] > 1 )
\r
3179 stream_.doConvertBuffer[mode] = true;
\r
3181 // Allocate necessary internal buffers
\r
3182 unsigned long bufferBytes;
\r
3183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3185 if ( stream_.userBuffer[mode] == NULL ) {
\r
3186 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3190 if ( stream_.doConvertBuffer[mode] ) {
\r
3192 bool makeBuffer = true;
\r
3193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3194 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3195 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3196 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3199 if ( makeBuffer ) {
\r
3200 bufferBytes *= *bufferSize;
\r
3201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3203 if ( stream_.deviceBuffer == NULL ) {
\r
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3210 // Determine device latencies
\r
3211 long inputLatency, outputLatency;
\r
3212 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3215 errorText_ = errorStream_.str();
\r
3216 error( RtAudioError::WARNING); // warn but don't fail
\r
3219 stream_.latency[0] = outputLatency;
\r
3220 stream_.latency[1] = inputLatency;
\r
3223 // Setup the buffer conversion information structure. We don't use
\r
3224 // buffers to do channel offsets, so we override that parameter
\r
3226 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3231 if ( !isDuplexInput ) {
\r
3232 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3233 // So we clean up for single channel only
\r
3235 if ( buffersAllocated )
\r
3236 ASIODisposeBuffers();
\r
3238 drivers.removeCurrentDriver();
\r
3241 CloseHandle( handle->condition );
\r
3242 if ( handle->bufferInfos )
\r
3243 free( handle->bufferInfos );
\r
3246 stream_.apiHandle = 0;
\r
3250 if ( stream_.userBuffer[mode] ) {
\r
3251 free( stream_.userBuffer[mode] );
\r
3252 stream_.userBuffer[mode] = 0;
\r
3255 if ( stream_.deviceBuffer ) {
\r
3256 free( stream_.deviceBuffer );
\r
3257 stream_.deviceBuffer = 0;
\r
3262 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3264 void RtApiAsio :: closeStream()
\r
3266 if ( stream_.state == STREAM_CLOSED ) {
\r
3267 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3268 error( RtAudioError::WARNING );
\r
3272 if ( stream_.state == STREAM_RUNNING ) {
\r
3273 stream_.state = STREAM_STOPPED;
\r
3276 ASIODisposeBuffers();
\r
3277 drivers.removeCurrentDriver();
\r
3279 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3281 CloseHandle( handle->condition );
\r
3282 if ( handle->bufferInfos )
\r
3283 free( handle->bufferInfos );
\r
3285 stream_.apiHandle = 0;
\r
3288 for ( int i=0; i<2; i++ ) {
\r
3289 if ( stream_.userBuffer[i] ) {
\r
3290 free( stream_.userBuffer[i] );
\r
3291 stream_.userBuffer[i] = 0;
\r
3295 if ( stream_.deviceBuffer ) {
\r
3296 free( stream_.deviceBuffer );
\r
3297 stream_.deviceBuffer = 0;
\r
3300 stream_.mode = UNINITIALIZED;
\r
3301 stream_.state = STREAM_CLOSED;
\r
// File-scope flag, cleared by startStream().  Presumably consulted by the
// ASIO message handler (defined later in this section) to avoid spawning
// duplicate stop-stream threads — TODO confirm against asioMessages().
bool stopThreadCalled = false;
\r
3306 void RtApiAsio :: startStream()
\r
3309 if ( stream_.state == STREAM_RUNNING ) {
\r
3310 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3311 error( RtAudioError::WARNING );
\r
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3316 ASIOError result = ASIOStart();
\r
3317 if ( result != ASE_OK ) {
\r
3318 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3319 errorText_ = errorStream_.str();
\r
3323 handle->drainCounter = 0;
\r
3324 handle->internalDrain = false;
\r
3325 ResetEvent( handle->condition );
\r
3326 stream_.state = STREAM_RUNNING;
\r
3330 stopThreadCalled = false;
\r
3332 if ( result == ASE_OK ) return;
\r
3333 error( RtAudioError::SYSTEM_ERROR );
\r
3336 void RtApiAsio :: stopStream()
\r
3339 if ( stream_.state == STREAM_STOPPED ) {
\r
3340 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3341 error( RtAudioError::WARNING );
\r
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3347 if ( handle->drainCounter == 0 ) {
\r
3348 handle->drainCounter = 2;
\r
3349 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3353 stream_.state = STREAM_STOPPED;
\r
3355 ASIOError result = ASIOStop();
\r
3356 if ( result != ASE_OK ) {
\r
3357 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3358 errorText_ = errorStream_.str();
\r
3361 if ( result == ASE_OK ) return;
\r
3362 error( RtAudioError::SYSTEM_ERROR );
\r
3365 void RtApiAsio :: abortStream()
\r
3368 if ( stream_.state == STREAM_STOPPED ) {
\r
3369 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3370 error( RtAudioError::WARNING );
\r
3374 // The following lines were commented-out because some behavior was
\r
3375 // noted where the device buffers need to be zeroed to avoid
\r
3376 // continuing sound, even when the device buffers are completely
\r
3377 // disposed. So now, calling abort is the same as calling stop.
\r
3378 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3379 // handle->drainCounter = 2;
\r
3383 // This function will be called by a spawned thread when the user
\r
3384 // callback function signals that the stream should be stopped or
\r
3385 // aborted. It is necessary to handle it this way because the
\r
3386 // callbackEvent() function must return before the ASIOStop()
\r
3387 // function will return.
\r
3388 static unsigned __stdcall asioStopStream( void *ptr )
\r
3390 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3391 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3393 object->stopStream();
\r
3394 _endthreadex( 0 );
\r
3398 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3400 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3401 if ( stream_.state == STREAM_CLOSED ) {
\r
3402 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3403 error( RtAudioError::WARNING );
\r
3407 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3408 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3410 // Check if we were draining the stream and signal if finished.
\r
3411 if ( handle->drainCounter > 3 ) {
\r
3413 stream_.state = STREAM_STOPPING;
\r
3414 if ( handle->internalDrain == false )
\r
3415 SetEvent( handle->condition );
\r
3416 else { // spawn a thread to stop the stream
\r
3417 unsigned threadId;
\r
3418 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3419 &stream_.callbackInfo, 0, &threadId );
\r
3424 // Invoke user callback to get fresh output data UNLESS we are
\r
3425 // draining stream.
\r
3426 if ( handle->drainCounter == 0 ) {
\r
3427 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3428 double streamTime = getStreamTime();
\r
3429 RtAudioStreamStatus status = 0;
\r
3430 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3431 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3434 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3438 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3439 stream_.bufferSize, streamTime, status, info->userData );
\r
3440 if ( cbReturnValue == 2 ) {
\r
3441 stream_.state = STREAM_STOPPING;
\r
3442 handle->drainCounter = 2;
\r
3443 unsigned threadId;
\r
3444 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3445 &stream_.callbackInfo, 0, &threadId );
\r
3448 else if ( cbReturnValue == 1 ) {
\r
3449 handle->drainCounter = 1;
\r
3450 handle->internalDrain = true;
\r
3454 unsigned int nChannels, bufferBytes, i, j;
\r
3455 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3458 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3460 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3462 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3463 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3464 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3468 else if ( stream_.doConvertBuffer[0] ) {
\r
3470 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3471 if ( stream_.doByteSwap[0] )
\r
3472 byteSwapBuffer( stream_.deviceBuffer,
\r
3473 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3474 stream_.deviceFormat[0] );
\r
3476 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3477 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3478 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3479 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3485 if ( stream_.doByteSwap[0] )
\r
3486 byteSwapBuffer( stream_.userBuffer[0],
\r
3487 stream_.bufferSize * stream_.nUserChannels[0],
\r
3488 stream_.userFormat );
\r
3490 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3491 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3492 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3499 // Don't bother draining input
\r
3500 if ( handle->drainCounter ) {
\r
3501 handle->drainCounter++;
\r
3505 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3507 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3509 if (stream_.doConvertBuffer[1]) {
\r
3511 // Always interleave ASIO input data.
\r
3512 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3513 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3514 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3515 handle->bufferInfos[i].buffers[bufferIndex],
\r
3519 if ( stream_.doByteSwap[1] )
\r
3520 byteSwapBuffer( stream_.deviceBuffer,
\r
3521 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3522 stream_.deviceFormat[1] );
\r
3523 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3527 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3528 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3529 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3530 handle->bufferInfos[i].buffers[bufferIndex],
\r
3535 if ( stream_.doByteSwap[1] )
\r
3536 byteSwapBuffer( stream_.userBuffer[1],
\r
3537 stream_.bufferSize * stream_.nUserChannels[1],
\r
3538 stream_.userFormat );
\r
3543 // The following call was suggested by Malte Clasen. While the API
\r
3544 // documentation indicates it should not be required, some device
\r
3545 // drivers apparently do not function correctly without it.
\r
3546 ASIOOutputReady();
\r
3548 RtApi::tickStreamTime();
\r
3552 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3554 // The ASIO documentation says that this usually only happens during
\r
3555 // external sync. Audio processing is not stopped by the driver,
\r
3556 // actual sample rate might not have even changed, maybe only the
\r
3557 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3560 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3562 object->stopStream();
\r
3564 catch ( RtAudioError &exception ) {
\r
3565 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3569 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3572 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3576 switch( selector ) {
\r
3577 case kAsioSelectorSupported:
\r
3578 if ( value == kAsioResetRequest
\r
3579 || value == kAsioEngineVersion
\r
3580 || value == kAsioResyncRequest
\r
3581 || value == kAsioLatenciesChanged
\r
3582 // The following three were added for ASIO 2.0, you don't
\r
3583 // necessarily have to support them.
\r
3584 || value == kAsioSupportsTimeInfo
\r
3585 || value == kAsioSupportsTimeCode
\r
3586 || value == kAsioSupportsInputMonitor)
\r
3589 case kAsioResetRequest:
\r
3590 // Defer the task and perform the reset of the driver during the
\r
3591 // next "safe" situation. You cannot reset the driver right now,
\r
3592 // as this code is called from the driver. Reset the driver is
\r
3593 // done by completely destruct is. I.e. ASIOStop(),
\r
3594 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3596 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3599 case kAsioResyncRequest:
\r
3600 // This informs the application that the driver encountered some
\r
3601 // non-fatal data loss. It is used for synchronization purposes
\r
3602 // of different media. Added mainly to work around the Win16Mutex
\r
3603 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3604 // which could lose data because the Mutex was held too long by
\r
3605 // another thread. However a driver can issue it in other
\r
3606 // situations, too.
\r
3607 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3611 case kAsioLatenciesChanged:
\r
3612 // This will inform the host application that the drivers were
\r
3613 // latencies changed. Beware, it this does not mean that the
\r
3614 // buffer sizes have changed! You might need to update internal
\r
3616 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3619 case kAsioEngineVersion:
\r
3620 // Return the supported ASIO version of the host application. If
\r
3621 // a host application does not implement this selector, ASIO 1.0
\r
3622 // is assumed by the driver.
\r
3625 case kAsioSupportsTimeInfo:
\r
3626 // Informs the driver whether the
\r
3627 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3628 // For compatibility with ASIO 1.0 drivers the host application
\r
3629 // should always support the "old" bufferSwitch method, too.
\r
3632 case kAsioSupportsTimeCode:
\r
3633 // Informs the driver whether application is interested in time
\r
3634 // code info. If an application does not need to know about time
\r
3635 // code, the driver has less work to do.
\r
3642 static const char* getAsioErrorString( ASIOError result )
\r
3647 const char*message;
\r
3650 static const Messages m[] =
\r
3652 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3653 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3654 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3655 { ASE_InvalidMode, "Invalid mode." },
\r
3656 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3657 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3658 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3661 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3662 if ( m[i].value == result ) return m[i].message;
\r
3664 return "Unknown error.";
\r
3667 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3671 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3673 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3674 // - Introduces support for the Windows WASAPI API
\r
3675 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3676 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3677 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3682 #include <audioclient.h>
\r
3684 #include <mmdeviceapi.h>
\r
3685 #include <functiondiscoverykeys_devpkey.h>
\r
3687 //=============================================================================
\r
3689 #define SAFE_RELEASE( objectPtr )\
\r
3692 objectPtr->Release();\
\r
3693 objectPtr = NULL;\
\r
3696 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3698 //-----------------------------------------------------------------------------
\r
3700 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3701 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3702 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3703 // provide intermediate storage for read / write synchronization.
\r
3704 class WasapiBuffer
\r
3708 : buffer_( NULL ),
\r
3717 // sets the length of the internal ring buffer
\r
3718 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3721 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3723 bufferSize_ = bufferSize;
\r
3728 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3729 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3731 if ( !buffer || // incoming buffer is NULL
\r
3732 bufferSize == 0 || // incoming buffer has no data
\r
3733 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3738 unsigned int relOutIndex = outIndex_;
\r
3739 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3740 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3741 relOutIndex += bufferSize_;
\r
3744 // "in" index can end on the "out" index but cannot begin at it
\r
3745 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3746 return false; // not enough space between "in" index and "out" index
\r
3749 // copy buffer from external to internal
\r
3750 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3751 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3752 int fromInSize = bufferSize - fromZeroSize;
\r
3756 case RTAUDIO_SINT8:
\r
3757 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3758 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3760 case RTAUDIO_SINT16:
\r
3761 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3762 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3764 case RTAUDIO_SINT24:
\r
3765 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3766 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3768 case RTAUDIO_SINT32:
\r
3769 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3770 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3772 case RTAUDIO_FLOAT32:
\r
3773 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3774 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3776 case RTAUDIO_FLOAT64:
\r
3777 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3778 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3782 // update "in" index
\r
3783 inIndex_ += bufferSize;
\r
3784 inIndex_ %= bufferSize_;
\r
3789 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3790 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3792 if ( !buffer || // incoming buffer is NULL
\r
3793 bufferSize == 0 || // incoming buffer has no data
\r
3794 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3799 unsigned int relInIndex = inIndex_;
\r
3800 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3801 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3802 relInIndex += bufferSize_;
\r
3805 // "out" index can begin at and end on the "in" index
\r
3806 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3807 return false; // not enough space between "out" index and "in" index
\r
3810 // copy buffer from internal to external
\r
3811 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3812 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3813 int fromOutSize = bufferSize - fromZeroSize;
\r
3817 case RTAUDIO_SINT8:
\r
3818 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3819 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3821 case RTAUDIO_SINT16:
\r
3822 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3823 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3825 case RTAUDIO_SINT24:
\r
3826 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3827 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3829 case RTAUDIO_SINT32:
\r
3830 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3831 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3833 case RTAUDIO_FLOAT32:
\r
3834 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3835 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3837 case RTAUDIO_FLOAT64:
\r
3838 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3839 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3843 // update "out" index
\r
3844 outIndex_ += bufferSize;
\r
3845 outIndex_ %= bufferSize_;
\r
3852 unsigned int bufferSize_;
\r
3853 unsigned int inIndex_;
\r
3854 unsigned int outIndex_;
\r
3857 //-----------------------------------------------------------------------------
\r
3859 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3860 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3861 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3862 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3863 void convertBufferWasapi( char* outBuffer,
\r
3864 const char* inBuffer,
\r
3865 const unsigned int& channelCount,
\r
3866 const unsigned int& inSampleRate,
\r
3867 const unsigned int& outSampleRate,
\r
3868 const unsigned int& inSampleCount,
\r
3869 unsigned int& outSampleCount,
\r
3870 const RtAudioFormat& format )
\r
3872 // calculate the new outSampleCount and relative sampleStep
\r
3873 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3874 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3875 float sampleStep = 1.0f / sampleRatio;
\r
3876 float inSampleFraction = 0.0f;
\r
3878 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3880 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3881 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3883 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3884 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3886 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3890 case RTAUDIO_SINT8:
\r
3891 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3893 case RTAUDIO_SINT16:
\r
3894 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3896 case RTAUDIO_SINT24:
\r
3897 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3899 case RTAUDIO_SINT32:
\r
3900 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3902 case RTAUDIO_FLOAT32:
\r
3903 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3905 case RTAUDIO_FLOAT64:
\r
3906 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3910 // jump to next in sample
\r
3911 inSampleFraction += sampleStep;
\r
3914 else // else interpolate
\r
3916 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3917 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3919 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3920 float inSampleDec = inSampleFraction - inSample;
\r
3921 unsigned int frameInSample = inSample * channelCount;
\r
3922 unsigned int frameOutSample = outSample * channelCount;
\r
3926 case RTAUDIO_SINT8:
\r
3928 char* convInBuffer = ( char* ) inBuffer;
\r
3929 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3931 char fromSample = convInBuffer[ frameInSample + channel ];
\r
3932 char toSample = convInBuffer[ frameInSample + channelCount + channel ];
\r
3933 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3934 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3938 case RTAUDIO_SINT16:
\r
3940 short* convInBuffer = ( short* ) inBuffer;
\r
3941 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3943 short fromSample = convInBuffer[ frameInSample + channel ];
\r
3944 short toSample = convInBuffer[ frameInSample + channelCount + channel ];
\r
3945 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3946 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3950 case RTAUDIO_SINT24:
\r
3952 S24* convInBuffer = ( S24* ) inBuffer;
\r
3953 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3955 int fromSample = convInBuffer[ frameInSample + channel ].asInt();
\r
3956 int toSample = convInBuffer[ frameInSample + channelCount + channel ].asInt();
\r
3957 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3958 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3962 case RTAUDIO_SINT32:
\r
3964 int* convInBuffer = ( int* ) inBuffer;
\r
3965 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3967 int fromSample = convInBuffer[ frameInSample + channel ];
\r
3968 int toSample = convInBuffer[ frameInSample + channelCount + channel ];
\r
3969 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3970 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3974 case RTAUDIO_FLOAT32:
\r
3976 float* convInBuffer = ( float* ) inBuffer;
\r
3977 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3979 float fromSample = convInBuffer[ frameInSample + channel ];
\r
3980 float toSample = convInBuffer[ frameInSample + channelCount + channel ];
\r
3981 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3982 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3986 case RTAUDIO_FLOAT64:
\r
3988 double* convInBuffer = ( double* ) inBuffer;
\r
3989 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3991 double fromSample = convInBuffer[ frameInSample + channel ];
\r
3992 double toSample = convInBuffer[ frameInSample + channelCount + channel ];
\r
3993 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3994 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
4000 // jump to next in sample
\r
4001 inSampleFraction += sampleStep;
\r
4006 //-----------------------------------------------------------------------------
\r
4008 // A structure to hold various information related to the WASAPI implementation.
\r
4009 struct WasapiHandle
\r
4011 IAudioClient* captureAudioClient;
\r
4012 IAudioClient* renderAudioClient;
\r
4013 IAudioCaptureClient* captureClient;
\r
4014 IAudioRenderClient* renderClient;
\r
4015 HANDLE captureEvent;
\r
4016 HANDLE renderEvent;
\r
4019 : captureAudioClient( NULL ),
\r
4020 renderAudioClient( NULL ),
\r
4021 captureClient( NULL ),
\r
4022 renderClient( NULL ),
\r
4023 captureEvent( NULL ),
\r
4024 renderEvent( NULL ) {}
\r
4027 //=============================================================================
\r
4029 RtApiWasapi::RtApiWasapi()
\r
4030 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4032 // WASAPI can run either apartment or multi-threaded
\r
4033 HRESULT hr = CoInitialize( NULL );
\r
4034 if ( !FAILED( hr ) )
\r
4035 coInitialized_ = true;
\r
4037 // Instantiate device enumerator
\r
4038 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4039 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4040 ( void** ) &deviceEnumerator_ );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4044 error( RtAudioError::DRIVER_ERROR );
\r
4048 //-----------------------------------------------------------------------------
\r
4050 RtApiWasapi::~RtApiWasapi()
\r
4052 if ( stream_.state != STREAM_CLOSED )
\r
4055 SAFE_RELEASE( deviceEnumerator_ );
\r
4057 // If this object previously called CoInitialize()
\r
4058 if ( coInitialized_ )
\r
4062 //=============================================================================
\r
4064 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4066 unsigned int captureDeviceCount = 0;
\r
4067 unsigned int renderDeviceCount = 0;
\r
4069 IMMDeviceCollection* captureDevices = NULL;
\r
4070 IMMDeviceCollection* renderDevices = NULL;
\r
4072 // Count capture devices
\r
4073 errorText_.clear();
\r
4074 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4080 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4081 if ( FAILED( hr ) ) {
\r
4082 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4086 // Count render devices
\r
4087 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4088 if ( FAILED( hr ) ) {
\r
4089 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4093 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4094 if ( FAILED( hr ) ) {
\r
4095 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4100 // release all references
\r
4101 SAFE_RELEASE( captureDevices );
\r
4102 SAFE_RELEASE( renderDevices );
\r
4104 if ( errorText_.empty() )
\r
4105 return captureDeviceCount + renderDeviceCount;
\r
4107 error( RtAudioError::DRIVER_ERROR );
\r
4111 //-----------------------------------------------------------------------------
\r
4113 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4115 RtAudio::DeviceInfo info;
\r
4116 unsigned int captureDeviceCount = 0;
\r
4117 unsigned int renderDeviceCount = 0;
\r
4118 std::string defaultDeviceName;
\r
4119 bool isCaptureDevice = false;
\r
4121 PROPVARIANT deviceNameProp;
\r
4122 PROPVARIANT defaultDeviceNameProp;
\r
4124 IMMDeviceCollection* captureDevices = NULL;
\r
4125 IMMDeviceCollection* renderDevices = NULL;
\r
4126 IMMDevice* devicePtr = NULL;
\r
4127 IMMDevice* defaultDevicePtr = NULL;
\r
4128 IAudioClient* audioClient = NULL;
\r
4129 IPropertyStore* devicePropStore = NULL;
\r
4130 IPropertyStore* defaultDevicePropStore = NULL;
\r
4132 WAVEFORMATEX* deviceFormat = NULL;
\r
4133 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4136 info.probed = false;
\r
4138 // Count capture devices
\r
4139 errorText_.clear();
\r
4140 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4141 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4142 if ( FAILED( hr ) ) {
\r
4143 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4147 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4148 if ( FAILED( hr ) ) {
\r
4149 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4153 // Count render devices
\r
4154 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4155 if ( FAILED( hr ) ) {
\r
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4160 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4161 if ( FAILED( hr ) ) {
\r
4162 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4166 // validate device index
\r
4167 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4168 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4169 errorType = RtAudioError::INVALID_USE;
\r
4173 // determine whether index falls within capture or render devices
\r
4174 if ( device >= renderDeviceCount ) {
\r
4175 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4176 if ( FAILED( hr ) ) {
\r
4177 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4180 isCaptureDevice = true;
\r
4183 hr = renderDevices->Item( device, &devicePtr );
\r
4184 if ( FAILED( hr ) ) {
\r
4185 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4188 isCaptureDevice = false;
\r
4191 // get default device name
\r
4192 if ( isCaptureDevice ) {
\r
4193 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4194 if ( FAILED( hr ) ) {
\r
4195 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4200 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4201 if ( FAILED( hr ) ) {
\r
4202 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4207 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4208 if ( FAILED( hr ) ) {
\r
4209 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4212 PropVariantInit( &defaultDeviceNameProp );
\r
4214 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4215 if ( FAILED( hr ) ) {
\r
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4220 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4223 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4224 if ( FAILED( hr ) ) {
\r
4225 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4229 PropVariantInit( &deviceNameProp );
\r
4231 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4232 if ( FAILED( hr ) ) {
\r
4233 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4237 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4240 if ( isCaptureDevice ) {
\r
4241 info.isDefaultInput = info.name == defaultDeviceName;
\r
4242 info.isDefaultOutput = false;
\r
4245 info.isDefaultInput = false;
\r
4246 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4250 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4251 if ( FAILED( hr ) ) {
\r
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4256 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4257 if ( FAILED( hr ) ) {
\r
4258 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4262 if ( isCaptureDevice ) {
\r
4263 info.inputChannels = deviceFormat->nChannels;
\r
4264 info.outputChannels = 0;
\r
4265 info.duplexChannels = 0;
\r
4268 info.inputChannels = 0;
\r
4269 info.outputChannels = deviceFormat->nChannels;
\r
4270 info.duplexChannels = 0;
\r
4274 info.sampleRates.clear();
\r
4276 // allow support for all sample rates as we have a built-in sample rate converter
\r
4277 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4278 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4280 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4283 info.nativeFormats = 0;
\r
4285 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4286 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4287 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4289 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4290 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4292 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4293 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4296 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4297 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4298 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4300 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4301 info.nativeFormats |= RTAUDIO_SINT8;
\r
4303 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4304 info.nativeFormats |= RTAUDIO_SINT16;
\r
4306 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4307 info.nativeFormats |= RTAUDIO_SINT24;
\r
4309 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4310 info.nativeFormats |= RTAUDIO_SINT32;
\r
4315 info.probed = true;
\r
4318 // release all references
\r
4319 PropVariantClear( &deviceNameProp );
\r
4320 PropVariantClear( &defaultDeviceNameProp );
\r
4322 SAFE_RELEASE( captureDevices );
\r
4323 SAFE_RELEASE( renderDevices );
\r
4324 SAFE_RELEASE( devicePtr );
\r
4325 SAFE_RELEASE( defaultDevicePtr );
\r
4326 SAFE_RELEASE( audioClient );
\r
4327 SAFE_RELEASE( devicePropStore );
\r
4328 SAFE_RELEASE( defaultDevicePropStore );
\r
4330 CoTaskMemFree( deviceFormat );
\r
4331 CoTaskMemFree( closestMatchFormat );
\r
4333 if ( !errorText_.empty() )
\r
4334 error( errorType );
\r
4338 //-----------------------------------------------------------------------------
\r
4340 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4342 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4343 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4351 //-----------------------------------------------------------------------------
\r
4353 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4355 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4356 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4364 //-----------------------------------------------------------------------------
\r
4366 void RtApiWasapi::closeStream( void )
\r
4368 if ( stream_.state == STREAM_CLOSED ) {
\r
4369 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4370 error( RtAudioError::WARNING );
\r
4374 if ( stream_.state != STREAM_STOPPED )
\r
4377 // clean up stream memory
\r
4378 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4381 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4382 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4384 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4385 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4387 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4388 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4390 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4391 stream_.apiHandle = NULL;
\r
4393 for ( int i = 0; i < 2; i++ ) {
\r
4394 if ( stream_.userBuffer[i] ) {
\r
4395 free( stream_.userBuffer[i] );
\r
4396 stream_.userBuffer[i] = 0;
\r
4400 if ( stream_.deviceBuffer ) {
\r
4401 free( stream_.deviceBuffer );
\r
4402 stream_.deviceBuffer = 0;
\r
4405 // update stream state
\r
4406 stream_.state = STREAM_CLOSED;
\r
4409 //-----------------------------------------------------------------------------
\r
4411 void RtApiWasapi::startStream( void )
\r
4415 if ( stream_.state == STREAM_RUNNING ) {
\r
4416 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4417 error( RtAudioError::WARNING );
\r
4421 // update stream state
\r
4422 stream_.state = STREAM_RUNNING;
\r
4424 // create WASAPI stream thread
\r
4425 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4427 if ( !stream_.callbackInfo.thread ) {
\r
4428 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4429 error( RtAudioError::THREAD_ERROR );
\r
4432 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4433 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4437 //-----------------------------------------------------------------------------
\r
4439 void RtApiWasapi::stopStream( void )
\r
4443 if ( stream_.state == STREAM_STOPPED ) {
\r
4444 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4445 error( RtAudioError::WARNING );
\r
4449 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4450 stream_.state = STREAM_STOPPING;
\r
4452 // wait until stream thread is stopped
\r
4453 while( stream_.state != STREAM_STOPPED ) {
\r
4457 // Wait for the last buffer to play before stopping.
\r
4458 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4460 // stop capture client if applicable
\r
4461 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4462 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4463 if ( FAILED( hr ) ) {
\r
4464 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4465 error( RtAudioError::DRIVER_ERROR );
\r
4470 // stop render client if applicable
\r
4471 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4472 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4475 error( RtAudioError::DRIVER_ERROR );
\r
4480 // close thread handle
\r
4481 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4482 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4483 error( RtAudioError::THREAD_ERROR );
\r
4487 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4490 //-----------------------------------------------------------------------------
\r
4492 void RtApiWasapi::abortStream( void )
\r
4496 if ( stream_.state == STREAM_STOPPED ) {
\r
4497 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4498 error( RtAudioError::WARNING );
\r
4502 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4503 stream_.state = STREAM_STOPPING;
\r
4505 // wait until stream thread is stopped
\r
4506 while ( stream_.state != STREAM_STOPPED ) {
\r
4510 // stop capture client if applicable
\r
4511 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4512 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4513 if ( FAILED( hr ) ) {
\r
4514 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4515 error( RtAudioError::DRIVER_ERROR );
\r
4520 // stop render client if applicable
\r
4521 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4522 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4525 error( RtAudioError::DRIVER_ERROR );
\r
4530 // close thread handle
\r
4531 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4532 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4533 error( RtAudioError::THREAD_ERROR );
\r
4537 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4540 //-----------------------------------------------------------------------------
\r
//! Open the given WASAPI device for the requested mode and fill in the
//! shared stream_ structure (channels, sample rate, buffer sizes,
//! conversion flags, user buffer).  Returns SUCCESS or FAILURE.
//!
//! NOTE(review): extraction of this chunk dropped physical lines (braces,
//! "goto Exit;" jumps after each error assignment, and the cleanup label) —
//! the token stream below is kept as-is; compare against upstream RtAudio
//! before editing control flow.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
  bool methodResult = FAILURE;   // flipped to SUCCESS only if every step succeeds
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  // COM objects below are released/freed in the common cleanup path at the end.
  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;   // freed with CoTaskMemFree below
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";

  // validate device index
  // Device indices are a single flat range: render devices first,
  // then capture devices (see the branch on renderDeviceCount below).
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";

  // determine whether index falls within capture or render devices
  if ( device >= renderDeviceCount ) {
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";

    // retrieve captureAudioClient from devicePtr
    // (reference into the WasapiHandle so the activated client is stored there)
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    // latency is reported by WASAPI in REFERENCE_TIME (100-ns units);
    // stored directly into stream_.latency via a long long cast
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

  // (render-device branch — the enclosing else of the capture branch above)
  if ( mode != OUTPUT ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";

  // retrieve renderAudioClient from devicePtr
  IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

  hr = renderDevices->Item( device, &devicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";

  hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                            NULL, ( void** ) &renderAudioClient );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

  hr = renderAudioClient->GetMixFormat( &deviceFormat );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

  stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
  renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

  // fill stream data
  // Opening the second direction of an already-open stream promotes it to DUPLEX.
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
    stream_.mode = mode;

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;   // WASAPI shared-mode mix format is native-endian
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  // NOTE(review): "stream_.nUserChannels != stream_.nDeviceChannels" compares
  // the two arrays' addresses (always unequal), so this condition is always
  // true and conversion is always enabled; the element-wise comparison
  // nUserChannels[mode] != nDeviceChannels[mode] was presumably intended.
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels != stream_.nDeviceChannels )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, 0 );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";

  // priority 15 = highest callback-thread priority used by this backend
  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

  methodResult = SUCCESS;

  // common cleanup path (reached on success and on error)
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream
  if ( methodResult == FAILURE )

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
\r
4736 //=============================================================================
\r
4738 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4741 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4746 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4749 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4754 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4757 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4762 //-----------------------------------------------------------------------------
\r
//! The WASAPI stream thread: sets up capture/render clients and events,
//! then loops pulling captured audio, invoking the user callback, and
//! pushing rendered audio until stream_.state becomes STREAM_STOPPING.
//!
//! NOTE(review): extraction of this chunk dropped physical lines (braces,
//! "else" keywords, "goto Exit;" jumps, the declaration of the local
//! HRESULT hr, and the cleanup label) — the token stream below is kept
//! as-is; compare against upstream RtAudio before editing control flow.
void RtApiWasapi::wasapiThread()
  // as this is a new thread, we must CoInitialize it
  CoInitialize( NULL );

  // local aliases of the per-stream WASAPI objects stored in the api handle
  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

  WAVEFORMATEX* captureFormat = NULL;   // device mix format; freed with CoTaskMemFree
  WAVEFORMATEX* renderFormat = NULL;
  // device-rate / user-rate ratios used to size and convert buffers
  float captureSrRatio = 0.0f;
  float renderSrRatio = 0.0f;
  WasapiBuffer captureBuffer;   // ring buffer between WASAPI capture and the callback
  WasapiBuffer renderBuffer;    // ring buffer between the callback and WASAPI render

  // declare local stream variables
  RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
  BYTE* streamBuffer = NULL;
  unsigned long captureFlags = 0;      // flags reported by GetBuffer (e.g. discontinuity)
  unsigned int bufferFrameCount = 0;
  unsigned int numFramesPadding = 0;
  unsigned int convBufferSize = 0;
  bool callbackPushed = false;         // callback output delivered to renderBuffer
  bool callbackPulled = false;         // callback input obtained from captureBuffer
  bool callbackStopped = false;        // user callback requested stop/abort
  int callbackResult = 0;

  // convBuffer is used to store converted buffers between WASAPI and the user
  char* convBuffer = NULL;
  unsigned int convBuffSize = 0;
  unsigned int deviceBuffSize = 0;

  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

  // Attempt to assign "Pro Audio" characteristic to thread
  HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
  DWORD taskIndex = 0;
  TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
  // NOTE(review): no null check visible on AvrtDll/AvSetMmThreadCharacteristicsPtr
  // here — presumably guarded on a line dropped by extraction; verify.
  AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
  FreeLibrary( AvrtDll );

  // start capture stream if applicable
  if ( captureAudioClient ) {
    hr = captureAudioClient->GetMixFormat( &captureFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize capture stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * captureSrRatio;
    // REFERENCE_TIME is in 100-ns units, hence the 10,000,000 factor
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

    if ( !captureClient ) {
      hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                           desiredBufferPeriod,
                                           desiredBufferPeriod,
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
                                           ( void** ) &captureClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

      // configure captureEvent to trigger on every available capture buffer
      captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !captureEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

      hr = captureAudioClient->SetEventHandle( captureEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

      // persist the new client/event in the api handle for reuse and cleanup
      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

    unsigned int inBufferSize = 0;
    hr = captureAudioClient->GetBufferSize( &inBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

    // scale outBufferSize according to stream->user sample rate ratio
    unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
    inBufferSize *= stream_.nDeviceChannels[INPUT];

    // set captureBuffer size
    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

    // reset the capture stream
    hr = captureAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

    // start the capture stream
    hr = captureAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

  // start render stream if applicable
  if ( renderAudioClient ) {
    hr = renderAudioClient->GetMixFormat( &renderFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize render stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * renderSrRatio;
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

    if ( !renderClient ) {
      hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                          desiredBufferPeriod,
                                          desiredBufferPeriod,
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
                                          ( void** ) &renderClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

      // configure renderEvent to trigger on every available render buffer
      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !renderEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

      hr = renderAudioClient->SetEventHandle( renderEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

    unsigned int outBufferSize = 0;
    hr = renderAudioClient->GetBufferSize( &outBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

    // scale inBufferSize according to user->stream sample rate ratio
    unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
    outBufferSize *= stream_.nDeviceChannels[OUTPUT];

    // set renderBuffer size
    renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

    // reset the render stream
    hr = renderAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

    // start the render stream
    hr = renderAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

  // size the conversion and device buffers for the active direction(s);
  // DUPLEX takes the max of both so one allocation serves both paths
  if ( stream_.mode == INPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
  else if ( stream_.mode == OUTPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
  else if ( stream_.mode == DUPLEX ) {
    convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                             ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
    deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                               stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

  convBuffer = ( char* ) malloc( convBuffSize );
  stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
  if ( !convBuffer || !stream_.deviceBuffer ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

  // stream process loop
  while ( stream_.state != STREAM_STOPPING ) {
    if ( !callbackPulled ) {
      // 1. Pull callback buffer from inputBuffer
      // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
      //                          Convert callback buffer to user format

      if ( captureAudioClient ) {
        // Pull callback buffer from inputBuffer
        callbackPulled = captureBuffer.pullBuffer( convBuffer,
                                                   ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
                                                   stream_.deviceFormat[INPUT] );

        if ( callbackPulled ) {
          // Convert callback buffer to user sample rate
          convertBufferWasapi( stream_.deviceBuffer,
                               stream_.nDeviceChannels[INPUT],
                               captureFormat->nSamplesPerSec,
                               stream_.sampleRate,
                               ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
                               stream_.deviceFormat[INPUT] );

          if ( stream_.doConvertBuffer[INPUT] ) {
            // Convert callback buffer to user format
            convertBuffer( stream_.userBuffer[INPUT],
                           stream_.deviceBuffer,
                           stream_.convertInfo[INPUT] );
            // no further conversion, simple copy deviceBuffer to userBuffer
            memcpy( stream_.userBuffer[INPUT],
                    stream_.deviceBuffer,
                    stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
        // if there is no capture stream, set callbackPulled flag
        callbackPulled = true;

      // Execute Callback
      // ================
      // 1. Execute user callback method
      // 2. Handle return value from callback

      // if callback has not requested the stream to stop
      if ( callbackPulled && !callbackStopped ) {
        // Execute user callback method
        callbackResult = callback( stream_.userBuffer[OUTPUT],
                                   stream_.userBuffer[INPUT],
                                   stream_.bufferSize,
                                   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
                                   stream_.callbackInfo.userData );

        // Handle return value from callback
        // (stop/abort must run on another thread: stopStream()/abortStream()
        //  block until THIS thread exits)
        if ( callbackResult == 1 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
          callbackStopped = true;
        else if ( callbackResult == 2 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
          callbackStopped = true;

    // Callback Output
    // ===============
    // 1. Convert callback buffer to stream format
    // 2. Convert callback buffer to stream sample rate and channel count
    // 3. Push callback buffer into outputBuffer

    if ( renderAudioClient && callbackPulled ) {
      if ( stream_.doConvertBuffer[OUTPUT] ) {
        // Convert callback buffer to stream format
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );

      // Convert callback buffer to stream sample rate
      convertBufferWasapi( convBuffer,
                           stream_.deviceBuffer,
                           stream_.nDeviceChannels[OUTPUT],
                           stream_.sampleRate,
                           renderFormat->nSamplesPerSec,
                           stream_.bufferSize,
                           stream_.deviceFormat[OUTPUT] );

      // Push callback buffer into outputBuffer
      callbackPushed = renderBuffer.pushBuffer( convBuffer,
                                                convBufferSize * stream_.nDeviceChannels[OUTPUT],
                                                stream_.deviceFormat[OUTPUT] );
      // if there is no render stream, set callbackPushed flag
      callbackPushed = true;

    // Stream Capture
    // ==============
    // 1. Get capture buffer from stream
    // 2. Push capture buffer into inputBuffer
    // 3. If 2. was successful: Release capture buffer

    if ( captureAudioClient ) {
      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
      if ( !callbackPulled ) {
        WaitForSingleObject( captureEvent, INFINITE );

      // Get capture buffer from stream
      hr = captureClient->GetBuffer( &streamBuffer,
                                     &bufferFrameCount,
                                     &captureFlags, NULL, NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

      if ( bufferFrameCount != 0 ) {
        // Push capture buffer into inputBuffer
        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
                                       stream_.deviceFormat[INPUT] ) )
          // Release capture buffer
          hr = captureClient->ReleaseBuffer( bufferFrameCount );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
          // Inform WASAPI that capture was unsuccessful
          hr = captureClient->ReleaseBuffer( 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
        // Inform WASAPI that capture was unsuccessful
        hr = captureClient->ReleaseBuffer( 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

    // Stream Render
    // =============
    // 1. Get render buffer from stream
    // 2. Pull next buffer from outputBuffer
    // 3. If 2. was successful: Fill render buffer with next buffer
    //                          Release render buffer

    if ( renderAudioClient ) {
      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
      if ( callbackPulled && !callbackPushed ) {
        WaitForSingleObject( renderEvent, INFINITE );

      // Get render buffer from stream
      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

      // only the unpadded portion of the device buffer may be written
      bufferFrameCount -= numFramesPadding;

      if ( bufferFrameCount != 0 ) {
        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

        // Pull next buffer from outputBuffer
        // Fill render buffer with next buffer
        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
                                      stream_.deviceFormat[OUTPUT] ) )
          // Release render buffer
          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
          // Inform WASAPI that render was unsuccessful
          hr = renderClient->ReleaseBuffer( 0, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
        // Inform WASAPI that render was unsuccessful
        hr = renderClient->ReleaseBuffer( 0, 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

    // if the callback buffer was pushed renderBuffer reset callbackPulled flag
    if ( callbackPushed ) {
      callbackPulled = false;
      // tick stream time
      RtApi::tickStreamTime();

  // cleanup (reached on loop exit and on error paths)
  CoTaskMemFree( captureFormat );
  CoTaskMemFree( renderFormat );

  free ( convBuffer );

  // update stream state
  stream_.state = STREAM_STOPPED;

  if ( errorText_.empty() )
    error( errorType );
\r
5279 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5283 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5285 // Modified by Robin Davies, October 2005
\r
5286 // - Improvements to DirectX pointer chasing.
\r
5287 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5288 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5289 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5290 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5292 #include <dsound.h>
\r
5293 #include <assert.h>
\r
5294 #include <algorithm>
\r
5296 #if defined(__MINGW32__)
\r
5297 // missing from latest mingw winapi
\r
5298 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5299 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5300 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5301 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5304 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5306 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5307 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5310 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5312 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5313 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5314 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5315 return pointer >= earlierPointer && pointer < laterPointer;
\r
// A structure to hold various information related to the DirectSound
// API implementation.
// NOTE(review): the "struct DsHandle {" opening line and several members
// initialized by the constructor below (id[2], buffer[2], xrun[2]) appear
// to have been dropped from this chunk — restore from upstream RtAudio.
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  UINT bufferPointer[2];     // last write/read positions, one per direction
  DWORD dsBufferSize[2];     // DirectSound buffer sizes in bytes, one per direction
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  // Default constructor: zero all counters, ids, buffers and xrun flags.
  :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5335 // Declarations for utility functions, callbacks, and structures
\r
5336 // specific to the DirectSound implementation.
\r
5337 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5338 LPCTSTR description,
\r
5340 LPVOID lpContext );
\r
5342 static const char* getErrorString( int code );
\r
5344 static unsigned __stdcall callbackHandler( void *ptr );
\r
5353 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context structure handed to the DirectSound device-enumeration callback.
// NOTE(review): a "bool isInput;" member is assigned by callers
// (probeInfo.isInput below) but its declaration line was dropped here.
struct DsProbeData {
  std::vector<struct DsDevice>* dsDevices;  // device list being (re)built by the callback
\r
5361 RtApiDs :: RtApiDs()
\r
5363 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5364 // accept whatever the mainline chose for a threading model.
\r
5365 coInitialized_ = false;
\r
5366 HRESULT hr = CoInitialize( NULL );
\r
5367 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5370 RtApiDs :: ~RtApiDs()
\r
5372 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5373 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5376 // The DirectSound default output is always the first device.
\r
5377 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5382 // The DirectSound default input is always the first input device,
\r
5383 // which is the first capture device enumerated.
\r
5384 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5389 unsigned int RtApiDs :: getDeviceCount( void )
\r
5391 // Set query flag for previously found devices to false, so that we
\r
5392 // can check for any devices that have disappeared.
\r
5393 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5394 dsDevices[i].found = false;
\r
5396 // Query DirectSound devices.
\r
5397 struct DsProbeData probeInfo;
\r
5398 probeInfo.isInput = false;
\r
5399 probeInfo.dsDevices = &dsDevices;
\r
5400 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5401 if ( FAILED( result ) ) {
\r
5402 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5403 errorText_ = errorStream_.str();
\r
5404 error( RtAudioError::WARNING );
\r
5407 // Query DirectSoundCapture devices.
\r
5408 probeInfo.isInput = true;
\r
5409 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5410 if ( FAILED( result ) ) {
\r
5411 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtAudioError::WARNING );
\r
5416 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5417 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5418 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5422 return static_cast<unsigned int>(dsDevices.size());
\r
5425 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5427 RtAudio::DeviceInfo info;
\r
5428 info.probed = false;
\r
5430 if ( dsDevices.size() == 0 ) {
\r
5431 // Force a query of all devices
\r
5433 if ( dsDevices.size() == 0 ) {
\r
5434 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5435 error( RtAudioError::INVALID_USE );
\r
5440 if ( device >= dsDevices.size() ) {
\r
5441 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5442 error( RtAudioError::INVALID_USE );
\r
5447 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5449 LPDIRECTSOUND output;
\r
5451 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5452 if ( FAILED( result ) ) {
\r
5453 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5454 errorText_ = errorStream_.str();
\r
5455 error( RtAudioError::WARNING );
\r
5459 outCaps.dwSize = sizeof( outCaps );
\r
5460 result = output->GetCaps( &outCaps );
\r
5461 if ( FAILED( result ) ) {
\r
5462 output->Release();
\r
5463 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5464 errorText_ = errorStream_.str();
\r
5465 error( RtAudioError::WARNING );
\r
5469 // Get output channel information.
\r
5470 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5472 // Get sample rate information.
\r
5473 info.sampleRates.clear();
\r
5474 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5475 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5476 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5477 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5479 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5480 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5484 // Get format information.
\r
5485 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5486 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5488 output->Release();
\r
5490 if ( getDefaultOutputDevice() == device )
\r
5491 info.isDefaultOutput = true;
\r
5493 if ( dsDevices[ device ].validId[1] == false ) {
\r
5494 info.name = dsDevices[ device ].name;
\r
5495 info.probed = true;
\r
5501 LPDIRECTSOUNDCAPTURE input;
\r
5502 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5503 if ( FAILED( result ) ) {
\r
5504 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5505 errorText_ = errorStream_.str();
\r
5506 error( RtAudioError::WARNING );
\r
5511 inCaps.dwSize = sizeof( inCaps );
\r
5512 result = input->GetCaps( &inCaps );
\r
5513 if ( FAILED( result ) ) {
\r
5515 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5516 errorText_ = errorStream_.str();
\r
5517 error( RtAudioError::WARNING );
\r
5521 // Get input channel information.
\r
5522 info.inputChannels = inCaps.dwChannels;
\r
5524 // Get sample rate and format information.
\r
5525 std::vector<unsigned int> rates;
\r
5526 if ( inCaps.dwChannels >= 2 ) {
\r
5527 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5528 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5530 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5531 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5532 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5533 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5534 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5536 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5537 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5538 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5539 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5540 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5542 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5543 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5544 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5545 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5546 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5549 else if ( inCaps.dwChannels == 1 ) {
\r
5550 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5551 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5553 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5554 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5555 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5556 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5557 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5559 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5560 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5561 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5562 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5563 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5565 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5566 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5567 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5568 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5569 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5572 else info.inputChannels = 0; // technically, this would be an error
\r
5576 if ( info.inputChannels == 0 ) return info;
\r
5578 // Copy the supported rates to the info structure but avoid duplication.
\r
5580 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5582 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5583 if ( rates[i] == info.sampleRates[j] ) {
\r
5588 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5590 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5592 // If device opens for both playback and capture, we determine the channels.
\r
5593 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5594 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5596 if ( device == 0 ) info.isDefaultInput = true;
\r
5598 // Copy name and return.
\r
5599 info.name = dsDevices[ device ].name;
\r
5600 info.probed = true;
\r
5604 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5605 unsigned int firstChannel, unsigned int sampleRate,
\r
5606 RtAudioFormat format, unsigned int *bufferSize,
\r
5607 RtAudio::StreamOptions *options )
\r
5609 if ( channels + firstChannel > 2 ) {
\r
5610 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5614 size_t nDevices = dsDevices.size();
\r
5615 if ( nDevices == 0 ) {
\r
5616 // This should not happen because a check is made before this function is called.
\r
5617 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5621 if ( device >= nDevices ) {
\r
5622 // This should not happen because a check is made before this function is called.
\r
5623 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5627 if ( mode == OUTPUT ) {
\r
5628 if ( dsDevices[ device ].validId[0] == false ) {
\r
5629 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5630 errorText_ = errorStream_.str();
\r
5634 else { // mode == INPUT
\r
5635 if ( dsDevices[ device ].validId[1] == false ) {
\r
5636 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5637 errorText_ = errorStream_.str();
\r
5642 // According to a note in PortAudio, using GetDesktopWindow()
\r
5643 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5644 // that occur when the application's window is not the foreground
\r
5645 // window. Also, if the application window closes before the
\r
5646 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5647 // problems when using GetDesktopWindow() but it seems fine now
\r
5648 // (January 2010). I'll leave it commented here.
\r
5649 // HWND hWnd = GetForegroundWindow();
\r
5650 HWND hWnd = GetDesktopWindow();
\r
5652 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5653 // two. This is a judgement call and a value of two is probably too
\r
5654 // low for capture, but it should work for playback.
\r
5656 if ( options ) nBuffers = options->numberOfBuffers;
\r
5657 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5658 if ( nBuffers < 2 ) nBuffers = 3;
\r
5660 // Check the lower range of the user-specified buffer size and set
\r
5661 // (arbitrarily) to a lower bound of 32.
\r
5662 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5664 // Create the wave format structure. The data format setting will
\r
5665 // be determined later.
\r
5666 WAVEFORMATEX waveFormat;
\r
5667 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5668 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5669 waveFormat.nChannels = channels + firstChannel;
\r
5670 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5672 // Determine the device buffer size. By default, we'll use the value
\r
5673 // defined above (32K), but we will grow it to make allowances for
\r
5674 // very large software buffer sizes.
\r
5675 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5676 DWORD dsPointerLeadTime = 0;
\r
5678 void *ohandle = 0, *bhandle = 0;
\r
5680 if ( mode == OUTPUT ) {
\r
5682 LPDIRECTSOUND output;
\r
5683 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5684 if ( FAILED( result ) ) {
\r
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5686 errorText_ = errorStream_.str();
\r
5691 outCaps.dwSize = sizeof( outCaps );
\r
5692 result = output->GetCaps( &outCaps );
\r
5693 if ( FAILED( result ) ) {
\r
5694 output->Release();
\r
5695 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5696 errorText_ = errorStream_.str();
\r
5700 // Check channel information.
\r
5701 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5702 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5703 errorText_ = errorStream_.str();
\r
5707 // Check format information. Use 16-bit format unless not
\r
5708 // supported or user requests 8-bit.
\r
5709 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5710 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5711 waveFormat.wBitsPerSample = 16;
\r
5712 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5715 waveFormat.wBitsPerSample = 8;
\r
5716 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5718 stream_.userFormat = format;
\r
5720 // Update wave format structure and buffer information.
\r
5721 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5722 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5723 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5725 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5726 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5727 dsBufferSize *= 2;
\r
5729 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5730 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5731 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5732 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5733 if ( FAILED( result ) ) {
\r
5734 output->Release();
\r
5735 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5736 errorText_ = errorStream_.str();
\r
5740 // Even though we will write to the secondary buffer, we need to
\r
5741 // access the primary buffer to set the correct output format
\r
5742 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5743 // buffer description.
\r
5744 DSBUFFERDESC bufferDescription;
\r
5745 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5746 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5747 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5749 // Obtain the primary buffer
\r
5750 LPDIRECTSOUNDBUFFER buffer;
\r
5751 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5752 if ( FAILED( result ) ) {
\r
5753 output->Release();
\r
5754 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5755 errorText_ = errorStream_.str();
\r
5759 // Set the primary DS buffer sound format.
\r
5760 result = buffer->SetFormat( &waveFormat );
\r
5761 if ( FAILED( result ) ) {
\r
5762 output->Release();
\r
5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5764 errorText_ = errorStream_.str();
\r
5768 // Setup the secondary DS buffer description.
\r
5769 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5770 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5771 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5772 DSBCAPS_GLOBALFOCUS |
\r
5773 DSBCAPS_GETCURRENTPOSITION2 |
\r
5774 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5775 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5776 bufferDescription.lpwfxFormat = &waveFormat;
\r
5778 // Try to create the secondary DS buffer. If that doesn't work,
\r
5779 // try to use software mixing. Otherwise, there's a problem.
\r
5780 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5781 if ( FAILED( result ) ) {
\r
5782 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5783 DSBCAPS_GLOBALFOCUS |
\r
5784 DSBCAPS_GETCURRENTPOSITION2 |
\r
5785 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5786 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5787 if ( FAILED( result ) ) {
\r
5788 output->Release();
\r
5789 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5790 errorText_ = errorStream_.str();
\r
5795 // Get the buffer size ... might be different from what we specified.
\r
5797 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5798 result = buffer->GetCaps( &dsbcaps );
\r
5799 if ( FAILED( result ) ) {
\r
5800 output->Release();
\r
5801 buffer->Release();
\r
5802 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5803 errorText_ = errorStream_.str();
\r
5807 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5809 // Lock the DS buffer
\r
5812 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5813 if ( FAILED( result ) ) {
\r
5814 output->Release();
\r
5815 buffer->Release();
\r
5816 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5817 errorText_ = errorStream_.str();
\r
5821 // Zero the DS buffer
\r
5822 ZeroMemory( audioPtr, dataLen );
\r
5824 // Unlock the DS buffer
\r
5825 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5826 if ( FAILED( result ) ) {
\r
5827 output->Release();
\r
5828 buffer->Release();
\r
5829 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5830 errorText_ = errorStream_.str();
\r
5834 ohandle = (void *) output;
\r
5835 bhandle = (void *) buffer;
\r
5838 if ( mode == INPUT ) {
\r
5840 LPDIRECTSOUNDCAPTURE input;
\r
5841 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5842 if ( FAILED( result ) ) {
\r
5843 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5844 errorText_ = errorStream_.str();
\r
5849 inCaps.dwSize = sizeof( inCaps );
\r
5850 result = input->GetCaps( &inCaps );
\r
5851 if ( FAILED( result ) ) {
\r
5853 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5854 errorText_ = errorStream_.str();
\r
5858 // Check channel information.
\r
5859 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5860 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5864 // Check format information. Use 16-bit format unless user
\r
5865 // requests 8-bit.
\r
5866 DWORD deviceFormats;
\r
5867 if ( channels + firstChannel == 2 ) {
\r
5868 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5869 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5870 waveFormat.wBitsPerSample = 8;
\r
5871 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5873 else { // assume 16-bit is supported
\r
5874 waveFormat.wBitsPerSample = 16;
\r
5875 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5878 else { // channel == 1
\r
5879 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5880 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5881 waveFormat.wBitsPerSample = 8;
\r
5882 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5884 else { // assume 16-bit is supported
\r
5885 waveFormat.wBitsPerSample = 16;
\r
5886 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5889 stream_.userFormat = format;
\r
5891 // Update wave format structure and buffer information.
\r
5892 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5893 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5894 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5896 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5897 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5898 dsBufferSize *= 2;
\r
5900 // Setup the secondary DS buffer description.
\r
5901 DSCBUFFERDESC bufferDescription;
\r
5902 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5903 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5904 bufferDescription.dwFlags = 0;
\r
5905 bufferDescription.dwReserved = 0;
\r
5906 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5907 bufferDescription.lpwfxFormat = &waveFormat;
\r
5909 // Create the capture buffer.
\r
5910 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5911 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5912 if ( FAILED( result ) ) {
\r
5914 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5915 errorText_ = errorStream_.str();
\r
5919 // Get the buffer size ... might be different from what we specified.
\r
5920 DSCBCAPS dscbcaps;
\r
5921 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5922 result = buffer->GetCaps( &dscbcaps );
\r
5923 if ( FAILED( result ) ) {
\r
5925 buffer->Release();
\r
5926 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5927 errorText_ = errorStream_.str();
\r
5931 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5933 // NOTE: We could have a problem here if this is a duplex stream
\r
5934 // and the play and capture hardware buffer sizes are different
\r
5935 // (I'm actually not sure if that is a problem or not).
\r
5936 // Currently, we are not verifying that.
\r
5938 // Lock the capture buffer
\r
5941 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5942 if ( FAILED( result ) ) {
\r
5944 buffer->Release();
\r
5945 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5946 errorText_ = errorStream_.str();
\r
5950 // Zero the buffer
\r
5951 ZeroMemory( audioPtr, dataLen );
\r
5953 // Unlock the buffer
\r
5954 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5955 if ( FAILED( result ) ) {
\r
5957 buffer->Release();
\r
5958 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5959 errorText_ = errorStream_.str();
\r
5963 ohandle = (void *) input;
\r
5964 bhandle = (void *) buffer;
\r
5967 // Set various stream parameters
\r
5968 DsHandle *handle = 0;
\r
5969 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5970 stream_.nUserChannels[mode] = channels;
\r
5971 stream_.bufferSize = *bufferSize;
\r
5972 stream_.channelOffset[mode] = firstChannel;
\r
5973 stream_.deviceInterleaved[mode] = true;
\r
5974 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5975 else stream_.userInterleaved = true;
\r
5977 // Set flag for buffer conversion
\r
5978 stream_.doConvertBuffer[mode] = false;
\r
5979 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5980 stream_.doConvertBuffer[mode] = true;
\r
5981 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5982 stream_.doConvertBuffer[mode] = true;
\r
5983 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5984 stream_.nUserChannels[mode] > 1 )
\r
5985 stream_.doConvertBuffer[mode] = true;
\r
5987 // Allocate necessary internal buffers
\r
5988 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5989 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5990 if ( stream_.userBuffer[mode] == NULL ) {
\r
5991 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5995 if ( stream_.doConvertBuffer[mode] ) {
\r
5997 bool makeBuffer = true;
\r
5998 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5999 if ( mode == INPUT ) {
\r
6000 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6001 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6002 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
6006 if ( makeBuffer ) {
\r
6007 bufferBytes *= *bufferSize;
\r
6008 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6009 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6010 if ( stream_.deviceBuffer == NULL ) {
\r
6011 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6017 // Allocate our DsHandle structures for the stream.
\r
6018 if ( stream_.apiHandle == 0 ) {
\r
6020 handle = new DsHandle;
\r
6022 catch ( std::bad_alloc& ) {
\r
6023 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6027 // Create a manual-reset event.
\r
6028 handle->condition = CreateEvent( NULL, // no security
\r
6029 TRUE, // manual-reset
\r
6030 FALSE, // non-signaled initially
\r
6031 NULL ); // unnamed
\r
6032 stream_.apiHandle = (void *) handle;
\r
6035 handle = (DsHandle *) stream_.apiHandle;
\r
6036 handle->id[mode] = ohandle;
\r
6037 handle->buffer[mode] = bhandle;
\r
6038 handle->dsBufferSize[mode] = dsBufferSize;
\r
6039 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6041 stream_.device[mode] = device;
\r
6042 stream_.state = STREAM_STOPPED;
\r
6043 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6044 // We had already set up an output stream.
\r
6045 stream_.mode = DUPLEX;
\r
6047 stream_.mode = mode;
\r
6048 stream_.nBuffers = nBuffers;
\r
6049 stream_.sampleRate = sampleRate;
\r
6051 // Setup the buffer conversion information structure.
\r
6052 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6054 // Setup the callback thread.
\r
6055 if ( stream_.callbackInfo.isRunning == false ) {
\r
6056 unsigned threadId;
\r
6057 stream_.callbackInfo.isRunning = true;
\r
6058 stream_.callbackInfo.object = (void *) this;
\r
6059 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6060 &stream_.callbackInfo, 0, &threadId );
\r
6061 if ( stream_.callbackInfo.thread == 0 ) {
\r
6062 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6066 // Boost DS thread priority
\r
6067 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
6073 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6074 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6075 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6076 if ( buffer ) buffer->Release();
\r
6077 object->Release();
\r
6079 if ( handle->buffer[1] ) {
\r
6080 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6081 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6082 if ( buffer ) buffer->Release();
\r
6083 object->Release();
\r
6085 CloseHandle( handle->condition );
\r
6087 stream_.apiHandle = 0;
\r
6090 for ( int i=0; i<2; i++ ) {
\r
6091 if ( stream_.userBuffer[i] ) {
\r
6092 free( stream_.userBuffer[i] );
\r
6093 stream_.userBuffer[i] = 0;
\r
6097 if ( stream_.deviceBuffer ) {
\r
6098 free( stream_.deviceBuffer );
\r
6099 stream_.deviceBuffer = 0;
\r
6102 stream_.state = STREAM_CLOSED;
\r
6106 void RtApiDs :: closeStream()
\r
6108 if ( stream_.state == STREAM_CLOSED ) {
\r
6109 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6110 error( RtAudioError::WARNING );
\r
6114 // Stop the callback thread.
\r
6115 stream_.callbackInfo.isRunning = false;
\r
6116 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6117 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6119 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6121 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6122 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6123 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6126 buffer->Release();
\r
6128 object->Release();
\r
6130 if ( handle->buffer[1] ) {
\r
6131 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6132 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6135 buffer->Release();
\r
6137 object->Release();
\r
6139 CloseHandle( handle->condition );
\r
6141 stream_.apiHandle = 0;
\r
6144 for ( int i=0; i<2; i++ ) {
\r
6145 if ( stream_.userBuffer[i] ) {
\r
6146 free( stream_.userBuffer[i] );
\r
6147 stream_.userBuffer[i] = 0;
\r
6151 if ( stream_.deviceBuffer ) {
\r
6152 free( stream_.deviceBuffer );
\r
6153 stream_.deviceBuffer = 0;
\r
6156 stream_.mode = UNINITIALIZED;
\r
6157 stream_.state = STREAM_CLOSED;
\r
6160 void RtApiDs :: startStream()
\r
6163 if ( stream_.state == STREAM_RUNNING ) {
\r
6164 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6165 error( RtAudioError::WARNING );
\r
6169 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6171 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6172 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6173 // this is already in effect.
\r
6174 timeBeginPeriod( 1 );
\r
6176 buffersRolling = false;
\r
6177 duplexPrerollBytes = 0;
\r
6179 if ( stream_.mode == DUPLEX ) {
\r
6180 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6181 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6184 HRESULT result = 0;
\r
6185 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6187 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6188 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6189 if ( FAILED( result ) ) {
\r
6190 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6191 errorText_ = errorStream_.str();
\r
6196 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6198 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6199 result = buffer->Start( DSCBSTART_LOOPING );
\r
6200 if ( FAILED( result ) ) {
\r
6201 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6202 errorText_ = errorStream_.str();
\r
6207 handle->drainCounter = 0;
\r
6208 handle->internalDrain = false;
\r
6209 ResetEvent( handle->condition );
\r
6210 stream_.state = STREAM_RUNNING;
\r
6213 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6216 void RtApiDs :: stopStream()
\r
6219 if ( stream_.state == STREAM_STOPPED ) {
\r
6220 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6221 error( RtAudioError::WARNING );
\r
6225 HRESULT result = 0;
\r
6228 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6229 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6230 if ( handle->drainCounter == 0 ) {
\r
6231 handle->drainCounter = 2;
\r
6232 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6235 stream_.state = STREAM_STOPPED;
\r
6237 MUTEX_LOCK( &stream_.mutex );
\r
6239 // Stop the buffer and clear memory
\r
6240 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6241 result = buffer->Stop();
\r
6242 if ( FAILED( result ) ) {
\r
6243 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6244 errorText_ = errorStream_.str();
\r
6248 // Lock the buffer and clear it so that if we start to play again,
\r
6249 // we won't have old data playing.
\r
6250 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6251 if ( FAILED( result ) ) {
\r
6252 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6253 errorText_ = errorStream_.str();
\r
6257 // Zero the DS buffer
\r
6258 ZeroMemory( audioPtr, dataLen );
\r
6260 // Unlock the DS buffer
\r
6261 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6262 if ( FAILED( result ) ) {
\r
6263 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6264 errorText_ = errorStream_.str();
\r
6268 // If we start playing again, we must begin at beginning of buffer.
\r
6269 handle->bufferPointer[0] = 0;
\r
6272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6273 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6277 stream_.state = STREAM_STOPPED;
\r
6279 if ( stream_.mode != DUPLEX )
\r
6280 MUTEX_LOCK( &stream_.mutex );
\r
6282 result = buffer->Stop();
\r
6283 if ( FAILED( result ) ) {
\r
6284 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6285 errorText_ = errorStream_.str();
\r
6289 // Lock the buffer and clear it so that if we start to play again,
\r
6290 // we won't have old data playing.
\r
6291 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6292 if ( FAILED( result ) ) {
\r
6293 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6294 errorText_ = errorStream_.str();
\r
6298 // Zero the DS buffer
\r
6299 ZeroMemory( audioPtr, dataLen );
\r
6301 // Unlock the DS buffer
\r
6302 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6303 if ( FAILED( result ) ) {
\r
6304 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6305 errorText_ = errorStream_.str();
\r
6309 // If we start recording again, we must begin at beginning of buffer.
\r
6310 handle->bufferPointer[1] = 0;
\r
6314 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6315 MUTEX_UNLOCK( &stream_.mutex );
\r
6317 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6320 void RtApiDs :: abortStream()
\r
6323 if ( stream_.state == STREAM_STOPPED ) {
\r
6324 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6325 error( RtAudioError::WARNING );
\r
6329 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6330 handle->drainCounter = 2;
\r
6335 void RtApiDs :: callbackEvent()
\r
6337 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6338 Sleep( 50 ); // sleep 50 milliseconds
\r
6342 if ( stream_.state == STREAM_CLOSED ) {
\r
6343 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6344 error( RtAudioError::WARNING );
\r
6348 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6349 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6351 // Check if we were draining the stream and signal is finished.
\r
6352 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6354 stream_.state = STREAM_STOPPING;
\r
6355 if ( handle->internalDrain == false )
\r
6356 SetEvent( handle->condition );
\r
6362 // Invoke user callback to get fresh output data UNLESS we are
\r
6363 // draining stream.
\r
6364 if ( handle->drainCounter == 0 ) {
\r
6365 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6366 double streamTime = getStreamTime();
\r
6367 RtAudioStreamStatus status = 0;
\r
6368 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6369 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6370 handle->xrun[0] = false;
\r
6372 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6373 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6374 handle->xrun[1] = false;
\r
6376 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6377 stream_.bufferSize, streamTime, status, info->userData );
\r
6378 if ( cbReturnValue == 2 ) {
\r
6379 stream_.state = STREAM_STOPPING;
\r
6380 handle->drainCounter = 2;
\r
6384 else if ( cbReturnValue == 1 ) {
\r
6385 handle->drainCounter = 1;
\r
6386 handle->internalDrain = true;
\r
6391 DWORD currentWritePointer, safeWritePointer;
\r
6392 DWORD currentReadPointer, safeReadPointer;
\r
6393 UINT nextWritePointer;
\r
6395 LPVOID buffer1 = NULL;
\r
6396 LPVOID buffer2 = NULL;
\r
6397 DWORD bufferSize1 = 0;
\r
6398 DWORD bufferSize2 = 0;
\r
6403 MUTEX_LOCK( &stream_.mutex );
\r
6404 if ( stream_.state == STREAM_STOPPED ) {
\r
6405 MUTEX_UNLOCK( &stream_.mutex );
\r
6409 if ( buffersRolling == false ) {
\r
6410 if ( stream_.mode == DUPLEX ) {
\r
6411 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6413 // It takes a while for the devices to get rolling. As a result,
\r
6414 // there's no guarantee that the capture and write device pointers
\r
6415 // will move in lockstep. Wait here for both devices to start
\r
6416 // rolling, and then set our buffer pointers accordingly.
\r
6417 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6418 // bytes later than the write buffer.
\r
6420 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6421 // take place between the two GetCurrentPosition calls... but I'm
\r
6422 // really not sure how to solve the problem. Temporarily boost to
\r
6423 // Realtime priority, maybe; but I'm not sure what priority the
\r
6424 // DirectSound service threads run at. We *should* be roughly
\r
6425 // within a ms or so of correct.
\r
6427 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6428 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6430 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6432 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6433 if ( FAILED( result ) ) {
\r
6434 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6435 errorText_ = errorStream_.str();
\r
6436 MUTEX_UNLOCK( &stream_.mutex );
\r
6437 error( RtAudioError::SYSTEM_ERROR );
\r
6440 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6441 if ( FAILED( result ) ) {
\r
6442 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6443 errorText_ = errorStream_.str();
\r
6444 MUTEX_UNLOCK( &stream_.mutex );
\r
6445 error( RtAudioError::SYSTEM_ERROR );
\r
6449 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6450 if ( FAILED( result ) ) {
\r
6451 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6452 errorText_ = errorStream_.str();
\r
6453 MUTEX_UNLOCK( &stream_.mutex );
\r
6454 error( RtAudioError::SYSTEM_ERROR );
\r
6457 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6458 if ( FAILED( result ) ) {
\r
6459 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6460 errorText_ = errorStream_.str();
\r
6461 MUTEX_UNLOCK( &stream_.mutex );
\r
6462 error( RtAudioError::SYSTEM_ERROR );
\r
6465 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6469 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6471 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6472 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6473 handle->bufferPointer[1] = safeReadPointer;
\r
6475 else if ( stream_.mode == OUTPUT ) {
\r
6477 // Set the proper nextWritePosition after initial startup.
\r
6478 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6479 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6480 if ( FAILED( result ) ) {
\r
6481 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6482 errorText_ = errorStream_.str();
\r
6483 MUTEX_UNLOCK( &stream_.mutex );
\r
6484 error( RtAudioError::SYSTEM_ERROR );
\r
6487 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6488 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6491 buffersRolling = true;
\r
6494 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6496 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6498 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6499 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6500 bufferBytes *= formatBytes( stream_.userFormat );
\r
6501 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6504 // Setup parameters and do buffer conversion if necessary.
\r
6505 if ( stream_.doConvertBuffer[0] ) {
\r
6506 buffer = stream_.deviceBuffer;
\r
6507 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6508 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6509 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6512 buffer = stream_.userBuffer[0];
\r
6513 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6514 bufferBytes *= formatBytes( stream_.userFormat );
\r
6517 // No byte swapping necessary in DirectSound implementation.
\r
6519 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6520 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6522 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6523 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6525 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6526 nextWritePointer = handle->bufferPointer[0];
\r
6528 DWORD endWrite, leadPointer;
\r
6530 // Find out where the read and "safe write" pointers are.
\r
6531 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6532 if ( FAILED( result ) ) {
\r
6533 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6534 errorText_ = errorStream_.str();
\r
6535 MUTEX_UNLOCK( &stream_.mutex );
\r
6536 error( RtAudioError::SYSTEM_ERROR );
\r
6540 // We will copy our output buffer into the region between
\r
6541 // safeWritePointer and leadPointer. If leadPointer is not
\r
6542 // beyond the next endWrite position, wait until it is.
\r
6543 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6544 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6545 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6546 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6547 endWrite = nextWritePointer + bufferBytes;
\r
6549 // Check whether the entire write region is behind the play pointer.
\r
6550 if ( leadPointer >= endWrite ) break;
\r
6552 // If we are here, then we must wait until the leadPointer advances
\r
6553 // beyond the end of our next write region. We use the
\r
6554 // Sleep() function to suspend operation until that happens.
\r
6555 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6556 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6557 if ( millis < 1.0 ) millis = 1.0;
\r
6558 Sleep( (DWORD) millis );
\r
6561 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6562 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6563 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6564 handle->xrun[0] = true;
\r
6565 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6566 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6567 handle->bufferPointer[0] = nextWritePointer;
\r
6568 endWrite = nextWritePointer + bufferBytes;
\r
6571 // Lock free space in the buffer
\r
6572 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6573 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6574 if ( FAILED( result ) ) {
\r
6575 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6576 errorText_ = errorStream_.str();
\r
6577 MUTEX_UNLOCK( &stream_.mutex );
\r
6578 error( RtAudioError::SYSTEM_ERROR );
\r
6582 // Copy our buffer into the DS buffer
\r
6583 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6584 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6586 // Update our buffer offset and unlock sound buffer
\r
6587 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6588 if ( FAILED( result ) ) {
\r
6589 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6590 errorText_ = errorStream_.str();
\r
6591 MUTEX_UNLOCK( &stream_.mutex );
\r
6592 error( RtAudioError::SYSTEM_ERROR );
\r
6595 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6596 handle->bufferPointer[0] = nextWritePointer;
\r
6599 // Don't bother draining input
\r
6600 if ( handle->drainCounter ) {
\r
6601 handle->drainCounter++;
\r
6605 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6607 // Setup parameters.
\r
6608 if ( stream_.doConvertBuffer[1] ) {
\r
6609 buffer = stream_.deviceBuffer;
\r
6610 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6611 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6614 buffer = stream_.userBuffer[1];
\r
6615 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6616 bufferBytes *= formatBytes( stream_.userFormat );
\r
6619 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6620 long nextReadPointer = handle->bufferPointer[1];
\r
6621 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6623 // Find out where the write and "safe read" pointers are.
\r
6624 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6625 if ( FAILED( result ) ) {
\r
6626 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6627 errorText_ = errorStream_.str();
\r
6628 MUTEX_UNLOCK( &stream_.mutex );
\r
6629 error( RtAudioError::SYSTEM_ERROR );
\r
6633 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6634 DWORD endRead = nextReadPointer + bufferBytes;
\r
6636 // Handling depends on whether we are INPUT or DUPLEX.
\r
6637 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6638 // then a wait here will drag the write pointers into the forbidden zone.
\r
6640 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6641 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6642 // practical way to sync up the read and write pointers reliably, given the
\r
6643 // very complex relationship between phase and increment of the read and write
\r
6646 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6647 // provide a pre-roll period of 0.5 seconds in which we return
\r
6648 // zeros from the read buffer while the pointers sync up.
\r
6650 if ( stream_.mode == DUPLEX ) {
\r
6651 if ( safeReadPointer < endRead ) {
\r
6652 if ( duplexPrerollBytes <= 0 ) {
\r
6653 // Pre-roll time over. Be more aggressive.
\r
6654 int adjustment = endRead-safeReadPointer;
\r
6656 handle->xrun[1] = true;
\r
6658 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6659 // and perform fine adjustments later.
\r
6660 // - small adjustments: back off by twice as much.
\r
6661 if ( adjustment >= 2*bufferBytes )
\r
6662 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6664 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6666 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6670 // In pre-roll time. Just do it.
\r
6671 nextReadPointer = safeReadPointer - bufferBytes;
\r
6672 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6674 endRead = nextReadPointer + bufferBytes;
\r
6677 else { // mode == INPUT
\r
6678 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6679 // See comments for playback.
\r
6680 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6681 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6682 if ( millis < 1.0 ) millis = 1.0;
\r
6683 Sleep( (DWORD) millis );
\r
6685 // Wake up and find out where we are now.
\r
6686 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6687 if ( FAILED( result ) ) {
\r
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6689 errorText_ = errorStream_.str();
\r
6690 MUTEX_UNLOCK( &stream_.mutex );
\r
6691 error( RtAudioError::SYSTEM_ERROR );
\r
6695 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6699 // Lock free space in the buffer
\r
6700 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6701 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6702 if ( FAILED( result ) ) {
\r
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6704 errorText_ = errorStream_.str();
\r
6705 MUTEX_UNLOCK( &stream_.mutex );
\r
6706 error( RtAudioError::SYSTEM_ERROR );
\r
6710 if ( duplexPrerollBytes <= 0 ) {
\r
6711 // Copy our buffer into the DS buffer
\r
6712 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6713 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6716 memset( buffer, 0, bufferSize1 );
\r
6717 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6718 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6721 // Update our buffer offset and unlock sound buffer
\r
6722 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6723 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6724 if ( FAILED( result ) ) {
\r
6725 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6726 errorText_ = errorStream_.str();
\r
6727 MUTEX_UNLOCK( &stream_.mutex );
\r
6728 error( RtAudioError::SYSTEM_ERROR );
\r
6731 handle->bufferPointer[1] = nextReadPointer;
\r
6733 // No byte swapping necessary in DirectSound implementation.
\r
6735 // If necessary, convert 8-bit data from unsigned to signed.
\r
6736 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6737 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6739 // Do buffer conversion if necessary.
\r
6740 if ( stream_.doConvertBuffer[1] )
\r
6741 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6745 MUTEX_UNLOCK( &stream_.mutex );
\r
6746 RtApi::tickStreamTime();
\r
6749 // Definitions for utility functions and callbacks
\r
6750 // specific to the DirectSound implementation.
\r
6752 static unsigned __stdcall callbackHandler( void *ptr )
\r
6754 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6755 RtApiDs *object = (RtApiDs *) info->object;
\r
6756 bool* isRunning = &info->isRunning;
\r
6758 while ( *isRunning == true ) {
\r
6759 object->callbackEvent();
\r
6762 _endthreadex( 0 );
\r
6766 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6767 LPCTSTR description,
\r
6768 LPCTSTR /*module*/,
\r
6769 LPVOID lpContext )
\r
6771 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6772 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6775 bool validDevice = false;
\r
6776 if ( probeInfo.isInput == true ) {
\r
6778 LPDIRECTSOUNDCAPTURE object;
\r
6780 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6781 if ( hr != DS_OK ) return TRUE;
\r
6783 caps.dwSize = sizeof(caps);
\r
6784 hr = object->GetCaps( &caps );
\r
6785 if ( hr == DS_OK ) {
\r
6786 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6787 validDevice = true;
\r
6789 object->Release();
\r
6793 LPDIRECTSOUND object;
\r
6794 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6795 if ( hr != DS_OK ) return TRUE;
\r
6797 caps.dwSize = sizeof(caps);
\r
6798 hr = object->GetCaps( &caps );
\r
6799 if ( hr == DS_OK ) {
\r
6800 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6801 validDevice = true;
\r
6803 object->Release();
\r
6806 // If good device, then save its name and guid.
\r
6807 std::string name = convertCharPointerToStdString( description );
\r
6808 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6809 if ( lpguid == NULL )
\r
6810 name = "Default Device";
\r
6811 if ( validDevice ) {
\r
6812 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6813 if ( dsDevices[i].name == name ) {
\r
6814 dsDevices[i].found = true;
\r
6815 if ( probeInfo.isInput ) {
\r
6816 dsDevices[i].id[1] = lpguid;
\r
6817 dsDevices[i].validId[1] = true;
\r
6820 dsDevices[i].id[0] = lpguid;
\r
6821 dsDevices[i].validId[0] = true;
\r
6828 device.name = name;
\r
6829 device.found = true;
\r
6830 if ( probeInfo.isInput ) {
\r
6831 device.id[1] = lpguid;
\r
6832 device.validId[1] = true;
\r
6835 device.id[0] = lpguid;
\r
6836 device.validId[0] = true;
\r
6838 dsDevices.push_back( device );
\r
6844 static const char* getErrorString( int code )
\r
6848 case DSERR_ALLOCATED:
\r
6849 return "Already allocated";
\r
6851 case DSERR_CONTROLUNAVAIL:
\r
6852 return "Control unavailable";
\r
6854 case DSERR_INVALIDPARAM:
\r
6855 return "Invalid parameter";
\r
6857 case DSERR_INVALIDCALL:
\r
6858 return "Invalid call";
\r
6860 case DSERR_GENERIC:
\r
6861 return "Generic error";
\r
6863 case DSERR_PRIOLEVELNEEDED:
\r
6864 return "Priority level needed";
\r
6866 case DSERR_OUTOFMEMORY:
\r
6867 return "Out of memory";
\r
6869 case DSERR_BADFORMAT:
\r
6870 return "The sample rate or the channel format is not supported";
\r
6872 case DSERR_UNSUPPORTED:
\r
6873 return "Not supported";
\r
6875 case DSERR_NODRIVER:
\r
6876 return "No driver";
\r
6878 case DSERR_ALREADYINITIALIZED:
\r
6879 return "Already initialized";
\r
6881 case DSERR_NOAGGREGATION:
\r
6882 return "No aggregation";
\r
6884 case DSERR_BUFFERLOST:
\r
6885 return "Buffer lost";
\r
6887 case DSERR_OTHERAPPHASPRIO:
\r
6888 return "Another application already has priority";
\r
6890 case DSERR_UNINITIALIZED:
\r
6891 return "Uninitialized";
\r
6894 return "DirectSound unknown error";
\r
6897 //******************** End of __WINDOWS_DS__ *********************//
\r
6901 #if defined(__LINUX_ALSA__)
\r
6903 #include <alsa/asoundlib.h>
\r
6904 #include <unistd.h>
\r
6906 // A structure to hold various information related to the ALSA API
\r
6907 // implementation.
\r
6908 struct AlsaHandle {
\r
6909 snd_pcm_t *handles[2];
\r
6910 bool synchronized;
\r
6912 pthread_cond_t runnable_cv;
\r
6916 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6919 static void *alsaCallbackHandler( void * ptr );
\r
6921 RtApiAlsa :: RtApiAlsa()
\r
6923 // Nothing to do here.
\r
6926 RtApiAlsa :: ~RtApiAlsa()
\r
6928 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6931 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6933 unsigned nDevices = 0;
\r
6934 int result, subdevice, card;
\r
6936 snd_ctl_t *handle;
\r
6938 // Count cards and devices
\r
6940 snd_card_next( &card );
\r
6941 while ( card >= 0 ) {
\r
6942 sprintf( name, "hw:%d", card );
\r
6943 result = snd_ctl_open( &handle, name, 0 );
\r
6944 if ( result < 0 ) {
\r
6945 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6946 errorText_ = errorStream_.str();
\r
6947 error( RtAudioError::WARNING );
\r
6952 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6953 if ( result < 0 ) {
\r
6954 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6955 errorText_ = errorStream_.str();
\r
6956 error( RtAudioError::WARNING );
\r
6959 if ( subdevice < 0 )
\r
6964 snd_ctl_close( handle );
\r
6965 snd_card_next( &card );
\r
6968 result = snd_ctl_open( &handle, "default", 0 );
\r
6969 if (result == 0) {
\r
6971 snd_ctl_close( handle );
\r
6977 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6979 RtAudio::DeviceInfo info;
\r
6980 info.probed = false;
\r
6982 unsigned nDevices = 0;
\r
6983 int result, subdevice, card;
\r
6985 snd_ctl_t *chandle;
\r
6987 // Count cards and devices
\r
6990 snd_card_next( &card );
\r
6991 while ( card >= 0 ) {
\r
6992 sprintf( name, "hw:%d", card );
\r
6993 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6994 if ( result < 0 ) {
\r
6995 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6996 errorText_ = errorStream_.str();
\r
6997 error( RtAudioError::WARNING );
\r
7002 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7003 if ( result < 0 ) {
\r
7004 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7005 errorText_ = errorStream_.str();
\r
7006 error( RtAudioError::WARNING );
\r
7009 if ( subdevice < 0 ) break;
\r
7010 if ( nDevices == device ) {
\r
7011 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7017 snd_ctl_close( chandle );
\r
7018 snd_card_next( &card );
\r
7021 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7022 if ( result == 0 ) {
\r
7023 if ( nDevices == device ) {
\r
7024 strcpy( name, "default" );
\r
7030 if ( nDevices == 0 ) {
\r
7031 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7032 error( RtAudioError::INVALID_USE );
\r
7036 if ( device >= nDevices ) {
\r
7037 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7038 error( RtAudioError::INVALID_USE );
\r
7044 // If a stream is already open, we cannot probe the stream devices.
\r
7045 // Thus, use the saved results.
\r
7046 if ( stream_.state != STREAM_CLOSED &&
\r
7047 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7048 snd_ctl_close( chandle );
\r
7049 if ( device >= devices_.size() ) {
\r
7050 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7051 error( RtAudioError::WARNING );
\r
7054 return devices_[ device ];
\r
7057 int openMode = SND_PCM_ASYNC;
\r
7058 snd_pcm_stream_t stream;
\r
7059 snd_pcm_info_t *pcminfo;
\r
7060 snd_pcm_info_alloca( &pcminfo );
\r
7061 snd_pcm_t *phandle;
\r
7062 snd_pcm_hw_params_t *params;
\r
7063 snd_pcm_hw_params_alloca( &params );
\r
7065 // First try for playback unless default device (which has subdev -1)
\r
7066 stream = SND_PCM_STREAM_PLAYBACK;
\r
7067 snd_pcm_info_set_stream( pcminfo, stream );
\r
7068 if ( subdevice != -1 ) {
\r
7069 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7070 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7072 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7073 if ( result < 0 ) {
\r
7074 // Device probably doesn't support playback.
\r
7075 goto captureProbe;
\r
7079 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7080 if ( result < 0 ) {
\r
7081 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7082 errorText_ = errorStream_.str();
\r
7083 error( RtAudioError::WARNING );
\r
7084 goto captureProbe;
\r
7087 // The device is open ... fill the parameter structure.
\r
7088 result = snd_pcm_hw_params_any( phandle, params );
\r
7089 if ( result < 0 ) {
\r
7090 snd_pcm_close( phandle );
\r
7091 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7092 errorText_ = errorStream_.str();
\r
7093 error( RtAudioError::WARNING );
\r
7094 goto captureProbe;
\r
7097 // Get output channel information.
\r
7098 unsigned int value;
\r
7099 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7100 if ( result < 0 ) {
\r
7101 snd_pcm_close( phandle );
\r
7102 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7103 errorText_ = errorStream_.str();
\r
7104 error( RtAudioError::WARNING );
\r
7105 goto captureProbe;
\r
7107 info.outputChannels = value;
\r
7108 snd_pcm_close( phandle );
\r
7111 stream = SND_PCM_STREAM_CAPTURE;
\r
7112 snd_pcm_info_set_stream( pcminfo, stream );
\r
7114 // Now try for capture unless default device (with subdev = -1)
\r
7115 if ( subdevice != -1 ) {
\r
7116 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7117 snd_ctl_close( chandle );
\r
7118 if ( result < 0 ) {
\r
7119 // Device probably doesn't support capture.
\r
7120 if ( info.outputChannels == 0 ) return info;
\r
7121 goto probeParameters;
\r
7125 snd_ctl_close( chandle );
\r
7127 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7128 if ( result < 0 ) {
\r
7129 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7130 errorText_ = errorStream_.str();
\r
7131 error( RtAudioError::WARNING );
\r
7132 if ( info.outputChannels == 0 ) return info;
\r
7133 goto probeParameters;
\r
7136 // The device is open ... fill the parameter structure.
\r
7137 result = snd_pcm_hw_params_any( phandle, params );
\r
7138 if ( result < 0 ) {
\r
7139 snd_pcm_close( phandle );
\r
7140 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7141 errorText_ = errorStream_.str();
\r
7142 error( RtAudioError::WARNING );
\r
7143 if ( info.outputChannels == 0 ) return info;
\r
7144 goto probeParameters;
\r
7147 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7148 if ( result < 0 ) {
\r
7149 snd_pcm_close( phandle );
\r
7150 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7151 errorText_ = errorStream_.str();
\r
7152 error( RtAudioError::WARNING );
\r
7153 if ( info.outputChannels == 0 ) return info;
\r
7154 goto probeParameters;
\r
7156 info.inputChannels = value;
\r
7157 snd_pcm_close( phandle );
\r
7159 // If device opens for both playback and capture, we determine the channels.
\r
7160 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7161 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7163 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7164 if ( device == 0 && info.outputChannels > 0 )
\r
7165 info.isDefaultOutput = true;
\r
7166 if ( device == 0 && info.inputChannels > 0 )
\r
7167 info.isDefaultInput = true;
\r
7170 // At this point, we just need to figure out the supported data
\r
7171 // formats and sample rates. We'll proceed by opening the device in
\r
7172 // the direction with the maximum number of channels, or playback if
\r
7173 // they are equal. This might limit our sample rate options, but so
\r
7176 if ( info.outputChannels >= info.inputChannels )
\r
7177 stream = SND_PCM_STREAM_PLAYBACK;
\r
7179 stream = SND_PCM_STREAM_CAPTURE;
\r
7180 snd_pcm_info_set_stream( pcminfo, stream );
\r
7182 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7183 if ( result < 0 ) {
\r
7184 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7185 errorText_ = errorStream_.str();
\r
7186 error( RtAudioError::WARNING );
\r
7190 // The device is open ... fill the parameter structure.
\r
7191 result = snd_pcm_hw_params_any( phandle, params );
\r
7192 if ( result < 0 ) {
\r
7193 snd_pcm_close( phandle );
\r
7194 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7195 errorText_ = errorStream_.str();
\r
7196 error( RtAudioError::WARNING );
\r
7200 // Test our discrete set of sample rate values.
\r
7201 info.sampleRates.clear();
\r
7202 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7203 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7204 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7206 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7207 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7210 if ( info.sampleRates.size() == 0 ) {
\r
7211 snd_pcm_close( phandle );
\r
7212 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7213 errorText_ = errorStream_.str();
\r
7214 error( RtAudioError::WARNING );
\r
7218 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7219 snd_pcm_format_t format;
\r
7220 info.nativeFormats = 0;
\r
7221 format = SND_PCM_FORMAT_S8;
\r
7222 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7223 info.nativeFormats |= RTAUDIO_SINT8;
\r
7224 format = SND_PCM_FORMAT_S16;
\r
7225 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7226 info.nativeFormats |= RTAUDIO_SINT16;
\r
7227 format = SND_PCM_FORMAT_S24;
\r
7228 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7229 info.nativeFormats |= RTAUDIO_SINT24;
\r
7230 format = SND_PCM_FORMAT_S32;
\r
7231 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7232 info.nativeFormats |= RTAUDIO_SINT32;
\r
7233 format = SND_PCM_FORMAT_FLOAT;
\r
7234 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7235 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7236 format = SND_PCM_FORMAT_FLOAT64;
\r
7237 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7238 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7240 // Check that we have at least one supported format
\r
7241 if ( info.nativeFormats == 0 ) {
\r
7242 snd_pcm_close( phandle );
\r
7243 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7244 errorText_ = errorStream_.str();
\r
7245 error( RtAudioError::WARNING );
\r
7249 // Get the device name
\r
7251 result = snd_card_get_name( card, &cardname );
\r
7252 if ( result >= 0 ) {
\r
7253 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7258 // That's all ... close the device and return
\r
7259 snd_pcm_close( phandle );
\r
7260 info.probed = true;
\r
7264 void RtApiAlsa :: saveDeviceInfo( void )
\r
7268 unsigned int nDevices = getDeviceCount();
\r
7269 devices_.resize( nDevices );
\r
7270 for ( unsigned int i=0; i<nDevices; i++ )
\r
7271 devices_[i] = getDeviceInfo( i );
\r
7274 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7275 unsigned int firstChannel, unsigned int sampleRate,
\r
7276 RtAudioFormat format, unsigned int *bufferSize,
\r
7277 RtAudio::StreamOptions *options )
\r
7280 #if defined(__RTAUDIO_DEBUG__)
\r
7281 snd_output_t *out;
\r
7282 snd_output_stdio_attach(&out, stderr, 0);
\r
7285 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7287 unsigned nDevices = 0;
\r
7288 int result, subdevice, card;
\r
7290 snd_ctl_t *chandle;
\r
7292 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7293 snprintf(name, sizeof(name), "%s", "default");
\r
7295 // Count cards and devices
\r
7297 snd_card_next( &card );
\r
7298 while ( card >= 0 ) {
\r
7299 sprintf( name, "hw:%d", card );
\r
7300 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7301 if ( result < 0 ) {
\r
7302 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7303 errorText_ = errorStream_.str();
\r
7308 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7309 if ( result < 0 ) break;
\r
7310 if ( subdevice < 0 ) break;
\r
7311 if ( nDevices == device ) {
\r
7312 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7313 snd_ctl_close( chandle );
\r
7318 snd_ctl_close( chandle );
\r
7319 snd_card_next( &card );
\r
7322 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7323 if ( result == 0 ) {
\r
7324 if ( nDevices == device ) {
\r
7325 strcpy( name, "default" );
\r
7331 if ( nDevices == 0 ) {
\r
7332 // This should not happen because a check is made before this function is called.
\r
7333 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7337 if ( device >= nDevices ) {
\r
7338 // This should not happen because a check is made before this function is called.
\r
7339 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7346 // The getDeviceInfo() function will not work for a device that is
\r
7347 // already open. Thus, we'll probe the system before opening a
\r
7348 // stream and save the results for use by getDeviceInfo().
\r
7349 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7350 this->saveDeviceInfo();
\r
7352 snd_pcm_stream_t stream;
\r
7353 if ( mode == OUTPUT )
\r
7354 stream = SND_PCM_STREAM_PLAYBACK;
\r
7356 stream = SND_PCM_STREAM_CAPTURE;
\r
7358 snd_pcm_t *phandle;
\r
7359 int openMode = SND_PCM_ASYNC;
\r
7360 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7361 if ( result < 0 ) {
\r
7362 if ( mode == OUTPUT )
\r
7363 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7366 errorText_ = errorStream_.str();
\r
7370 // Fill the parameter structure.
\r
7371 snd_pcm_hw_params_t *hw_params;
\r
7372 snd_pcm_hw_params_alloca( &hw_params );
\r
7373 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7374 if ( result < 0 ) {
\r
7375 snd_pcm_close( phandle );
\r
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7377 errorText_ = errorStream_.str();
\r
7381 #if defined(__RTAUDIO_DEBUG__)
\r
7382 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7383 snd_pcm_hw_params_dump( hw_params, out );
\r
7386 // Set access ... check user preference.
\r
7387 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7388 stream_.userInterleaved = false;
\r
7389 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7390 if ( result < 0 ) {
\r
7391 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7392 stream_.deviceInterleaved[mode] = true;
\r
7395 stream_.deviceInterleaved[mode] = false;
\r
7398 stream_.userInterleaved = true;
\r
7399 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7400 if ( result < 0 ) {
\r
7401 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7402 stream_.deviceInterleaved[mode] = false;
\r
7405 stream_.deviceInterleaved[mode] = true;
\r
7408 if ( result < 0 ) {
\r
7409 snd_pcm_close( phandle );
\r
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7411 errorText_ = errorStream_.str();
\r
7415 // Determine how to set the device format.
\r
7416 stream_.userFormat = format;
\r
7417 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7419 if ( format == RTAUDIO_SINT8 )
\r
7420 deviceFormat = SND_PCM_FORMAT_S8;
\r
7421 else if ( format == RTAUDIO_SINT16 )
\r
7422 deviceFormat = SND_PCM_FORMAT_S16;
\r
7423 else if ( format == RTAUDIO_SINT24 )
\r
7424 deviceFormat = SND_PCM_FORMAT_S24;
\r
7425 else if ( format == RTAUDIO_SINT32 )
\r
7426 deviceFormat = SND_PCM_FORMAT_S32;
\r
7427 else if ( format == RTAUDIO_FLOAT32 )
\r
7428 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7429 else if ( format == RTAUDIO_FLOAT64 )
\r
7430 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7432 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7433 stream_.deviceFormat[mode] = format;
\r
7437 // The user requested format is not natively supported by the device.
\r
7438 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7439 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7440 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7444 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7445 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7446 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7450 deviceFormat = SND_PCM_FORMAT_S32;
\r
7451 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7452 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7456 deviceFormat = SND_PCM_FORMAT_S24;
\r
7457 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7458 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7462 deviceFormat = SND_PCM_FORMAT_S16;
\r
7463 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7464 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7468 deviceFormat = SND_PCM_FORMAT_S8;
\r
7469 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7470 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7474 // If we get here, no supported format was found.
\r
7475 snd_pcm_close( phandle );
\r
7476 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7477 errorText_ = errorStream_.str();
\r
7481 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7482 if ( result < 0 ) {
\r
7483 snd_pcm_close( phandle );
\r
7484 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7485 errorText_ = errorStream_.str();
\r
7489 // Determine whether byte-swaping is necessary.
\r
7490 stream_.doByteSwap[mode] = false;
\r
7491 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7492 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7493 if ( result == 0 )
\r
7494 stream_.doByteSwap[mode] = true;
\r
7495 else if (result < 0) {
\r
7496 snd_pcm_close( phandle );
\r
7497 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7498 errorText_ = errorStream_.str();
\r
7503 // Set the sample rate.
\r
7504 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7505 if ( result < 0 ) {
\r
7506 snd_pcm_close( phandle );
\r
7507 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7508 errorText_ = errorStream_.str();
\r
7512 // Determine the number of channels for this device. We support a possible
\r
7513 // minimum device channel number > than the value requested by the user.
\r
7514 stream_.nUserChannels[mode] = channels;
\r
7515 unsigned int value;
\r
7516 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7517 unsigned int deviceChannels = value;
\r
7518 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7519 snd_pcm_close( phandle );
\r
7520 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7521 errorText_ = errorStream_.str();
\r
7525 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7526 if ( result < 0 ) {
\r
7527 snd_pcm_close( phandle );
\r
7528 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7529 errorText_ = errorStream_.str();
\r
7532 deviceChannels = value;
\r
7533 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7534 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7536 // Set the device channels.
\r
7537 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7538 if ( result < 0 ) {
\r
7539 snd_pcm_close( phandle );
\r
7540 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7541 errorText_ = errorStream_.str();
\r
7545 // Set the buffer (or period) size.
\r
7547 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7548 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7549 if ( result < 0 ) {
\r
7550 snd_pcm_close( phandle );
\r
7551 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7552 errorText_ = errorStream_.str();
\r
7555 *bufferSize = periodSize;
\r
7557 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7558 unsigned int periods = 0;
\r
7559 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7560 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7561 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7562 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7563 if ( result < 0 ) {
\r
7564 snd_pcm_close( phandle );
\r
7565 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7566 errorText_ = errorStream_.str();
\r
7570 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7571 // MUST be the same in both directions!
\r
7572 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7573 snd_pcm_close( phandle );
\r
7574 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7575 errorText_ = errorStream_.str();
\r
7579 stream_.bufferSize = *bufferSize;
\r
7581 // Install the hardware configuration
\r
7582 result = snd_pcm_hw_params( phandle, hw_params );
\r
7583 if ( result < 0 ) {
\r
7584 snd_pcm_close( phandle );
\r
7585 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7586 errorText_ = errorStream_.str();
\r
7590 #if defined(__RTAUDIO_DEBUG__)
\r
7591 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7592 snd_pcm_hw_params_dump( hw_params, out );
\r
7595 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7596 snd_pcm_sw_params_t *sw_params = NULL;
\r
7597 snd_pcm_sw_params_alloca( &sw_params );
\r
7598 snd_pcm_sw_params_current( phandle, sw_params );
\r
7599 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7600 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7601 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7603 // The following two settings were suggested by Theo Veenker
\r
7604 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7605 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7607 // here are two options for a fix
\r
7608 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7609 snd_pcm_uframes_t val;
\r
7610 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7611 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7613 result = snd_pcm_sw_params( phandle, sw_params );
\r
7614 if ( result < 0 ) {
\r
7615 snd_pcm_close( phandle );
\r
7616 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7617 errorText_ = errorStream_.str();
\r
7621 #if defined(__RTAUDIO_DEBUG__)
\r
7622 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7623 snd_pcm_sw_params_dump( sw_params, out );
\r
7626 // Set flags for buffer conversion
\r
7627 stream_.doConvertBuffer[mode] = false;
\r
7628 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7629 stream_.doConvertBuffer[mode] = true;
\r
7630 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7631 stream_.doConvertBuffer[mode] = true;
\r
7632 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7633 stream_.nUserChannels[mode] > 1 )
\r
7634 stream_.doConvertBuffer[mode] = true;
\r
7636 // Allocate the ApiHandle if necessary and then save.
\r
7637 AlsaHandle *apiInfo = 0;
\r
7638 if ( stream_.apiHandle == 0 ) {
\r
7640 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7642 catch ( std::bad_alloc& ) {
\r
7643 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7647 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7648 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7652 stream_.apiHandle = (void *) apiInfo;
\r
7653 apiInfo->handles[0] = 0;
\r
7654 apiInfo->handles[1] = 0;
\r
7657 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7659 apiInfo->handles[mode] = phandle;
\r
7662 // Allocate necessary internal buffers.
\r
7663 unsigned long bufferBytes;
\r
7664 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7665 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7666 if ( stream_.userBuffer[mode] == NULL ) {
\r
7667 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7671 if ( stream_.doConvertBuffer[mode] ) {
\r
7673 bool makeBuffer = true;
\r
7674 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7675 if ( mode == INPUT ) {
\r
7676 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7677 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7678 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7682 if ( makeBuffer ) {
\r
7683 bufferBytes *= *bufferSize;
\r
7684 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7685 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7686 if ( stream_.deviceBuffer == NULL ) {
\r
7687 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7693 stream_.sampleRate = sampleRate;
\r
7694 stream_.nBuffers = periods;
\r
7695 stream_.device[mode] = device;
\r
7696 stream_.state = STREAM_STOPPED;
\r
7698 // Setup the buffer conversion information structure.
\r
7699 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7701 // Setup thread if necessary.
\r
7702 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7703 // We had already set up an output stream.
\r
7704 stream_.mode = DUPLEX;
\r
7705 // Link the streams if possible.
\r
7706 apiInfo->synchronized = false;
\r
7707 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7708 apiInfo->synchronized = true;
\r
7710 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7711 error( RtAudioError::WARNING );
\r
7715 stream_.mode = mode;
\r
7717 // Setup callback thread.
\r
7718 stream_.callbackInfo.object = (void *) this;
\r
7720 // Set the thread attributes for joinable and realtime scheduling
\r
7721 // priority (optional). The higher priority will only take affect
\r
7722 // if the program is run as root or suid. Note, under Linux
\r
7723 // processes with CAP_SYS_NICE privilege, a user can change
\r
7724 // scheduling policy and priority (thus need not be root). See
\r
7725 // POSIX "capabilities".
\r
7726 pthread_attr_t attr;
\r
7727 pthread_attr_init( &attr );
\r
7728 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7730 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7731 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7732 // We previously attempted to increase the audio callback priority
\r
7733 // to SCHED_RR here via the attributes. However, while no errors
\r
7734 // were reported in doing so, it did not work. So, now this is
\r
7735 // done in the alsaCallbackHandler function.
\r
7736 stream_.callbackInfo.doRealtime = true;
\r
7737 int priority = options->priority;
\r
7738 int min = sched_get_priority_min( SCHED_RR );
\r
7739 int max = sched_get_priority_max( SCHED_RR );
\r
7740 if ( priority < min ) priority = min;
\r
7741 else if ( priority > max ) priority = max;
\r
7742 stream_.callbackInfo.priority = priority;
\r
7746 stream_.callbackInfo.isRunning = true;
\r
7747 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7748 pthread_attr_destroy( &attr );
\r
7750 stream_.callbackInfo.isRunning = false;
\r
7751 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7760 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7761 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7762 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7764 stream_.apiHandle = 0;
\r
7767 if ( phandle) snd_pcm_close( phandle );
\r
7769 for ( int i=0; i<2; i++ ) {
\r
7770 if ( stream_.userBuffer[i] ) {
\r
7771 free( stream_.userBuffer[i] );
\r
7772 stream_.userBuffer[i] = 0;
\r
7776 if ( stream_.deviceBuffer ) {
\r
7777 free( stream_.deviceBuffer );
\r
7778 stream_.deviceBuffer = 0;
\r
7781 stream_.state = STREAM_CLOSED;
\r
7785 void RtApiAlsa :: closeStream()
\r
7787 if ( stream_.state == STREAM_CLOSED ) {
\r
7788 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7789 error( RtAudioError::WARNING );
\r
7793 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7794 stream_.callbackInfo.isRunning = false;
\r
7795 MUTEX_LOCK( &stream_.mutex );
\r
7796 if ( stream_.state == STREAM_STOPPED ) {
\r
7797 apiInfo->runnable = true;
\r
7798 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7800 MUTEX_UNLOCK( &stream_.mutex );
\r
7801 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7803 if ( stream_.state == STREAM_RUNNING ) {
\r
7804 stream_.state = STREAM_STOPPED;
\r
7805 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7806 snd_pcm_drop( apiInfo->handles[0] );
\r
7807 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7808 snd_pcm_drop( apiInfo->handles[1] );
\r
7812 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7813 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7814 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7816 stream_.apiHandle = 0;
\r
7819 for ( int i=0; i<2; i++ ) {
\r
7820 if ( stream_.userBuffer[i] ) {
\r
7821 free( stream_.userBuffer[i] );
\r
7822 stream_.userBuffer[i] = 0;
\r
7826 if ( stream_.deviceBuffer ) {
\r
7827 free( stream_.deviceBuffer );
\r
7828 stream_.deviceBuffer = 0;
\r
7831 stream_.mode = UNINITIALIZED;
\r
7832 stream_.state = STREAM_CLOSED;
\r
7835 void RtApiAlsa :: startStream()
\r
7837 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7840 if ( stream_.state == STREAM_RUNNING ) {
\r
7841 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7842 error( RtAudioError::WARNING );
\r
7846 MUTEX_LOCK( &stream_.mutex );
\r
7849 snd_pcm_state_t state;
\r
7850 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7851 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7852 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7853 state = snd_pcm_state( handle[0] );
\r
7854 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7855 result = snd_pcm_prepare( handle[0] );
\r
7856 if ( result < 0 ) {
\r
7857 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7858 errorText_ = errorStream_.str();
\r
7864 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7865 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7866 state = snd_pcm_state( handle[1] );
\r
7867 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7868 result = snd_pcm_prepare( handle[1] );
\r
7869 if ( result < 0 ) {
\r
7870 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7871 errorText_ = errorStream_.str();
\r
7877 stream_.state = STREAM_RUNNING;
\r
7880 apiInfo->runnable = true;
\r
7881 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7882 MUTEX_UNLOCK( &stream_.mutex );
\r
7884 if ( result >= 0 ) return;
\r
7885 error( RtAudioError::SYSTEM_ERROR );
\r
7888 void RtApiAlsa :: stopStream()
\r
7891 if ( stream_.state == STREAM_STOPPED ) {
\r
7892 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7893 error( RtAudioError::WARNING );
\r
7897 stream_.state = STREAM_STOPPED;
\r
7898 MUTEX_LOCK( &stream_.mutex );
\r
7901 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7902 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7903 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7904 if ( apiInfo->synchronized )
\r
7905 result = snd_pcm_drop( handle[0] );
\r
7907 result = snd_pcm_drain( handle[0] );
\r
7908 if ( result < 0 ) {
\r
7909 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7910 errorText_ = errorStream_.str();
\r
7915 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7916 result = snd_pcm_drop( handle[1] );
\r
7917 if ( result < 0 ) {
\r
7918 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7919 errorText_ = errorStream_.str();
\r
7925 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7926 MUTEX_UNLOCK( &stream_.mutex );
\r
7928 if ( result >= 0 ) return;
\r
7929 error( RtAudioError::SYSTEM_ERROR );
\r
7932 void RtApiAlsa :: abortStream()
\r
7935 if ( stream_.state == STREAM_STOPPED ) {
\r
7936 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7937 error( RtAudioError::WARNING );
\r
7941 stream_.state = STREAM_STOPPED;
\r
7942 MUTEX_LOCK( &stream_.mutex );
\r
7945 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7946 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7947 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7948 result = snd_pcm_drop( handle[0] );
\r
7949 if ( result < 0 ) {
\r
7950 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7951 errorText_ = errorStream_.str();
\r
7956 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7957 result = snd_pcm_drop( handle[1] );
\r
7958 if ( result < 0 ) {
\r
7959 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7960 errorText_ = errorStream_.str();
\r
7966 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7967 MUTEX_UNLOCK( &stream_.mutex );
\r
7969 if ( result >= 0 ) return;
\r
7970 error( RtAudioError::SYSTEM_ERROR );
\r
7973 void RtApiAlsa :: callbackEvent()
\r
7975 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7976 if ( stream_.state == STREAM_STOPPED ) {
\r
7977 MUTEX_LOCK( &stream_.mutex );
\r
7978 while ( !apiInfo->runnable )
\r
7979 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7981 if ( stream_.state != STREAM_RUNNING ) {
\r
7982 MUTEX_UNLOCK( &stream_.mutex );
\r
7985 MUTEX_UNLOCK( &stream_.mutex );
\r
7988 if ( stream_.state == STREAM_CLOSED ) {
\r
7989 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7990 error( RtAudioError::WARNING );
\r
7994 int doStopStream = 0;
\r
7995 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7996 double streamTime = getStreamTime();
\r
7997 RtAudioStreamStatus status = 0;
\r
7998 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7999 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
8000 apiInfo->xrun[0] = false;
\r
8002 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
8003 status |= RTAUDIO_INPUT_OVERFLOW;
\r
8004 apiInfo->xrun[1] = false;
\r
8006 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8007 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8009 if ( doStopStream == 2 ) {
\r
8014 MUTEX_LOCK( &stream_.mutex );
\r
8016 // The state might change while waiting on a mutex.
\r
8017 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8022 snd_pcm_t **handle;
\r
8023 snd_pcm_sframes_t frames;
\r
8024 RtAudioFormat format;
\r
8025 handle = (snd_pcm_t **) apiInfo->handles;
\r
8027 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8029 // Setup parameters.
\r
8030 if ( stream_.doConvertBuffer[1] ) {
\r
8031 buffer = stream_.deviceBuffer;
\r
8032 channels = stream_.nDeviceChannels[1];
\r
8033 format = stream_.deviceFormat[1];
\r
8036 buffer = stream_.userBuffer[1];
\r
8037 channels = stream_.nUserChannels[1];
\r
8038 format = stream_.userFormat;
\r
8041 // Read samples from device in interleaved/non-interleaved format.
\r
8042 if ( stream_.deviceInterleaved[1] )
\r
8043 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8045 void *bufs[channels];
\r
8046 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8047 for ( int i=0; i<channels; i++ )
\r
8048 bufs[i] = (void *) (buffer + (i * offset));
\r
8049 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8052 if ( result < (int) stream_.bufferSize ) {
\r
8053 // Either an error or overrun occured.
\r
8054 if ( result == -EPIPE ) {
\r
8055 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8056 if ( state == SND_PCM_STATE_XRUN ) {
\r
8057 apiInfo->xrun[1] = true;
\r
8058 result = snd_pcm_prepare( handle[1] );
\r
8059 if ( result < 0 ) {
\r
8060 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8061 errorText_ = errorStream_.str();
\r
8065 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8066 errorText_ = errorStream_.str();
\r
8070 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8071 errorText_ = errorStream_.str();
\r
8073 error( RtAudioError::WARNING );
\r
8077 // Do byte swapping if necessary.
\r
8078 if ( stream_.doByteSwap[1] )
\r
8079 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8081 // Do buffer conversion if necessary.
\r
8082 if ( stream_.doConvertBuffer[1] )
\r
8083 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8085 // Check stream latency
\r
8086 result = snd_pcm_delay( handle[1], &frames );
\r
8087 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8092 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8094 // Setup parameters and do buffer conversion if necessary.
\r
8095 if ( stream_.doConvertBuffer[0] ) {
\r
8096 buffer = stream_.deviceBuffer;
\r
8097 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8098 channels = stream_.nDeviceChannels[0];
\r
8099 format = stream_.deviceFormat[0];
\r
8102 buffer = stream_.userBuffer[0];
\r
8103 channels = stream_.nUserChannels[0];
\r
8104 format = stream_.userFormat;
\r
8107 // Do byte swapping if necessary.
\r
8108 if ( stream_.doByteSwap[0] )
\r
8109 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8111 // Write samples to device in interleaved/non-interleaved format.
\r
8112 if ( stream_.deviceInterleaved[0] )
\r
8113 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8115 void *bufs[channels];
\r
8116 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8117 for ( int i=0; i<channels; i++ )
\r
8118 bufs[i] = (void *) (buffer + (i * offset));
\r
8119 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8122 if ( result < (int) stream_.bufferSize ) {
\r
8123 // Either an error or underrun occured.
\r
8124 if ( result == -EPIPE ) {
\r
8125 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8126 if ( state == SND_PCM_STATE_XRUN ) {
\r
8127 apiInfo->xrun[0] = true;
\r
8128 result = snd_pcm_prepare( handle[0] );
\r
8129 if ( result < 0 ) {
\r
8130 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8131 errorText_ = errorStream_.str();
\r
8134 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8137 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8138 errorText_ = errorStream_.str();
\r
8142 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8143 errorText_ = errorStream_.str();
\r
8145 error( RtAudioError::WARNING );
\r
8149 // Check stream latency
\r
8150 result = snd_pcm_delay( handle[0], &frames );
\r
8151 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8155 MUTEX_UNLOCK( &stream_.mutex );
\r
8157 RtApi::tickStreamTime();
\r
8158 if ( doStopStream == 1 ) this->stopStream();
\r
8161 static void *alsaCallbackHandler( void *ptr )
\r
8163 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8164 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8165 bool *isRunning = &info->isRunning;
\r
8167 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8168 if ( info->doRealtime ) {
\r
8169 pthread_t tID = pthread_self(); // ID of this thread
\r
8170 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8171 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8175 while ( *isRunning == true ) {
\r
8176 pthread_testcancel();
\r
8177 object->callbackEvent();
\r
8180 pthread_exit( NULL );
\r
8183 //******************** End of __LINUX_ALSA__ *********************//
\r
8186 #if defined(__LINUX_PULSE__)
\r
8188 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8189 // and Tristan Matthews.
\r
8191 #include <pulse/error.h>
\r
8192 #include <pulse/simple.h>
\r
// Sample rates offered by the PulseAudio backend; the list is
// zero-terminated so callers can iterate with `for (...; *sr; ++sr)`.
8195 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8196 44100, 48000, 96000, 0};
\r
// Pairs an RtAudio sample format with its PulseAudio equivalent; used
// by the table below to translate formats when opening a stream.
// NOTE(review): the struct's closing "};" is not visible in this extraction.
8198 struct rtaudio_pa_format_mapping_t {
8199 RtAudioFormat rtaudio_format;
8200 pa_sample_format_t pa_format;
\r
// Format translation table; terminated by a {0, PA_SAMPLE_INVALID} sentinel.
// Only little-endian PulseAudio formats are listed here.
8203 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8204 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8205 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8206 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8207 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: the simple-API playback
// connection, a condition variable used to park the callback thread while
// the stream is stopped, and (per the constructor's initializer list) a
// record connection `s_rec` and a `runnable` flag whose declarations are
// not visible in this extraction — TODO confirm against the full file.
8209 struct PulseAudioHandle {
8210 pa_simple *s_play;
8213 pthread_cond_t runnable_cv;
8215 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: tears down an open stream before the object disappears.
// NOTE(review): the statement executed when the state is not closed
// (presumably a closeStream() call) is missing from this extraction.
8218 RtApiPulse::~RtApiPulse()
8220 if ( stream_.state != STREAM_CLOSED )
\r
// Device count for the PulseAudio backend.
// NOTE(review): the function body is missing from this extraction;
// only the signature is visible.
8224 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Describes the single synthetic "PulseAudio" device: fixed 2-in/2-out,
// default for both directions, with the backend's static rate/format lists.
// The device index is ignored (parameter name commented out).
// NOTE(review): the trailing `return info;` is missing from this extraction.
8229 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8231 RtAudio::DeviceInfo info;
8232 info.probed = true;
8233 info.name = "PulseAudio";
8234 info.outputChannels = 2;
8235 info.inputChannels = 2;
8236 info.duplexChannels = 2;
8237 info.isDefaultOutput = true;
8238 info.isDefaultInput = true;
// Copy the zero-terminated static rate table into the info struct.
8240 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8241 info.sampleRates.push_back( *sr );
8243 info.preferredSampleRate = 48000;
8244 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8249 static void *pulseaudio_callback( void * user )
\r
8251 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8252 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8253 volatile bool *isRunning = &cbi->isRunning;
\r
8255 while ( *isRunning ) {
\r
8256 pthread_testcancel();
\r
8257 context->callbackEvent();
\r
8260 pthread_exit( NULL );
\r
// Close the PulseAudio stream: signal the callback thread to exit, join
// it, free the playback/record connections, destroy the condition
// variable, and release the user-side buffers.
// NOTE(review): several structural lines (braces, a null-check on `pah`,
// the `if ( pah->s_rec )` guard, and the `delete pah`) are missing from
// this extraction.
8263 void RtApiPulse::closeStream( void )
8265 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8267 stream_.callbackInfo.isRunning = false;
// If the stream is parked in STREAM_STOPPED, wake the callback thread so
// it can observe isRunning == false and terminate.
8269 MUTEX_LOCK( &stream_.mutex );
8270 if ( stream_.state == STREAM_STOPPED ) {
8271 pah->runnable = true;
8272 pthread_cond_signal( &pah->runnable_cv );
8274 MUTEX_UNLOCK( &stream_.mutex );
8276 pthread_join( pah->thread, 0 );
// Discard any queued playback data before freeing the connection.
8277 if ( pah->s_play ) {
8278 pa_simple_flush( pah->s_play, NULL );
8279 pa_simple_free( pah->s_play );
8282 pa_simple_free( pah->s_rec );
8284 pthread_cond_destroy( &pah->runnable_cv );
8286 stream_.apiHandle = 0;
// Release user conversion buffers for both directions (output, input).
8289 if ( stream_.userBuffer[0] ) {
8290 free( stream_.userBuffer[0] );
8291 stream_.userBuffer[0] = 0;
8293 if ( stream_.userBuffer[1] ) {
8294 free( stream_.userBuffer[1] );
8295 stream_.userBuffer[1] = 0;
8298 stream_.state = STREAM_CLOSED;
8299 stream_.mode = UNINITIALIZED;
\r
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write output to / read input from the
// PulseAudio simple-API connections, converting formats as configured.
// NOTE(review): braces, `return`s, `else` keywords and the declarations
// of `bytes`/`pa_error` are missing from this extraction.
8302 void RtApiPulse::callbackEvent( void )
8304 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on the condition variable while the stream is stopped; if it was
// not restarted (e.g. it is being closed), bail out.
8306 if ( stream_.state == STREAM_STOPPED ) {
8307 MUTEX_LOCK( &stream_.mutex );
8308 while ( !pah->runnable )
8309 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8311 if ( stream_.state != STREAM_RUNNING ) {
8312 MUTEX_UNLOCK( &stream_.mutex );
8315 MUTEX_UNLOCK( &stream_.mutex );
8318 if ( stream_.state == STREAM_CLOSED ) {
8319 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8320 "this shouldn't happen!";
8321 error( RtAudioError::WARNING );
// Invoke the user's callback with both buffers and the current stream time.
8325 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8326 double streamTime = getStreamTime();
8327 RtAudioStreamStatus status = 0;
8328 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8329 stream_.bufferSize, streamTime, status,
8330 stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort (handled in the missing lines).
8332 if ( doStopStream == 2 ) {
8337 MUTEX_LOCK( &stream_.mutex );
// When format conversion is enabled the device-side data lives in
// deviceBuffer; otherwise PulseAudio reads/writes the user buffer directly.
8338 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8339 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8341 if ( stream_.state != STREAM_RUNNING )
// Playback path: convert user data if needed, then blocking-write it.
8346 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8347 if ( stream_.doConvertBuffer[OUTPUT] ) {
8348 convertBuffer( stream_.deviceBuffer,
8349 stream_.userBuffer[OUTPUT],
8350 stream_.convertInfo[OUTPUT] );
8351 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8352 formatBytes( stream_.deviceFormat[OUTPUT] );
8354 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8355 formatBytes( stream_.userFormat );
8357 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8358 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8359 pa_strerror( pa_error ) << ".";
8360 errorText_ = errorStream_.str();
8361 error( RtAudioError::WARNING );
// Capture path: blocking-read from PulseAudio, then convert if needed.
8365 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8366 if ( stream_.doConvertBuffer[INPUT] )
8367 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8368 formatBytes( stream_.deviceFormat[INPUT] );
8370 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8371 formatBytes( stream_.userFormat );
8373 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8374 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8375 pa_strerror( pa_error ) << ".";
8376 errorText_ = errorStream_.str();
8377 error( RtAudioError::WARNING );
8379 if ( stream_.doConvertBuffer[INPUT] ) {
8380 convertBuffer( stream_.userBuffer[INPUT],
8381 stream_.deviceBuffer,
8382 stream_.convertInfo[INPUT] );
8387 MUTEX_UNLOCK( &stream_.mutex );
8388 RtApi::tickStreamTime();
// A callback return of 1 requests a graceful stop (stopStream in the
// missing line that follows).
8390 if ( doStopStream == 1 )
\r
// Start the PulseAudio stream: validate state, then mark the stream
// running and wake the (already created) callback thread.
// NOTE(review): the `return;` statements after the two error() calls are
// missing from this extraction.
8394 void RtApiPulse::startStream( void )
8396 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8398 if ( stream_.state == STREAM_CLOSED ) {
8399 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8400 error( RtAudioError::INVALID_USE );
8403 if ( stream_.state == STREAM_RUNNING ) {
8404 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8405 error( RtAudioError::WARNING );
8409 MUTEX_LOCK( &stream_.mutex );
8411 stream_.state = STREAM_RUNNING;
// Release the callback thread parked in callbackEvent().
8413 pah->runnable = true;
8414 pthread_cond_signal( &pah->runnable_cv );
8415 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the PulseAudio stream gracefully: validate state, then drain any
// queued playback data so it is heard before the stream halts.
// NOTE(review): the `return;` statements after the error() calls and the
// declaration of `pa_error` are missing from this extraction.
8418 void RtApiPulse::stopStream( void )
8420 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8422 if ( stream_.state == STREAM_CLOSED ) {
8423 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8424 error( RtAudioError::INVALID_USE );
8427 if ( stream_.state == STREAM_STOPPED ) {
8428 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8429 error( RtAudioError::WARNING );
8433 stream_.state = STREAM_STOPPED;
8434 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) pending samples on the playback connection.
8436 if ( pah && pah->s_play ) {
8438 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8439 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8440 pa_strerror( pa_error ) << ".";
8441 errorText_ = errorStream_.str();
// Unlock before raising the error so the handler can't deadlock on the mutex.
8442 MUTEX_UNLOCK( &stream_.mutex );
8443 error( RtAudioError::SYSTEM_ERROR );
8448 stream_.state = STREAM_STOPPED;
8449 MUTEX_UNLOCK( &stream_.mutex );
\r
// Abort the PulseAudio stream: like stopStream(), but flushes (discards)
// queued playback data instead of draining it.
// NOTE(review): the `return;` statements after the error() calls and the
// declaration of `pa_error` are missing from this extraction.
8452 void RtApiPulse::abortStream( void )
8454 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8456 if ( stream_.state == STREAM_CLOSED ) {
8457 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8458 error( RtAudioError::INVALID_USE );
8461 if ( stream_.state == STREAM_STOPPED ) {
8462 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8463 error( RtAudioError::WARNING );
8467 stream_.state = STREAM_STOPPED;
8468 MUTEX_LOCK( &stream_.mutex );
// Discard pending samples on the playback connection.
8470 if ( pah && pah->s_play ) {
8472 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8473 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8474 pa_strerror( pa_error ) << ".";
8475 errorText_ = errorStream_.str();
// Unlock before raising the error so the handler can't deadlock on the mutex.
8476 MUTEX_UNLOCK( &stream_.mutex );
8477 error( RtAudioError::SYSTEM_ERROR );
8482 stream_.state = STREAM_STOPPED;
8483 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open the (single) PulseAudio device for one direction.  Validates the
// requested parameters against the backend's static capabilities, sets up
// stream bookkeeping and conversion buffers, connects to the PulseAudio
// server via the simple API, and spawns the callback thread.
// Returns true on success, false on any failure (error path at the end
// releases whatever was allocated).
// NOTE(review): many structural lines (braces, `return false;`/`break;`
// statements, `else` keywords, goto-style error labels, and the
// declaration of `error`) are missing from this extraction.
8486 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8487 unsigned int channels, unsigned int firstChannel,
8488 unsigned int sampleRate, RtAudioFormat format,
8489 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8491 PulseAudioHandle *pah = 0;
8492 unsigned long bufferBytes = 0;
8493 pa_sample_spec ss;
// This backend exposes exactly one device supporting mono or stereo only.
8495 if ( device != 0 ) return false;
8496 if ( mode != INPUT && mode != OUTPUT ) return false;
8497 if ( channels != 1 && channels != 2 ) {
8498 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8501 ss.channels = channels;
8503 if ( firstChannel != 0 ) return false;
// The requested rate must appear in the static SUPPORTED_SAMPLERATES table.
8505 bool sr_found = false;
8506 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8507 if ( sampleRate == *sr ) {
8509 stream_.sampleRate = sampleRate;
8510 ss.rate = sampleRate;
8514 if ( !sr_found ) {
8515 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the RtAudio format to a native PulseAudio format when possible ...
8519 bool sf_found = 0;
8520 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8521 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8522 if ( format == sf->rtaudio_format ) {
8524 stream_.userFormat = sf->rtaudio_format;
8525 stream_.deviceFormat[mode] = stream_.userFormat;
8526 ss.format = sf->pa_format;
// ... otherwise fall back to float32 on the device side and convert.
8530 if ( !sf_found ) { // Use internal data format conversion.
8531 stream_.userFormat = format;
8532 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8533 ss.format = PA_SAMPLE_FLOAT32LE;
8536 // Set other stream parameters.
8537 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8538 else stream_.userInterleaved = true;
8539 stream_.deviceInterleaved[mode] = true;
8540 stream_.nBuffers = 1;
8541 stream_.doByteSwap[mode] = false;
8542 stream_.nUserChannels[mode] = channels;
8543 stream_.nDeviceChannels[mode] = channels + firstChannel;
8544 stream_.channelOffset[mode] = 0;
8545 std::string streamName = "RtAudio";
8547 // Set flags for buffer conversion.
8548 stream_.doConvertBuffer[mode] = false;
8549 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8550 stream_.doConvertBuffer[mode] = true;
8551 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8552 stream_.doConvertBuffer[mode] = true;
8554 // Allocate necessary internal buffers.
8555 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8556 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8557 if ( stream_.userBuffer[mode] == NULL ) {
8558 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8561 stream_.bufferSize = *bufferSize;
// Allocate a device-side buffer when conversion is required; for duplex
// reuse the existing one if it is already large enough.
8563 if ( stream_.doConvertBuffer[mode] ) {
8565 bool makeBuffer = true;
8566 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8567 if ( mode == INPUT ) {
8568 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8569 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8570 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8574 if ( makeBuffer ) {
8575 bufferBytes *= *bufferSize;
8576 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8577 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8578 if ( stream_.deviceBuffer == NULL ) {
8579 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8585 stream_.device[mode] = device;
8587 // Setup the buffer conversion information structure.
8588 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the per-stream handle and its condition variable on first open.
8590 if ( !stream_.apiHandle ) {
8591 PulseAudioHandle *pah = new PulseAudioHandle;
8593 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8597 stream_.apiHandle = pah;
8598 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8599 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8603 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8606 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record path: request a fragment size matching our buffer; -1 lets the
// server choose the maximum total length.
8609 pa_buffer_attr buffer_attr;
8610 buffer_attr.fragsize = bufferBytes;
8611 buffer_attr.maxlength = -1;
8613 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8614 if ( !pah->s_rec ) {
8615 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
// Playback path: default buffering attributes.
8620 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8621 if ( !pah->s_play ) {
8622 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Opening the second direction on an already-open stream promotes it to DUPLEX.
8630 if ( stream_.mode == UNINITIALIZED )
8631 stream_.mode = mode;
8632 else if ( stream_.mode == mode )
8635 stream_.mode = DUPLEX;
// Spawn the callback thread once for the lifetime of the stream.
8637 if ( !stream_.callbackInfo.isRunning ) {
8638 stream_.callbackInfo.object = this;
8639 stream_.callbackInfo.isRunning = true;
8640 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8641 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8646 stream_.state = STREAM_STOPPED;
// Error cleanup: release the handle, user buffers and device buffer.
8650 if ( pah && stream_.callbackInfo.isRunning ) {
8651 pthread_cond_destroy( &pah->runnable_cv );
8653 stream_.apiHandle = 0;
8656 for ( int i=0; i<2; i++ ) {
8657 if ( stream_.userBuffer[i] ) {
8658 free( stream_.userBuffer[i] );
8659 stream_.userBuffer[i] = 0;
8663 if ( stream_.deviceBuffer ) {
8664 free( stream_.deviceBuffer );
8665 stream_.deviceBuffer = 0;
\r
8671 //******************** End of __LINUX_PULSE__ *********************//
\r
8674 #if defined(__LINUX_OSS__)
\r
8676 #include <unistd.h>
\r
8677 #include <sys/ioctl.h>
\r
8678 #include <unistd.h>
\r
8679 #include <fcntl.h>
\r
8680 #include <sys/soundcard.h>
\r
8681 #include <errno.h>
\r
8684 static void *ossCallbackHandler(void * ptr);
\r
8686 // A structure to hold various information related to the OSS API
\r
8687 // implementation.
\r
// Per-stream state for the OSS backend: one device file descriptor per
// direction and a condition variable used to coordinate the callback
// thread.  The initializer list references `triggered` and `xrun[2]`
// members whose declarations are not visible in this extraction — TODO
// confirm against the full file.
8688 struct OssHandle {
8689 int id[2]; // device ids
8692 pthread_cond_t runnable;
8695 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8698 RtApiOss :: RtApiOss()
\r
8700 // Nothing to do here.
\r
8703 RtApiOss :: ~RtApiOss()
\r
8705 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count OSS audio devices by querying the mixer's SNDCTL_SYSINFO ioctl
// (requires OSS >= 4.0).  Emits a WARNING and (per the missing lines)
// presumably returns 0 on failure.
// NOTE(review): the `return 0;` statements and the `close( mixerfd )`
// calls on both the error and success paths are missing from this
// extraction.
8708 unsigned int RtApiOss :: getDeviceCount( void )
8710 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8711 if ( mixerfd == -1 ) {
8712 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8713 error( RtAudioError::WARNING );
8717 oss_sysinfo sysinfo;
8718 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8720 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8721 error( RtAudioError::WARNING );
8726 return sysinfo.numaudios;
\r
// Probe one OSS audio device via the mixer's SNDCTL_AUDIOINFO ioctl:
// channel capabilities, native data formats, and supported sample rates.
// `info.probed` stays false until every query succeeds.
// NOTE(review): braces, `return info;` statements, `else` keywords and
// the `close( mixerfd )` calls are missing from this extraction.
8729 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8731 RtAudio::DeviceInfo info;
8732 info.probed = false;
8734 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8735 if ( mixerfd == -1 ) {
8736 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8737 error( RtAudioError::WARNING );
8741 oss_sysinfo sysinfo;
8742 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8743 if ( result == -1 ) {
8745 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8746 error( RtAudioError::WARNING );
8750 unsigned nDevices = sysinfo.numaudios;
8751 if ( nDevices == 0 ) {
8753 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8754 error( RtAudioError::INVALID_USE );
8758 if ( device >= nDevices ) {
8760 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8761 error( RtAudioError::INVALID_USE );
// Query detailed capabilities for the requested device index.
8765 oss_audioinfo ainfo;
8766 ainfo.dev = device;
8767 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8769 if ( result == -1 ) {
8770 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8771 errorText_ = errorStream_.str();
8772 error( RtAudioError::WARNING );
// Channel counts: duplex channel count is the min of input and output.
8777 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8778 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8779 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8780 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8781 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8784 // Probe data formats ... do for input
8785 unsigned long mask = ainfo.iformats;
8786 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8787 info.nativeFormats |= RTAUDIO_SINT16;
8788 if ( mask & AFMT_S8 )
8789 info.nativeFormats |= RTAUDIO_SINT8;
8790 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8791 info.nativeFormats |= RTAUDIO_SINT32;
8792 if ( mask & AFMT_FLOAT )
8793 info.nativeFormats |= RTAUDIO_FLOAT32;
8794 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8795 info.nativeFormats |= RTAUDIO_SINT24;
8797 // Check that we have at least one supported format
8798 if ( info.nativeFormats == 0 ) {
8799 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8800 errorText_ = errorStream_.str();
8801 error( RtAudioError::WARNING );
8805 // Probe the supported sample rates.
8806 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with
// RtAudio's SAMPLE_RATES table; prefer the highest rate <= 48 kHz.
8807 if ( ainfo.nrates ) {
8808 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8809 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8810 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8811 info.sampleRates.push_back( SAMPLE_RATES[k] );
8813 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8814 info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise accept every table rate inside the device's [min, max] range.
8822 // Check min and max rate values;
8823 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8824 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8825 info.sampleRates.push_back( SAMPLE_RATES[k] );
8827 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8828 info.preferredSampleRate = SAMPLE_RATES[k];
8833 if ( info.sampleRates.size() == 0 ) {
8834 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8835 errorText_ = errorStream_.str();
8836 error( RtAudioError::WARNING );
8839 info.probed = true;
8840 info.name = ainfo.name;
\r
8847 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8848 unsigned int firstChannel, unsigned int sampleRate,
\r
8849 RtAudioFormat format, unsigned int *bufferSize,
\r
8850 RtAudio::StreamOptions *options )
\r
8852 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8853 if ( mixerfd == -1 ) {
\r
8854 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8858 oss_sysinfo sysinfo;
\r
8859 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8860 if ( result == -1 ) {
\r
8862 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8866 unsigned nDevices = sysinfo.numaudios;
\r
8867 if ( nDevices == 0 ) {
\r
8868 // This should not happen because a check is made before this function is called.
\r
8870 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8874 if ( device >= nDevices ) {
\r
8875 // This should not happen because a check is made before this function is called.
\r
8877 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8881 oss_audioinfo ainfo;
\r
8882 ainfo.dev = device;
\r
8883 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8885 if ( result == -1 ) {
\r
8886 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8887 errorText_ = errorStream_.str();
\r
8891 // Check if device supports input or output
\r
8892 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8893 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8894 if ( mode == OUTPUT )
\r
8895 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8897 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8898 errorText_ = errorStream_.str();
\r
8903 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8904 if ( mode == OUTPUT )
\r
8905 flags |= O_WRONLY;
\r
8906 else { // mode == INPUT
\r
8907 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8908 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8909 close( handle->id[0] );
\r
8910 handle->id[0] = 0;
\r
8911 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8912 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8913 errorText_ = errorStream_.str();
\r
8916 // Check that the number previously set channels is the same.
\r
8917 if ( stream_.nUserChannels[0] != channels ) {
\r
8918 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8919 errorText_ = errorStream_.str();
\r
8925 flags |= O_RDONLY;
\r
8928 // Set exclusive access if specified.
\r
8929 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8931 // Try to open the device.
\r
8933 fd = open( ainfo.devnode, flags, 0 );
\r
8935 if ( errno == EBUSY )
\r
8936 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8938 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8939 errorText_ = errorStream_.str();
\r
8943 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8945 if ( flags | O_RDWR ) {
\r
8946 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8947 if ( result == -1) {
\r
8948 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8949 errorText_ = errorStream_.str();
\r
8955 // Check the device channel support.
\r
8956 stream_.nUserChannels[mode] = channels;
\r
8957 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8959 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8960 errorText_ = errorStream_.str();
\r
8964 // Set the number of channels.
\r
8965 int deviceChannels = channels + firstChannel;
\r
8966 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8967 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8969 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8970 errorText_ = errorStream_.str();
\r
8973 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8975 // Get the data format mask
\r
8977 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8978 if ( result == -1 ) {
\r
8980 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8981 errorText_ = errorStream_.str();
\r
8985 // Determine how to set the device format.
\r
8986 stream_.userFormat = format;
\r
8987 int deviceFormat = -1;
\r
8988 stream_.doByteSwap[mode] = false;
\r
8989 if ( format == RTAUDIO_SINT8 ) {
\r
8990 if ( mask & AFMT_S8 ) {
\r
8991 deviceFormat = AFMT_S8;
\r
8992 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8995 else if ( format == RTAUDIO_SINT16 ) {
\r
8996 if ( mask & AFMT_S16_NE ) {
\r
8997 deviceFormat = AFMT_S16_NE;
\r
8998 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9000 else if ( mask & AFMT_S16_OE ) {
\r
9001 deviceFormat = AFMT_S16_OE;
\r
9002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9003 stream_.doByteSwap[mode] = true;
\r
9006 else if ( format == RTAUDIO_SINT24 ) {
\r
9007 if ( mask & AFMT_S24_NE ) {
\r
9008 deviceFormat = AFMT_S24_NE;
\r
9009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9011 else if ( mask & AFMT_S24_OE ) {
\r
9012 deviceFormat = AFMT_S24_OE;
\r
9013 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9014 stream_.doByteSwap[mode] = true;
\r
9017 else if ( format == RTAUDIO_SINT32 ) {
\r
9018 if ( mask & AFMT_S32_NE ) {
\r
9019 deviceFormat = AFMT_S32_NE;
\r
9020 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9022 else if ( mask & AFMT_S32_OE ) {
\r
9023 deviceFormat = AFMT_S32_OE;
\r
9024 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9025 stream_.doByteSwap[mode] = true;
\r
9029 if ( deviceFormat == -1 ) {
\r
9030 // The user requested format is not natively supported by the device.
\r
9031 if ( mask & AFMT_S16_NE ) {
\r
9032 deviceFormat = AFMT_S16_NE;
\r
9033 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9035 else if ( mask & AFMT_S32_NE ) {
\r
9036 deviceFormat = AFMT_S32_NE;
\r
9037 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9039 else if ( mask & AFMT_S24_NE ) {
\r
9040 deviceFormat = AFMT_S24_NE;
\r
9041 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9043 else if ( mask & AFMT_S16_OE ) {
\r
9044 deviceFormat = AFMT_S16_OE;
\r
9045 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9046 stream_.doByteSwap[mode] = true;
\r
9048 else if ( mask & AFMT_S32_OE ) {
\r
9049 deviceFormat = AFMT_S32_OE;
\r
9050 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9051 stream_.doByteSwap[mode] = true;
\r
9053 else if ( mask & AFMT_S24_OE ) {
\r
9054 deviceFormat = AFMT_S24_OE;
\r
9055 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9056 stream_.doByteSwap[mode] = true;
\r
9058 else if ( mask & AFMT_S8) {
\r
9059 deviceFormat = AFMT_S8;
\r
9060 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9064 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9065 // This really shouldn't happen ...
\r
9067 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9068 errorText_ = errorStream_.str();
\r
9072 // Set the data format.
\r
9073 int temp = deviceFormat;
\r
9074 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9075 if ( result == -1 || deviceFormat != temp ) {
\r
9077 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9078 errorText_ = errorStream_.str();
\r
9082 // Attempt to set the buffer size. According to OSS, the minimum
\r
9083 // number of buffers is two. The supposed minimum buffer size is 16
\r
9084 // bytes, so that will be our lower bound. The argument to this
\r
9085 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9086 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9087 // We'll check the actual value used near the end of the setup
\r
9089 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9090 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9092 if ( options ) buffers = options->numberOfBuffers;
\r
9093 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9094 if ( buffers < 2 ) buffers = 3;
\r
9095 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9096 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9097 if ( result == -1 ) {
\r
9099 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9100 errorText_ = errorStream_.str();
\r
9103 stream_.nBuffers = buffers;
\r
9105 // Save buffer size (in sample frames).
\r
9106 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9107 stream_.bufferSize = *bufferSize;
\r
9109 // Set the sample rate.
\r
9110 int srate = sampleRate;
\r
9111 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9112 if ( result == -1 ) {
\r
9114 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9115 errorText_ = errorStream_.str();
\r
9119 // Verify the sample rate setup worked.
\r
9120 if ( abs( srate - sampleRate ) > 100 ) {
\r
9122 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9123 errorText_ = errorStream_.str();
\r
9126 stream_.sampleRate = sampleRate;
\r
9128 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9129 // We're doing duplex setup here.
\r
9130 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9131 stream_.nDeviceChannels[0] = deviceChannels;
\r
9134 // Set interleaving parameters.
\r
9135 stream_.userInterleaved = true;
\r
9136 stream_.deviceInterleaved[mode] = true;
\r
9137 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9138 stream_.userInterleaved = false;
\r
9140 // Set flags for buffer conversion
\r
9141 stream_.doConvertBuffer[mode] = false;
\r
9142 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9143 stream_.doConvertBuffer[mode] = true;
\r
9144 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9145 stream_.doConvertBuffer[mode] = true;
\r
9146 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9147 stream_.nUserChannels[mode] > 1 )
\r
9148 stream_.doConvertBuffer[mode] = true;
\r
9150 // Allocate the stream handles if necessary and then save.
\r
9151 if ( stream_.apiHandle == 0 ) {
\r
9153 handle = new OssHandle;
\r
9155 catch ( std::bad_alloc& ) {
\r
9156 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9160 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9161 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9165 stream_.apiHandle = (void *) handle;
\r
9168 handle = (OssHandle *) stream_.apiHandle;
\r
9170 handle->id[mode] = fd;
\r
9172 // Allocate necessary internal buffers.
\r
9173 unsigned long bufferBytes;
\r
9174 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9175 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9176 if ( stream_.userBuffer[mode] == NULL ) {
\r
9177 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9181 if ( stream_.doConvertBuffer[mode] ) {
\r
9183 bool makeBuffer = true;
\r
9184 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9185 if ( mode == INPUT ) {
\r
9186 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9187 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9188 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9192 if ( makeBuffer ) {
\r
9193 bufferBytes *= *bufferSize;
\r
9194 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9195 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9196 if ( stream_.deviceBuffer == NULL ) {
\r
9197 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9203 stream_.device[mode] = device;
\r
9204 stream_.state = STREAM_STOPPED;
\r
9206 // Setup the buffer conversion information structure.
\r
9207 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9209 // Setup thread if necessary.
\r
9210 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9211 // We had already set up an output stream.
\r
9212 stream_.mode = DUPLEX;
\r
9213 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9216 stream_.mode = mode;
\r
9218 // Setup callback thread.
\r
9219 stream_.callbackInfo.object = (void *) this;
\r
9221 // Set the thread attributes for joinable and realtime scheduling
\r
9222 // priority. The higher priority will only take affect if the
\r
9223 // program is run as root or suid.
\r
9224 pthread_attr_t attr;
\r
9225 pthread_attr_init( &attr );
\r
9226 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9227 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9228 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9229 struct sched_param param;
\r
9230 int priority = options->priority;
\r
9231 int min = sched_get_priority_min( SCHED_RR );
\r
9232 int max = sched_get_priority_max( SCHED_RR );
\r
9233 if ( priority < min ) priority = min;
\r
9234 else if ( priority > max ) priority = max;
\r
9235 param.sched_priority = priority;
\r
9236 pthread_attr_setschedparam( &attr, ¶m );
\r
9237 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9240 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9242 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9245 stream_.callbackInfo.isRunning = true;
\r
9246 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9247 pthread_attr_destroy( &attr );
\r
9249 stream_.callbackInfo.isRunning = false;
\r
9250 errorText_ = "RtApiOss::error creating callback thread!";
\r
9259 pthread_cond_destroy( &handle->runnable );
\r
9260 if ( handle->id[0] ) close( handle->id[0] );
\r
9261 if ( handle->id[1] ) close( handle->id[1] );
\r
9263 stream_.apiHandle = 0;
\r
9266 for ( int i=0; i<2; i++ ) {
\r
9267 if ( stream_.userBuffer[i] ) {
\r
9268 free( stream_.userBuffer[i] );
\r
9269 stream_.userBuffer[i] = 0;
\r
9273 if ( stream_.deviceBuffer ) {
\r
9274 free( stream_.deviceBuffer );
\r
9275 stream_.deviceBuffer = 0;
\r
9281 void RtApiOss :: closeStream()
\r
9283 if ( stream_.state == STREAM_CLOSED ) {
\r
9284 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9285 error( RtAudioError::WARNING );
\r
9289 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9290 stream_.callbackInfo.isRunning = false;
\r
9291 MUTEX_LOCK( &stream_.mutex );
\r
9292 if ( stream_.state == STREAM_STOPPED )
\r
9293 pthread_cond_signal( &handle->runnable );
\r
9294 MUTEX_UNLOCK( &stream_.mutex );
\r
9295 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9297 if ( stream_.state == STREAM_RUNNING ) {
\r
9298 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9299 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9301 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9302 stream_.state = STREAM_STOPPED;
\r
9306 pthread_cond_destroy( &handle->runnable );
\r
9307 if ( handle->id[0] ) close( handle->id[0] );
\r
9308 if ( handle->id[1] ) close( handle->id[1] );
\r
9310 stream_.apiHandle = 0;
\r
9313 for ( int i=0; i<2; i++ ) {
\r
9314 if ( stream_.userBuffer[i] ) {
\r
9315 free( stream_.userBuffer[i] );
\r
9316 stream_.userBuffer[i] = 0;
\r
9320 if ( stream_.deviceBuffer ) {
\r
9321 free( stream_.deviceBuffer );
\r
9322 stream_.deviceBuffer = 0;
\r
9325 stream_.mode = UNINITIALIZED;
\r
9326 stream_.state = STREAM_CLOSED;
\r
9329 void RtApiOss :: startStream()
\r
9332 if ( stream_.state == STREAM_RUNNING ) {
\r
9333 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9334 error( RtAudioError::WARNING );
\r
9338 MUTEX_LOCK( &stream_.mutex );
\r
9340 stream_.state = STREAM_RUNNING;
\r
9342 // No need to do anything else here ... OSS automatically starts
\r
9343 // when fed samples.
\r
9345 MUTEX_UNLOCK( &stream_.mutex );
\r
9347 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9348 pthread_cond_signal( &handle->runnable );
\r
9351 void RtApiOss :: stopStream()
\r
9354 if ( stream_.state == STREAM_STOPPED ) {
\r
9355 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9356 error( RtAudioError::WARNING );
\r
9360 MUTEX_LOCK( &stream_.mutex );
\r
9362 // The state might change while waiting on a mutex.
\r
9363 if ( stream_.state == STREAM_STOPPED ) {
\r
9364 MUTEX_UNLOCK( &stream_.mutex );
\r
9369 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9370 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9372 // Flush the output with zeros a few times.
\r
9375 RtAudioFormat format;
\r
9377 if ( stream_.doConvertBuffer[0] ) {
\r
9378 buffer = stream_.deviceBuffer;
\r
9379 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9380 format = stream_.deviceFormat[0];
\r
9383 buffer = stream_.userBuffer[0];
\r
9384 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9385 format = stream_.userFormat;
\r
9388 memset( buffer, 0, samples * formatBytes(format) );
\r
9389 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9390 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9391 if ( result == -1 ) {
\r
9392 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9393 error( RtAudioError::WARNING );
\r
9397 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9398 if ( result == -1 ) {
\r
9399 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9400 errorText_ = errorStream_.str();
\r
9403 handle->triggered = false;
\r
9406 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9407 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9408 if ( result == -1 ) {
\r
9409 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9410 errorText_ = errorStream_.str();
\r
9416 stream_.state = STREAM_STOPPED;
\r
9417 MUTEX_UNLOCK( &stream_.mutex );
\r
9419 if ( result != -1 ) return;
\r
9420 error( RtAudioError::SYSTEM_ERROR );
\r
9423 void RtApiOss :: abortStream()
\r
9426 if ( stream_.state == STREAM_STOPPED ) {
\r
9427 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9428 error( RtAudioError::WARNING );
\r
9432 MUTEX_LOCK( &stream_.mutex );
\r
9434 // The state might change while waiting on a mutex.
\r
9435 if ( stream_.state == STREAM_STOPPED ) {
\r
9436 MUTEX_UNLOCK( &stream_.mutex );
\r
9441 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9442 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9443 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9444 if ( result == -1 ) {
\r
9445 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9446 errorText_ = errorStream_.str();
\r
9449 handle->triggered = false;
\r
9452 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9453 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9454 if ( result == -1 ) {
\r
9455 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9456 errorText_ = errorStream_.str();
\r
9462 stream_.state = STREAM_STOPPED;
\r
9463 MUTEX_UNLOCK( &stream_.mutex );
\r
9465 if ( result != -1 ) return;
\r
9466 error( RtAudioError::SYSTEM_ERROR );
\r
9469 void RtApiOss :: callbackEvent()
\r
9471 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9472 if ( stream_.state == STREAM_STOPPED ) {
\r
9473 MUTEX_LOCK( &stream_.mutex );
\r
9474 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9475 if ( stream_.state != STREAM_RUNNING ) {
\r
9476 MUTEX_UNLOCK( &stream_.mutex );
\r
9479 MUTEX_UNLOCK( &stream_.mutex );
\r
9482 if ( stream_.state == STREAM_CLOSED ) {
\r
9483 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9484 error( RtAudioError::WARNING );
\r
9488 // Invoke user callback to get fresh output data.
\r
9489 int doStopStream = 0;
\r
9490 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9491 double streamTime = getStreamTime();
\r
9492 RtAudioStreamStatus status = 0;
\r
9493 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9494 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9495 handle->xrun[0] = false;
\r
9497 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9498 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9499 handle->xrun[1] = false;
\r
9501 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9502 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9503 if ( doStopStream == 2 ) {
\r
9504 this->abortStream();
\r
9508 MUTEX_LOCK( &stream_.mutex );
\r
9510 // The state might change while waiting on a mutex.
\r
9511 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9516 RtAudioFormat format;
\r
9518 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9520 // Setup parameters and do buffer conversion if necessary.
\r
9521 if ( stream_.doConvertBuffer[0] ) {
\r
9522 buffer = stream_.deviceBuffer;
\r
9523 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9524 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9525 format = stream_.deviceFormat[0];
\r
9528 buffer = stream_.userBuffer[0];
\r
9529 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9530 format = stream_.userFormat;
\r
9533 // Do byte swapping if necessary.
\r
9534 if ( stream_.doByteSwap[0] )
\r
9535 byteSwapBuffer( buffer, samples, format );
\r
9537 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9539 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9540 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9541 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9542 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9543 handle->triggered = true;
\r
9546 // Write samples to device.
\r
9547 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9549 if ( result == -1 ) {
\r
9550 // We'll assume this is an underrun, though there isn't a
\r
9551 // specific means for determining that.
\r
9552 handle->xrun[0] = true;
\r
9553 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9554 error( RtAudioError::WARNING );
\r
9555 // Continue on to input section.
\r
9559 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9561 // Setup parameters.
\r
9562 if ( stream_.doConvertBuffer[1] ) {
\r
9563 buffer = stream_.deviceBuffer;
\r
9564 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9565 format = stream_.deviceFormat[1];
\r
9568 buffer = stream_.userBuffer[1];
\r
9569 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9570 format = stream_.userFormat;
\r
9573 // Read samples from device.
\r
9574 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9576 if ( result == -1 ) {
\r
9577 // We'll assume this is an overrun, though there isn't a
\r
9578 // specific means for determining that.
\r
9579 handle->xrun[1] = true;
\r
9580 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9581 error( RtAudioError::WARNING );
\r
9585 // Do byte swapping if necessary.
\r
9586 if ( stream_.doByteSwap[1] )
\r
9587 byteSwapBuffer( buffer, samples, format );
\r
9589 // Do buffer conversion if necessary.
\r
9590 if ( stream_.doConvertBuffer[1] )
\r
9591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9595 MUTEX_UNLOCK( &stream_.mutex );
\r
9597 RtApi::tickStreamTime();
\r
9598 if ( doStopStream == 1 ) this->stopStream();
\r
9601 static void *ossCallbackHandler( void *ptr )
\r
9603 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9604 RtApiOss *object = (RtApiOss *) info->object;
\r
9605 bool *isRunning = &info->isRunning;
\r
9607 while ( *isRunning == true ) {
\r
9608 pthread_testcancel();
\r
9609 object->callbackEvent();
\r
9612 pthread_exit( NULL );
\r
9615 //******************** End of __LINUX_OSS__ *********************//
\r
9619 // *************************************************** //
\r
9621 // Protected common (OS-independent) RtAudio methods.
\r
9623 // *************************************************** //
\r
9625 // This method can be modified to control the behavior of error
\r
9626 // message printing.
\r
9627 void RtApi :: error( RtAudioError::Type type )
\r
9629 errorStream_.str(""); // clear the ostringstream
\r
9631 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9632 if ( errorCallback ) {
\r
9633 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9635 if ( firstErrorOccurred_ )
\r
9638 firstErrorOccurred_ = true;
\r
9639 const std::string errorMessage = errorText_;
\r
9641 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9642 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9646 errorCallback( type, errorMessage );
\r
9647 firstErrorOccurred_ = false;
\r
9651 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9652 std::cerr << '\n' << errorText_ << "\n\n";
\r
9653 else if ( type != RtAudioError::WARNING )
\r
9654 throw( RtAudioError( errorText_, type ) );
\r
9657 void RtApi :: verifyStream()
\r
9659 if ( stream_.state == STREAM_CLOSED ) {
\r
9660 errorText_ = "RtApi:: a stream is not open!";
\r
9661 error( RtAudioError::INVALID_USE );
\r
9665 void RtApi :: clearStreamInfo()
\r
9667 stream_.mode = UNINITIALIZED;
\r
9668 stream_.state = STREAM_CLOSED;
\r
9669 stream_.sampleRate = 0;
\r
9670 stream_.bufferSize = 0;
\r
9671 stream_.nBuffers = 0;
\r
9672 stream_.userFormat = 0;
\r
9673 stream_.userInterleaved = true;
\r
9674 stream_.streamTime = 0.0;
\r
9675 stream_.apiHandle = 0;
\r
9676 stream_.deviceBuffer = 0;
\r
9677 stream_.callbackInfo.callback = 0;
\r
9678 stream_.callbackInfo.userData = 0;
\r
9679 stream_.callbackInfo.isRunning = false;
\r
9680 stream_.callbackInfo.errorCallback = 0;
\r
9681 for ( int i=0; i<2; i++ ) {
\r
9682 stream_.device[i] = 11111;
\r
9683 stream_.doConvertBuffer[i] = false;
\r
9684 stream_.deviceInterleaved[i] = true;
\r
9685 stream_.doByteSwap[i] = false;
\r
9686 stream_.nUserChannels[i] = 0;
\r
9687 stream_.nDeviceChannels[i] = 0;
\r
9688 stream_.channelOffset[i] = 0;
\r
9689 stream_.deviceFormat[i] = 0;
\r
9690 stream_.latency[i] = 0;
\r
9691 stream_.userBuffer[i] = 0;
\r
9692 stream_.convertInfo[i].channels = 0;
\r
9693 stream_.convertInfo[i].inJump = 0;
\r
9694 stream_.convertInfo[i].outJump = 0;
\r
9695 stream_.convertInfo[i].inFormat = 0;
\r
9696 stream_.convertInfo[i].outFormat = 0;
\r
9697 stream_.convertInfo[i].inOffset.clear();
\r
9698 stream_.convertInfo[i].outOffset.clear();
\r
9702 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9704 if ( format == RTAUDIO_SINT16 )
\r
9706 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9708 else if ( format == RTAUDIO_FLOAT64 )
\r
9710 else if ( format == RTAUDIO_SINT24 )
\r
9712 else if ( format == RTAUDIO_SINT8 )
\r
9715 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9716 error( RtAudioError::WARNING );
\r
9721 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9723 if ( mode == INPUT ) { // convert device to user buffer
\r
9724 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9725 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9726 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9727 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9729 else { // convert user to device buffer
\r
9730 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9731 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9732 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9733 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9736 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9737 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9739 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9741 // Set up the interleave/deinterleave offsets.
\r
9742 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9743 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9744 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9745 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9746 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9747 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9748 stream_.convertInfo[mode].inJump = 1;
\r
9752 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9753 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9754 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9755 stream_.convertInfo[mode].outJump = 1;
\r
9759 else { // no (de)interleaving
\r
9760 if ( stream_.userInterleaved ) {
\r
9761 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9762 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9763 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9767 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9768 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9769 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9770 stream_.convertInfo[mode].inJump = 1;
\r
9771 stream_.convertInfo[mode].outJump = 1;
\r
9776 // Add channel offset.
\r
9777 if ( firstChannel > 0 ) {
\r
9778 if ( stream_.deviceInterleaved[mode] ) {
\r
9779 if ( mode == OUTPUT ) {
\r
9780 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9781 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9784 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9785 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9789 if ( mode == OUTPUT ) {
\r
9790 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9791 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9794 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9795 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9801 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9803 // This function does format conversion, input/output channel compensation, and
\r
9804 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9805 // the lower three bytes of a 32-bit integer.
\r
9807 // Clear our device buffer when in/out duplex device channels are different
\r
9808 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9809 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9810 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9813 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9815 Float64 *out = (Float64 *)outBuffer;
\r
9817 if (info.inFormat == RTAUDIO_SINT8) {
\r
9818 signed char *in = (signed char *)inBuffer;
\r
9819 scale = 1.0 / 127.5;
\r
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9821 for (j=0; j<info.channels; j++) {
\r
9822 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9823 out[info.outOffset[j]] += 0.5;
\r
9824 out[info.outOffset[j]] *= scale;
\r
9826 in += info.inJump;
\r
9827 out += info.outJump;
\r
9830 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9831 Int16 *in = (Int16 *)inBuffer;
\r
9832 scale = 1.0 / 32767.5;
\r
9833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9834 for (j=0; j<info.channels; j++) {
\r
9835 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9836 out[info.outOffset[j]] += 0.5;
\r
9837 out[info.outOffset[j]] *= scale;
\r
9839 in += info.inJump;
\r
9840 out += info.outJump;
\r
9843 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9844 Int24 *in = (Int24 *)inBuffer;
\r
9845 scale = 1.0 / 8388607.5;
\r
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9847 for (j=0; j<info.channels; j++) {
\r
9848 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9849 out[info.outOffset[j]] += 0.5;
\r
9850 out[info.outOffset[j]] *= scale;
\r
9852 in += info.inJump;
\r
9853 out += info.outJump;
\r
9856 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9857 Int32 *in = (Int32 *)inBuffer;
\r
9858 scale = 1.0 / 2147483647.5;
\r
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9860 for (j=0; j<info.channels; j++) {
\r
9861 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9862 out[info.outOffset[j]] += 0.5;
\r
9863 out[info.outOffset[j]] *= scale;
\r
9865 in += info.inJump;
\r
9866 out += info.outJump;
\r
9869 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9870 Float32 *in = (Float32 *)inBuffer;
\r
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9872 for (j=0; j<info.channels; j++) {
\r
9873 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9875 in += info.inJump;
\r
9876 out += info.outJump;
\r
9879 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9880 // Channel compensation and/or (de)interleaving only.
\r
9881 Float64 *in = (Float64 *)inBuffer;
\r
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9883 for (j=0; j<info.channels; j++) {
\r
9884 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9886 in += info.inJump;
\r
9887 out += info.outJump;
\r
9891 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9893 Float32 *out = (Float32 *)outBuffer;
\r
9895 if (info.inFormat == RTAUDIO_SINT8) {
\r
9896 signed char *in = (signed char *)inBuffer;
\r
9897 scale = (Float32) ( 1.0 / 127.5 );
\r
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9899 for (j=0; j<info.channels; j++) {
\r
9900 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9901 out[info.outOffset[j]] += 0.5;
\r
9902 out[info.outOffset[j]] *= scale;
\r
9904 in += info.inJump;
\r
9905 out += info.outJump;
\r
9908 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9909 Int16 *in = (Int16 *)inBuffer;
\r
9910 scale = (Float32) ( 1.0 / 32767.5 );
\r
9911 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9912 for (j=0; j<info.channels; j++) {
\r
9913 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9914 out[info.outOffset[j]] += 0.5;
\r
9915 out[info.outOffset[j]] *= scale;
\r
9917 in += info.inJump;
\r
9918 out += info.outJump;
\r
9921 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9922 Int24 *in = (Int24 *)inBuffer;
\r
9923 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9925 for (j=0; j<info.channels; j++) {
\r
9926 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9927 out[info.outOffset[j]] += 0.5;
\r
9928 out[info.outOffset[j]] *= scale;
\r
9930 in += info.inJump;
\r
9931 out += info.outJump;
\r
9934 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9935 Int32 *in = (Int32 *)inBuffer;
\r
9936 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9937 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9938 for (j=0; j<info.channels; j++) {
\r
9939 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9940 out[info.outOffset[j]] += 0.5;
\r
9941 out[info.outOffset[j]] *= scale;
\r
9943 in += info.inJump;
\r
9944 out += info.outJump;
\r
9947 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9948 // Channel compensation and/or (de)interleaving only.
\r
9949 Float32 *in = (Float32 *)inBuffer;
\r
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9951 for (j=0; j<info.channels; j++) {
\r
9952 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9954 in += info.inJump;
\r
9955 out += info.outJump;
\r
9958 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9959 Float64 *in = (Float64 *)inBuffer;
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9969 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9970 Int32 *out = (Int32 *)outBuffer;
\r
9971 if (info.inFormat == RTAUDIO_SINT8) {
\r
9972 signed char *in = (signed char *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9976 out[info.outOffset[j]] <<= 24;
\r
9978 in += info.inJump;
\r
9979 out += info.outJump;
\r
9982 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9983 Int16 *in = (Int16 *)inBuffer;
\r
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9985 for (j=0; j<info.channels; j++) {
\r
9986 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9987 out[info.outOffset[j]] <<= 16;
\r
9989 in += info.inJump;
\r
9990 out += info.outJump;
\r
9993 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9994 Int24 *in = (Int24 *)inBuffer;
\r
9995 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9996 for (j=0; j<info.channels; j++) {
\r
9997 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9998 out[info.outOffset[j]] <<= 8;
\r
10000 in += info.inJump;
\r
10001 out += info.outJump;
\r
10004 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10005 // Channel compensation and/or (de)interleaving only.
\r
10006 Int32 *in = (Int32 *)inBuffer;
\r
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10008 for (j=0; j<info.channels; j++) {
\r
10009 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10011 in += info.inJump;
\r
10012 out += info.outJump;
\r
10015 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10016 Float32 *in = (Float32 *)inBuffer;
\r
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10018 for (j=0; j<info.channels; j++) {
\r
10019 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10021 in += info.inJump;
\r
10022 out += info.outJump;
\r
10025 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10026 Float64 *in = (Float64 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10036 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10037 Int24 *out = (Int24 *)outBuffer;
\r
10038 if (info.inFormat == RTAUDIO_SINT8) {
\r
10039 signed char *in = (signed char *)inBuffer;
\r
10040 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10041 for (j=0; j<info.channels; j++) {
\r
10042 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10043 //out[info.outOffset[j]] <<= 16;
\r
10045 in += info.inJump;
\r
10046 out += info.outJump;
\r
10049 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10050 Int16 *in = (Int16 *)inBuffer;
\r
10051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10052 for (j=0; j<info.channels; j++) {
\r
10053 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10054 //out[info.outOffset[j]] <<= 8;
\r
10056 in += info.inJump;
\r
10057 out += info.outJump;
\r
10060 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10061 // Channel compensation and/or (de)interleaving only.
\r
10062 Int24 *in = (Int24 *)inBuffer;
\r
10063 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10064 for (j=0; j<info.channels; j++) {
\r
10065 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10067 in += info.inJump;
\r
10068 out += info.outJump;
\r
10071 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10072 Int32 *in = (Int32 *)inBuffer;
\r
10073 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10074 for (j=0; j<info.channels; j++) {
\r
10075 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10076 //out[info.outOffset[j]] >>= 8;
\r
10078 in += info.inJump;
\r
10079 out += info.outJump;
\r
10082 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10083 Float32 *in = (Float32 *)inBuffer;
\r
10084 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10085 for (j=0; j<info.channels; j++) {
\r
10086 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10088 in += info.inJump;
\r
10089 out += info.outJump;
\r
10092 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10093 Float64 *in = (Float64 *)inBuffer;
\r
10094 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10095 for (j=0; j<info.channels; j++) {
\r
10096 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10098 in += info.inJump;
\r
10099 out += info.outJump;
\r
10103 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10104 Int16 *out = (Int16 *)outBuffer;
\r
10105 if (info.inFormat == RTAUDIO_SINT8) {
\r
10106 signed char *in = (signed char *)inBuffer;
\r
10107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10108 for (j=0; j<info.channels; j++) {
\r
10109 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10110 out[info.outOffset[j]] <<= 8;
\r
10112 in += info.inJump;
\r
10113 out += info.outJump;
\r
10116 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10117 // Channel compensation and/or (de)interleaving only.
\r
10118 Int16 *in = (Int16 *)inBuffer;
\r
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10120 for (j=0; j<info.channels; j++) {
\r
10121 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10123 in += info.inJump;
\r
10124 out += info.outJump;
\r
10127 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10128 Int24 *in = (Int24 *)inBuffer;
\r
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10130 for (j=0; j<info.channels; j++) {
\r
10131 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10133 in += info.inJump;
\r
10134 out += info.outJump;
\r
10137 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10138 Int32 *in = (Int32 *)inBuffer;
\r
10139 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10140 for (j=0; j<info.channels; j++) {
\r
10141 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10143 in += info.inJump;
\r
10144 out += info.outJump;
\r
10147 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10148 Float32 *in = (Float32 *)inBuffer;
\r
10149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10150 for (j=0; j<info.channels; j++) {
\r
10151 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10153 in += info.inJump;
\r
10154 out += info.outJump;
\r
10157 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10158 Float64 *in = (Float64 *)inBuffer;
\r
10159 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10160 for (j=0; j<info.channels; j++) {
\r
10161 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10163 in += info.inJump;
\r
10164 out += info.outJump;
\r
10168 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10169 signed char *out = (signed char *)outBuffer;
\r
10170 if (info.inFormat == RTAUDIO_SINT8) {
\r
10171 // Channel compensation and/or (de)interleaving only.
\r
10172 signed char *in = (signed char *)inBuffer;
\r
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10174 for (j=0; j<info.channels; j++) {
\r
10175 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10177 in += info.inJump;
\r
10178 out += info.outJump;
\r
10181 if (info.inFormat == RTAUDIO_SINT16) {
\r
10182 Int16 *in = (Int16 *)inBuffer;
\r
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10184 for (j=0; j<info.channels; j++) {
\r
10185 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10187 in += info.inJump;
\r
10188 out += info.outJump;
\r
10191 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10192 Int24 *in = (Int24 *)inBuffer;
\r
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10194 for (j=0; j<info.channels; j++) {
\r
10195 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10197 in += info.inJump;
\r
10198 out += info.outJump;
\r
10201 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10202 Int32 *in = (Int32 *)inBuffer;
\r
10203 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10204 for (j=0; j<info.channels; j++) {
\r
10205 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10207 in += info.inJump;
\r
10208 out += info.outJump;
\r
10211 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10212 Float32 *in = (Float32 *)inBuffer;
\r
10213 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10214 for (j=0; j<info.channels; j++) {
\r
10215 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10217 in += info.inJump;
\r
10218 out += info.outJump;
\r
10221 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10222 Float64 *in = (Float64 *)inBuffer;
\r
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10224 for (j=0; j<info.channels; j++) {
\r
10225 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10227 in += info.inJump;
\r
10228 out += info.outJump;
\r
10234 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10235 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10236 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10238 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10244 if ( format == RTAUDIO_SINT16 ) {
\r
10245 for ( unsigned int i=0; i<samples; i++ ) {
\r
10246 // Swap 1st and 2nd bytes.
\r
10248 *(ptr) = *(ptr+1);
\r
10251 // Increment 2 bytes.
\r
10255 else if ( format == RTAUDIO_SINT32 ||
\r
10256 format == RTAUDIO_FLOAT32 ) {
\r
10257 for ( unsigned int i=0; i<samples; i++ ) {
\r
10258 // Swap 1st and 4th bytes.
\r
10260 *(ptr) = *(ptr+3);
\r
10263 // Swap 2nd and 3rd bytes.
\r
10266 *(ptr) = *(ptr+1);
\r
10269 // Increment 3 more bytes.
\r
10273 else if ( format == RTAUDIO_SINT24 ) {
\r
10274 for ( unsigned int i=0; i<samples; i++ ) {
\r
10275 // Swap 1st and 3rd bytes.
\r
10277 *(ptr) = *(ptr+2);
\r
10280 // Increment 2 more bytes.
\r
10284 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10285 for ( unsigned int i=0; i<samples; i++ ) {
\r
10286 // Swap 1st and 8th bytes
\r
10288 *(ptr) = *(ptr+7);
\r
10291 // Swap 2nd and 7th bytes
\r
10294 *(ptr) = *(ptr+5);
\r
10297 // Swap 3rd and 6th bytes
\r
10300 *(ptr) = *(ptr+3);
\r
10303 // Swap 4th and 5th bytes
\r
10306 *(ptr) = *(ptr+1);
\r
10309 // Increment 5 more bytes.
\r
10315 // Indentation settings for Vim and Emacs
\r
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
\r
10322 // vim: et sts=2 sw=2
\r