/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
\r
// RtAudio: Version 4.1.1pre
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections on the
// Windows backends, pthread mutexes on the POSIX backends, and no-op
// dummies otherwise (e.g. the dummy API build).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
// Close the currently open CoreAudio stream: stop the device callback(s)
// if running, destroy/remove the IOProc for each direction, free the
// internal user/device buffers, destroy the pthread condition variable,
// and reset the stream state to CLOSED.  If no stream is open, only a
// WARNING is raised.
// NOTE(review): this extract is missing several original lines (the
// embedded numbering skips), so some braces / early returns / #else
// branches are absent below — confirm against the full file.
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side: stop the device if the stream is running, then tear down
// the IOProc registered at open time.
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
// On OS X 10.5+, IOProcs are created/destroyed by ID; the older
// AudioDeviceRemoveIOProc() path below is the pre-10.5 fallback.
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only tear down separately when the input device is distinct
// from the output device (a duplex stream on one device shares the IOProc).
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (index 0 = output, 1 = input).
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
// Start the IOProc callback(s) for the open stream.  Raises a WARNING if
// the stream is already running; raises SYSTEM_ERROR (after the error-text
// is set) if AudioDeviceStart() fails for either direction.
// NOTE(review): some original lines are missing from this extract
// (embedded numbering skips), e.g. closing braces and goto/unwind lines.
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output (or shared duplex) device first.
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
// Start the input device separately only when it differs from the
// output device (same condition used in closeStream/stopStream).
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
// Reset the drain bookkeeping used by callbackEvent()/stopStream(),
// then mark the stream running.
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream.  For output/duplex streams, if a drain is not already
// in progress (drainCounter == 0) this sets drainCounter = 2 and blocks on
// the condition variable until the audio callback signals that pending
// output has been handled; then the device callback(s) are stopped.
// Raises a WARNING if already stopped, SYSTEM_ERROR on AudioDeviceStop()
// failure.
// NOTE(review): pthread_cond_wait() is passed stream_.mutex but no lock of
// that mutex is visible in this extract — POSIX requires the mutex to be
// locked by the caller; confirm against the full file.
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain was requested yet (e.g. an external
// stopStream() call); ask the callback to drain and wait to be signaled.
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
// Stop the input device only when distinct from the output device.
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining pending output: setting
// drainCounter = 2 directly tells the callback/stop logic to skip the
// drain-and-wait step that stopStream() would otherwise perform.
// Raises a WARNING if the stream is already stopped.
// NOTE(review): lines missing from this extract presumably go on to call
// stopStream() after setting drainCounter — confirm against the full file.
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo; recover the
// owning RtApiCore object, stop the stream, and terminate this thread.
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
// pthread_exit() never returns; the thread ends here.
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1687 // Don't bother draining input
\r
1688 if ( handle->drainCounter ) {
\r
1689 handle->drainCounter++;
\r
1693 AudioDeviceID inputDevice;
\r
1694 inputDevice = handle->id[1];
\r
1695 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1697 if ( handle->nStreams[1] == 1 ) {
\r
1698 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1699 convertBuffer( stream_.userBuffer[1],
\r
1700 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1701 stream_.convertInfo[1] );
\r
1703 else { // copy to user buffer
\r
1704 memcpy( stream_.userBuffer[1],
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1706 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1709 else { // read from multiple streams
\r
1710 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1711 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1713 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1714 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1715 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1716 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1717 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1720 else { // read from multiple multi-channel streams
\r
1721 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1722 Float32 *out, *in;
\r
1724 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1725 UInt32 outChannels = stream_.nUserChannels[1];
\r
1726 if ( stream_.doConvertBuffer[1] ) {
\r
1727 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1728 outChannels = stream_.nDeviceChannels[1];
\r
1731 if ( outInterleaved ) outOffset = 1;
\r
1732 else outOffset = stream_.bufferSize;
\r
1734 channelsLeft = outChannels;
\r
1735 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1737 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1738 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1741 // Account for possible channel offset in first stream
\r
1742 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1743 streamChannels -= stream_.channelOffset[1];
\r
1744 inJump = stream_.channelOffset[1];
\r
1748 // Account for possible unread channels at end of the last stream
\r
1749 if ( streamChannels > channelsLeft ) {
\r
1750 inJump = streamChannels - channelsLeft;
\r
1751 streamChannels = channelsLeft;
\r
1754 // Determine output buffer offsets and skips
\r
1755 if ( outInterleaved ) {
\r
1756 outJump = outChannels;
\r
1757 out += outChannels - channelsLeft;
\r
1761 out += (outChannels - channelsLeft) * outOffset;
\r
1764 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1765 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1766 out[j*outOffset] = *in++;
\r
1771 channelsLeft -= streamChannels;
\r
1775 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1776 convertBuffer( stream_.userBuffer[1],
\r
1777 stream_.deviceBuffer,
\r
1778 stream_.convertInfo[1] );
\r
1784 //MUTEX_UNLOCK( &stream_.mutex );
\r
1786 RtApi::tickStreamTime();
\r
1790 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1794 case kAudioHardwareNotRunningError:
\r
1795 return "kAudioHardwareNotRunningError";
\r
1797 case kAudioHardwareUnspecifiedError:
\r
1798 return "kAudioHardwareUnspecifiedError";
\r
1800 case kAudioHardwareUnknownPropertyError:
\r
1801 return "kAudioHardwareUnknownPropertyError";
\r
1803 case kAudioHardwareBadPropertySizeError:
\r
1804 return "kAudioHardwareBadPropertySizeError";
\r
1806 case kAudioHardwareIllegalOperationError:
\r
1807 return "kAudioHardwareIllegalOperationError";
\r
1809 case kAudioHardwareBadObjectError:
\r
1810 return "kAudioHardwareBadObjectError";
\r
1812 case kAudioHardwareBadDeviceError:
\r
1813 return "kAudioHardwareBadDeviceError";
\r
1815 case kAudioHardwareBadStreamError:
\r
1816 return "kAudioHardwareBadStreamError";
\r
1818 case kAudioHardwareUnsupportedOperationError:
\r
1819 return "kAudioHardwareUnsupportedOperationError";
\r
1821 case kAudioDeviceUnsupportedFormatError:
\r
1822 return "kAudioDeviceUnsupportedFormatError";
\r
1824 case kAudioDevicePermissionsError:
\r
1825 return "kAudioDevicePermissionsError";
\r
1828 return "CoreAudio unknown error";
\r
1832 //******************** End of __MACOSX_CORE__ *********************//
\r
1835 #if defined(__UNIX_JACK__)
\r
1837 // JACK is a low-latency audio server, originally written for the
\r
1838 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1839 // connect a number of different applications to an audio device, as
\r
1840 // well as allowing them to share audio between themselves.
\r
1842 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1843 // have ports connected to the server. The JACK server is typically
\r
1844 // started in a terminal as follows:
\r
1846 // .jackd -d alsa -d hw:0
\r
1848 // or through an interface program such as qjackctl. Many of the
\r
1849 // parameters normally set for a stream are fixed by the JACK server
\r
1850 // and can be specified when the JACK server is started. In
\r
1853 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1855 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1856 // frames, and number of buffers = 4. Once the server is running, it
\r
1857 // is not possible to override these values. If the values are not
\r
1858 // specified in the command-line, the JACK server uses default values.
\r
1860 // The JACK server does not have to be running when an instance of
\r
1861 // RtApiJack is created, though the function getDeviceCount() will
\r
1862 // report 0 devices found until JACK has been started. When no
\r
1863 // devices are available (i.e., the JACK server is not running), a
\r
1864 // stream cannot be opened.
\r
1866 #include <jack/jack.h>
\r
1867 #include <unistd.h>
\r
1870 // A structure to hold various information related to the Jack API
\r
1871 // implementation.
\r
1872 struct JackHandle {
\r
1873 jack_client_t *client;
\r
1874 jack_port_t **ports[2];
\r
1875 std::string deviceName[2];
\r
1877 pthread_cond_t condition;
\r
1878 int drainCounter; // Tracks callback counts when draining
\r
1879 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1882 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error sink: installed via jack_set_error_function() to suppress
// JACK's internal error reporting when debugging is disabled.
static void jackSilentError( const char * /*message*/ ) {}
\r
1887 RtApiJack :: RtApiJack()
\r
1889 // Nothing to do here.
\r
1890 #if !defined(__RTAUDIO_DEBUG__)
\r
1891 // Turn off Jack's internal error reporting.
\r
1892 jack_set_error_function( &jackSilentError );
\r
1896 RtApiJack :: ~RtApiJack()
\r
1898 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1901 unsigned int RtApiJack :: getDeviceCount( void )
\r
1903 // See if we can become a jack client.
\r
1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1905 jack_status_t *status = NULL;
\r
1906 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1907 if ( client == 0 ) return 0;
\r
1909 const char **ports;
\r
1910 std::string port, previousPort;
\r
1911 unsigned int nChannels = 0, nDevices = 0;
\r
1912 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1914 // Parse the port names up to the first colon (:).
\r
1915 size_t iColon = 0;
\r
1917 port = (char *) ports[ nChannels ];
\r
1918 iColon = port.find(":");
\r
1919 if ( iColon != std::string::npos ) {
\r
1920 port = port.substr( 0, iColon + 1 );
\r
1921 if ( port != previousPort ) {
\r
1923 previousPort = port;
\r
1926 } while ( ports[++nChannels] );
\r
1930 jack_client_close( client );
\r
1934 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1936 RtAudio::DeviceInfo info;
\r
1937 info.probed = false;
\r
1939 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1940 jack_status_t *status = NULL;
\r
1941 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1942 if ( client == 0 ) {
\r
1943 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1944 error( RtAudioError::WARNING );
\r
1948 const char **ports;
\r
1949 std::string port, previousPort;
\r
1950 unsigned int nPorts = 0, nDevices = 0;
\r
1951 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1953 // Parse the port names up to the first colon (:).
\r
1954 size_t iColon = 0;
\r
1956 port = (char *) ports[ nPorts ];
\r
1957 iColon = port.find(":");
\r
1958 if ( iColon != std::string::npos ) {
\r
1959 port = port.substr( 0, iColon );
\r
1960 if ( port != previousPort ) {
\r
1961 if ( nDevices == device ) info.name = port;
\r
1963 previousPort = port;
\r
1966 } while ( ports[++nPorts] );
\r
1970 if ( device >= nDevices ) {
\r
1971 jack_client_close( client );
\r
1972 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1973 error( RtAudioError::INVALID_USE );
\r
1977 // Get the current jack server sample rate.
\r
1978 info.sampleRates.clear();
\r
1979 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1981 // Count the available ports containing the client name as device
\r
1982 // channels. Jack "input ports" equal RtAudio output channels.
\r
1983 unsigned int nChannels = 0;
\r
1984 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1986 while ( ports[ nChannels ] ) nChannels++;
\r
1988 info.outputChannels = nChannels;
\r
1991 // Jack "output ports" equal RtAudio input channels.
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.inputChannels = nChannels;
\r
2000 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2001 jack_client_close(client);
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 // If device opens for both playback and capture, we determine the channels.
\r
2008 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2009 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2011 // Jack always uses 32-bit floats.
\r
2012 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2014 // Jack doesn't provide default devices so we'll use the first available one.
\r
2015 if ( device == 0 && info.outputChannels > 0 )
\r
2016 info.isDefaultOutput = true;
\r
2017 if ( device == 0 && info.inputChannels > 0 )
\r
2018 info.isDefaultInput = true;
\r
2020 jack_client_close(client);
\r
2021 info.probed = true;
\r
2025 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2027 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2029 RtApiJack *object = (RtApiJack *) info->object;
\r
2030 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2035 // This function will be called by a spawned thread when the Jack
\r
2036 // server signals that it is shutting down. It is necessary to handle
\r
2037 // it this way because the jackShutdown() function must return before
\r
2038 // the jack_deactivate() function (in closeStream()) will return.
\r
2039 static void *jackCloseStream( void *ptr )
\r
2041 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2042 RtApiJack *object = (RtApiJack *) info->object;
\r
2044 object->closeStream();
\r
2046 pthread_exit( NULL );
\r
2048 static void jackShutdown( void *infoPointer )
\r
2050 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 // Check current stream state. If stopped, then we'll assume this
\r
2054 // was called as a result of a call to RtApiJack::stopStream (the
\r
2055 // deactivation of a client handle causes this function to be called).
\r
2056 // If not, we'll assume the Jack server is shutting down or some
\r
2057 // other problem occurred and we should close the stream.
\r
2058 if ( object->isStreamRunning() == false ) return;
\r
2060 ThreadHandle threadId;
\r
2061 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2062 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2065 static int jackXrun( void *infoPointer )
\r
2067 JackHandle *handle = (JackHandle *) infoPointer;
\r
2069 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2070 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2075 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2076 unsigned int firstChannel, unsigned int sampleRate,
\r
2077 RtAudioFormat format, unsigned int *bufferSize,
\r
2078 RtAudio::StreamOptions *options )
\r
2080 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2082 // Look for jack server and try to become a client (only do once per stream).
\r
2083 jack_client_t *client = 0;
\r
2084 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2085 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2086 jack_status_t *status = NULL;
\r
2087 if ( options && !options->streamName.empty() )
\r
2088 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2090 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2091 if ( client == 0 ) {
\r
2092 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2093 error( RtAudioError::WARNING );
\r
2098 // The handle must have been created on an earlier pass.
\r
2099 client = handle->client;
\r
2102 const char **ports;
\r
2103 std::string port, previousPort, deviceName;
\r
2104 unsigned int nPorts = 0, nDevices = 0;
\r
2105 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2107 // Parse the port names up to the first colon (:).
\r
2108 size_t iColon = 0;
\r
2110 port = (char *) ports[ nPorts ];
\r
2111 iColon = port.find(":");
\r
2112 if ( iColon != std::string::npos ) {
\r
2113 port = port.substr( 0, iColon );
\r
2114 if ( port != previousPort ) {
\r
2115 if ( nDevices == device ) deviceName = port;
\r
2117 previousPort = port;
\r
2120 } while ( ports[++nPorts] );
\r
2124 if ( device >= nDevices ) {
\r
2125 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2129 // Count the available ports containing the client name as device
\r
2130 // channels. Jack "input ports" equal RtAudio output channels.
\r
2131 unsigned int nChannels = 0;
\r
2132 unsigned long flag = JackPortIsInput;
\r
2133 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2134 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2136 while ( ports[ nChannels ] ) nChannels++;
\r
2140 // Compare the jack ports for specified client to the requested number of channels.
\r
2141 if ( nChannels < (channels + firstChannel) ) {
\r
2142 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2143 errorText_ = errorStream_.str();
\r
2147 // Check the jack server sample rate.
\r
2148 unsigned int jackRate = jack_get_sample_rate( client );
\r
2149 if ( sampleRate != jackRate ) {
\r
2150 jack_client_close( client );
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2155 stream_.sampleRate = jackRate;
\r
2157 // Get the latency of the JACK port.
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2159 if ( ports[ firstChannel ] ) {
\r
2160 // Added by Ge Wang
\r
2161 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2162 // the range (usually the min and max are equal)
\r
2163 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2164 // get the latency range
\r
2165 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2166 // be optimistic, use the min!
\r
2167 stream_.latency[mode] = latrange.min;
\r
2168 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2172 // The jack server always uses 32-bit floating-point data.
\r
2173 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2174 stream_.userFormat = format;
\r
2176 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2177 else stream_.userInterleaved = true;
\r
2179 // Jack always uses non-interleaved buffers.
\r
2180 stream_.deviceInterleaved[mode] = false;
\r
2182 // Jack always provides host byte-ordered data.
\r
2183 stream_.doByteSwap[mode] = false;
\r
2185 // Get the buffer size. The buffer size and number of buffers
\r
2186 // (periods) is set when the jack server is started.
\r
2187 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2188 *bufferSize = stream_.bufferSize;
\r
2190 stream_.nDeviceChannels[mode] = channels;
\r
2191 stream_.nUserChannels[mode] = channels;
\r
2193 // Set flags for buffer conversion.
\r
2194 stream_.doConvertBuffer[mode] = false;
\r
2195 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2197 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2198 stream_.nUserChannels[mode] > 1 )
\r
2199 stream_.doConvertBuffer[mode] = true;
\r
2201 // Allocate our JackHandle structure for the stream.
\r
2202 if ( handle == 0 ) {
\r
2204 handle = new JackHandle;
\r
2206 catch ( std::bad_alloc& ) {
\r
2207 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2211 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2215 stream_.apiHandle = (void *) handle;
\r
2216 handle->client = client;
\r
2218 handle->deviceName[mode] = deviceName;
\r
2220 // Allocate necessary internal buffers.
\r
2221 unsigned long bufferBytes;
\r
2222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2224 if ( stream_.userBuffer[mode] == NULL ) {
\r
2225 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2229 if ( stream_.doConvertBuffer[mode] ) {
\r
2231 bool makeBuffer = true;
\r
2232 if ( mode == OUTPUT )
\r
2233 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2234 else { // mode == INPUT
\r
2235 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2236 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2237 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2238 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2242 if ( makeBuffer ) {
\r
2243 bufferBytes *= *bufferSize;
\r
2244 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2245 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2246 if ( stream_.deviceBuffer == NULL ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2253 // Allocate memory for the Jack ports (channels) identifiers.
\r
2254 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2255 if ( handle->ports[mode] == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2260 stream_.device[mode] = device;
\r
2261 stream_.channelOffset[mode] = firstChannel;
\r
2262 stream_.state = STREAM_STOPPED;
\r
2263 stream_.callbackInfo.object = (void *) this;
\r
2265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2266 // We had already set up the stream for output.
\r
2267 stream_.mode = DUPLEX;
\r
2269 stream_.mode = mode;
\r
2270 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2271 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2272 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2275 // Register our ports.
\r
2277 if ( mode == OUTPUT ) {
\r
2278 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2279 snprintf( label, 64, "outport %d", i );
\r
2280 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2281 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2285 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2286 snprintf( label, 64, "inport %d", i );
\r
2287 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2288 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2292 // Setup the buffer conversion information structure. We don't use
\r
2293 // buffers to do channel offsets, so we override that parameter
\r
2295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2301 pthread_cond_destroy( &handle->condition );
\r
2302 jack_client_close( handle->client );
\r
2304 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2305 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2308 stream_.apiHandle = 0;
\r
2311 for ( int i=0; i<2; i++ ) {
\r
2312 if ( stream_.userBuffer[i] ) {
\r
2313 free( stream_.userBuffer[i] );
\r
2314 stream_.userBuffer[i] = 0;
\r
2318 if ( stream_.deviceBuffer ) {
\r
2319 free( stream_.deviceBuffer );
\r
2320 stream_.deviceBuffer = 0;
\r
2326 void RtApiJack :: closeStream( void )
\r
2328 if ( stream_.state == STREAM_CLOSED ) {
\r
2329 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2330 error( RtAudioError::WARNING );
\r
2334 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2337 if ( stream_.state == STREAM_RUNNING )
\r
2338 jack_deactivate( handle->client );
\r
2340 jack_client_close( handle->client );
\r
2344 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2345 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2346 pthread_cond_destroy( &handle->condition );
\r
2348 stream_.apiHandle = 0;
\r
2351 for ( int i=0; i<2; i++ ) {
\r
2352 if ( stream_.userBuffer[i] ) {
\r
2353 free( stream_.userBuffer[i] );
\r
2354 stream_.userBuffer[i] = 0;
\r
2358 if ( stream_.deviceBuffer ) {
\r
2359 free( stream_.deviceBuffer );
\r
2360 stream_.deviceBuffer = 0;
\r
2363 stream_.mode = UNINITIALIZED;
\r
2364 stream_.state = STREAM_CLOSED;
\r
2367 void RtApiJack :: startStream( void )
\r
2370 if ( stream_.state == STREAM_RUNNING ) {
\r
2371 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2372 error( RtAudioError::WARNING );
\r
2376 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2377 int result = jack_activate( handle->client );
\r
2379 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2383 const char **ports;
\r
2385 // Get the list of available ports.
\r
2386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2388 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2389 if ( ports == NULL) {
\r
2390 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2394 // Now make the port connections. Since RtAudio wasn't designed to
\r
2395 // allow the user to select particular channels of a device, we'll
\r
2396 // just open the first "nChannels" ports with offset.
\r
2397 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2399 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2400 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2403 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2410 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2418 // Now make the port connections. See note above.
\r
2419 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2421 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2422 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2425 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2432 handle->drainCounter = 0;
\r
2433 handle->internalDrain = false;
\r
2434 stream_.state = STREAM_RUNNING;
\r
2437 if ( result == 0 ) return;
\r
2438 error( RtAudioError::SYSTEM_ERROR );
\r
2441 void RtApiJack :: stopStream( void )
\r
2444 if ( stream_.state == STREAM_STOPPED ) {
\r
2445 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2446 error( RtAudioError::WARNING );
\r
2450 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2453 if ( handle->drainCounter == 0 ) {
\r
2454 handle->drainCounter = 2;
\r
2455 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2459 jack_deactivate( handle->client );
\r
2460 stream_.state = STREAM_STOPPED;
\r
2463 void RtApiJack :: abortStream( void )
\r
2466 if ( stream_.state == STREAM_STOPPED ) {
\r
2467 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2468 error( RtAudioError::WARNING );
\r
2472 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2473 handle->drainCounter = 2;
\r
2478 // This function will be called by a spawned thread when the user
\r
2479 // callback function signals that the stream should be stopped or
\r
2480 // aborted. It is necessary to handle it this way because the
\r
2481 // callbackEvent() function must return before the jack_deactivate()
\r
2482 // function will return.
\r
2483 static void *jackStopStream( void *ptr )
\r
2485 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2486 RtApiJack *object = (RtApiJack *) info->object;
\r
2488 object->stopStream();
\r
2489 pthread_exit( NULL );
\r
2492 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2494 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2495 if ( stream_.state == STREAM_CLOSED ) {
\r
2496 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2497 error( RtAudioError::WARNING );
\r
2500 if ( stream_.bufferSize != nframes ) {
\r
2501 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2502 error( RtAudioError::WARNING );
\r
2506 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 // Check if we were draining the stream and signal is finished.
\r
2510 if ( handle->drainCounter > 3 ) {
\r
2511 ThreadHandle threadId;
\r
2513 stream_.state = STREAM_STOPPING;
\r
2514 if ( handle->internalDrain == true )
\r
2515 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2517 pthread_cond_signal( &handle->condition );
\r
2521 // Invoke user callback first, to get fresh output data.
\r
2522 if ( handle->drainCounter == 0 ) {
\r
2523 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2524 double streamTime = getStreamTime();
\r
2525 RtAudioStreamStatus status = 0;
\r
2526 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2527 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2528 handle->xrun[0] = false;
\r
2530 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2531 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2532 handle->xrun[1] = false;
\r
2534 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2535 stream_.bufferSize, streamTime, status, info->userData );
\r
2536 if ( cbReturnValue == 2 ) {
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 handle->drainCounter = 2;
\r
2540 pthread_create( &id, NULL, jackStopStream, info );
\r
2543 else if ( cbReturnValue == 1 ) {
\r
2544 handle->drainCounter = 1;
\r
2545 handle->internalDrain = true;
\r
2549 jack_default_audio_sample_t *jackbuffer;
\r
2550 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2553 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memset( jackbuffer, 0, bufferBytes );
\r
2561 else if ( stream_.doConvertBuffer[0] ) {
\r
2563 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2565 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2566 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2567 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2570 else { // no buffer conversion
\r
2571 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2572 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2573 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2578 // Don't bother draining input
\r
2579 if ( handle->drainCounter ) {
\r
2580 handle->drainCounter++;
\r
2584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2586 if ( stream_.doConvertBuffer[1] ) {
\r
2587 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2588 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2589 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2593 else { // no buffer conversion
\r
2594 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2595 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2596 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2602 RtApi::tickStreamTime();
\r
2605 //******************** End of __UNIX_JACK__ *********************//
\r
2608 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2610 // The ASIO API is designed around a callback scheme, so this
\r
2611 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2612 // Jack. The primary constraint with ASIO is that it only allows
\r
2613 // access to a single driver at a time. Thus, it is not possible to
\r
2614 // have more than one simultaneous RtAudio stream.
\r
2616 // This implementation also requires a number of external ASIO files
\r
2617 // and a few global variables. The ASIO callback scheme does not
\r
2618 // allow for the passing of user data, so we must create a global
\r
2619 // pointer to our callbackInfo structure.
\r
2621 // On unix systems, we make use of a pthread condition variable.
\r
2622 // Since there is no equivalent in Windows, I hacked something based
\r
2623 // on information found in
\r
2624 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2626 #include "asiosys.h"
\r
2628 #include "iasiothiscallresolver.h"
\r
2629 #include "asiodrivers.h"
\r
2632 static AsioDrivers drivers;
\r
2633 static ASIOCallbacks asioCallbacks;
\r
2634 static ASIODriverInfo driverInfo;
\r
2635 static CallbackInfo *asioCallbackInfo;
\r
2636 static bool asioXRun;
\r
2638 struct AsioHandle {
\r
2639 int drainCounter; // Tracks callback counts when draining
\r
2640 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2641 ASIOBufferInfo *bufferInfos;
\r
2645 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2648 // Function declarations (definitions at end of section)
\r
2649 static const char* getAsioErrorString( ASIOError result );
\r
2650 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2651 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2653 RtApiAsio :: RtApiAsio()
\r
2655 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2656 // CoInitialize beforehand, but it must be for appartment threading
\r
2657 // (in which case, CoInitilialize will return S_FALSE here).
\r
2658 coInitialized_ = false;
\r
2659 HRESULT hr = CoInitialize( NULL );
\r
2660 if ( FAILED(hr) ) {
\r
2661 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2662 error( RtAudioError::WARNING );
\r
2664 coInitialized_ = true;
\r
2666 drivers.removeCurrentDriver();
\r
2667 driverInfo.asioVersion = 2;
\r
2669 // See note in DirectSound implementation about GetDesktopWindow().
\r
2670 driverInfo.sysRef = GetForegroundWindow();
\r
2673 RtApiAsio :: ~RtApiAsio()
\r
2675 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2676 if ( coInitialized_ ) CoUninitialize();
\r
2679 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2681 return (unsigned int) drivers.asioGetNumDev();
\r
2684 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2686 RtAudio::DeviceInfo info;
\r
2687 info.probed = false;
\r
2690 unsigned int nDevices = getDeviceCount();
\r
2691 if ( nDevices == 0 ) {
\r
2692 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2693 error( RtAudioError::INVALID_USE );
\r
2697 if ( device >= nDevices ) {
\r
2698 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2699 error( RtAudioError::INVALID_USE );
\r
2703 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2704 if ( stream_.state != STREAM_CLOSED ) {
\r
2705 if ( device >= devices_.size() ) {
\r
2706 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2707 error( RtAudioError::WARNING );
\r
2710 return devices_[ device ];
\r
2713 char driverName[32];
\r
2714 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2715 if ( result != ASE_OK ) {
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtAudioError::WARNING );
\r
2722 info.name = driverName;
\r
2724 if ( !drivers.loadDriver( driverName ) ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 result = ASIOInit( &driverInfo );
\r
2732 if ( result != ASE_OK ) {
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 // Determine the device channel information.
\r
2740 long inputChannels, outputChannels;
\r
2741 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2742 if ( result != ASE_OK ) {
\r
2743 drivers.removeCurrentDriver();
\r
2744 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2745 errorText_ = errorStream_.str();
\r
2746 error( RtAudioError::WARNING );
\r
2750 info.outputChannels = outputChannels;
\r
2751 info.inputChannels = inputChannels;
\r
2752 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2753 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2755 // Determine the supported sample rates.
\r
2756 info.sampleRates.clear();
\r
2757 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2758 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2759 if ( result == ASE_OK )
\r
2760 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2763 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2764 ASIOChannelInfo channelInfo;
\r
2765 channelInfo.channel = 0;
\r
2766 channelInfo.isInput = true;
\r
2767 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2768 result = ASIOGetChannelInfo( &channelInfo );
\r
2769 if ( result != ASE_OK ) {
\r
2770 drivers.removeCurrentDriver();
\r
2771 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2772 errorText_ = errorStream_.str();
\r
2773 error( RtAudioError::WARNING );
\r
2777 info.nativeFormats = 0;
\r
2778 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT16;
\r
2780 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_SINT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2784 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2785 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2786 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2787 info.nativeFormats |= RTAUDIO_SINT24;
\r
2789 if ( info.outputChannels > 0 )
\r
2790 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2791 if ( info.inputChannels > 0 )
\r
2792 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2794 info.probed = true;
\r
2795 drivers.removeCurrentDriver();
\r
2799 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2801 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2802 object->callbackEvent( index );
\r
2805 void RtApiAsio :: saveDeviceInfo( void )
\r
2809 unsigned int nDevices = getDeviceCount();
\r
2810 devices_.resize( nDevices );
\r
2811 for ( unsigned int i=0; i<nDevices; i++ )
\r
2812 devices_[i] = getDeviceInfo( i );
\r
2815 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2816 unsigned int firstChannel, unsigned int sampleRate,
\r
2817 RtAudioFormat format, unsigned int *bufferSize,
\r
2818 RtAudio::StreamOptions *options )
\r
2820 // For ASIO, a duplex stream MUST use the same driver.
\r
2821 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2822 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2826 char driverName[32];
\r
2827 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2834 // Only load the driver once for duplex stream.
\r
2835 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2836 // The getDeviceInfo() function will not work when a stream is open
\r
2837 // because ASIO does not allow multiple devices to run at the same
\r
2838 // time. Thus, we'll probe the system before opening a stream and
\r
2839 // save the results for use by getDeviceInfo().
\r
2840 this->saveDeviceInfo();
\r
2842 if ( !drivers.loadDriver( driverName ) ) {
\r
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2844 errorText_ = errorStream_.str();
\r
2848 result = ASIOInit( &driverInfo );
\r
2849 if ( result != ASE_OK ) {
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2856 // Check the device channel count.
\r
2857 long inputChannels, outputChannels;
\r
2858 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2859 if ( result != ASE_OK ) {
\r
2860 drivers.removeCurrentDriver();
\r
2861 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2862 errorText_ = errorStream_.str();
\r
2866 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2867 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2873 stream_.nDeviceChannels[mode] = channels;
\r
2874 stream_.nUserChannels[mode] = channels;
\r
2875 stream_.channelOffset[mode] = firstChannel;
\r
2877 // Verify the sample rate is supported.
\r
2878 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2879 if ( result != ASE_OK ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Get the current sample rate
\r
2887 ASIOSampleRate currentRate;
\r
2888 result = ASIOGetSampleRate( ¤tRate );
\r
2889 if ( result != ASE_OK ) {
\r
2890 drivers.removeCurrentDriver();
\r
2891 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2892 errorText_ = errorStream_.str();
\r
2896 // Set the sample rate only if necessary
\r
2897 if ( currentRate != sampleRate ) {
\r
2898 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2899 if ( result != ASE_OK ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2902 errorText_ = errorStream_.str();
\r
2907 // Determine the driver data type.
\r
2908 ASIOChannelInfo channelInfo;
\r
2909 channelInfo.channel = 0;
\r
2910 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2911 else channelInfo.isInput = true;
\r
2912 result = ASIOGetChannelInfo( &channelInfo );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Assuming WINDOWS host is always little-endian.
\r
2921 stream_.doByteSwap[mode] = false;
\r
2922 stream_.userFormat = format;
\r
2923 stream_.deviceFormat[mode] = 0;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2926 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2930 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2934 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2938 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2942 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2946 drivers.removeCurrentDriver();
\r
2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2948 errorText_ = errorStream_.str();
\r
2952 // Set the buffer size. For a duplex stream, this will end up
\r
2953 // setting the buffer size based on the input constraints, which
\r
2955 long minSize, maxSize, preferSize, granularity;
\r
2956 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2957 if ( result != ASE_OK ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2960 errorText_ = errorStream_.str();
\r
2964 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2965 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2966 else if ( granularity == -1 ) {
\r
2967 // Make sure bufferSize is a power of two.
\r
2968 int log2_of_min_size = 0;
\r
2969 int log2_of_max_size = 0;
\r
2971 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2972 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2973 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2976 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2977 int min_delta_num = log2_of_min_size;
\r
2979 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2980 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2981 if (current_delta < min_delta) {
\r
2982 min_delta = current_delta;
\r
2983 min_delta_num = i;
\r
2987 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2991 else if ( granularity != 0 ) {
\r
2992 // Set to an even multiple of granularity, rounding up.
\r
2993 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2997 drivers.removeCurrentDriver();
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3002 stream_.bufferSize = *bufferSize;
\r
3003 stream_.nBuffers = 2;
\r
3005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3006 else stream_.userInterleaved = true;
\r
3008 // ASIO always uses non-interleaved buffers.
\r
3009 stream_.deviceInterleaved[mode] = false;
\r
3011 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3013 if ( handle == 0 ) {
\r
3015 handle = new AsioHandle;
\r
3017 catch ( std::bad_alloc& ) {
\r
3018 //if ( handle == NULL ) {
\r
3019 drivers.removeCurrentDriver();
\r
3020 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3023 handle->bufferInfos = 0;
\r
3025 // Create a manual-reset event.
\r
3026 handle->condition = CreateEvent( NULL, // no security
\r
3027 TRUE, // manual-reset
\r
3028 FALSE, // non-signaled initially
\r
3029 NULL ); // unnamed
\r
3030 stream_.apiHandle = (void *) handle;
\r
3033 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3034 // and output separately, we'll have to dispose of previously
\r
3035 // created output buffers for a duplex stream.
\r
3036 long inputLatency, outputLatency;
\r
3037 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3038 ASIODisposeBuffers();
\r
3039 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3042 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3043 bool buffersAllocated = false;
\r
3044 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3045 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3046 if ( handle->bufferInfos == NULL ) {
\r
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3048 errorText_ = errorStream_.str();
\r
3052 ASIOBufferInfo *infos;
\r
3053 infos = handle->bufferInfos;
\r
3054 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3055 infos->isInput = ASIOFalse;
\r
3056 infos->channelNum = i + stream_.channelOffset[0];
\r
3057 infos->buffers[0] = infos->buffers[1] = 0;
\r
3059 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3060 infos->isInput = ASIOTrue;
\r
3061 infos->channelNum = i + stream_.channelOffset[1];
\r
3062 infos->buffers[0] = infos->buffers[1] = 0;
\r
3065 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3066 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3067 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3068 asioCallbacks.asioMessage = &asioMessages;
\r
3069 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3070 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3071 if ( result != ASE_OK ) {
\r
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3073 errorText_ = errorStream_.str();
\r
3076 buffersAllocated = true;
\r
3078 // Set flags for buffer conversion.
\r
3079 stream_.doConvertBuffer[mode] = false;
\r
3080 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3081 stream_.doConvertBuffer[mode] = true;
\r
3082 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3083 stream_.nUserChannels[mode] > 1 )
\r
3084 stream_.doConvertBuffer[mode] = true;
\r
3086 // Allocate necessary internal buffers
\r
3087 unsigned long bufferBytes;
\r
3088 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3089 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3090 if ( stream_.userBuffer[mode] == NULL ) {
\r
3091 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3095 if ( stream_.doConvertBuffer[mode] ) {
\r
3097 bool makeBuffer = true;
\r
3098 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3099 if ( mode == INPUT ) {
\r
3100 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3101 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3102 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3106 if ( makeBuffer ) {
\r
3107 bufferBytes *= *bufferSize;
\r
3108 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3109 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3110 if ( stream_.deviceBuffer == NULL ) {
\r
3111 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.state = STREAM_STOPPED;
\r
3120 asioCallbackInfo = &stream_.callbackInfo;
\r
3121 stream_.callbackInfo.object = (void *) this;
\r
3122 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3123 // We had already set up an output stream.
\r
3124 stream_.mode = DUPLEX;
\r
3126 stream_.mode = mode;
\r
3128 // Determine device latencies
\r
3129 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3130 if ( result != ASE_OK ) {
\r
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3132 errorText_ = errorStream_.str();
\r
3133 error( RtAudioError::WARNING); // warn but don't fail
\r
3136 stream_.latency[0] = outputLatency;
\r
3137 stream_.latency[1] = inputLatency;
\r
3140 // Setup the buffer conversion information structure. We don't use
\r
3141 // buffers to do channel offsets, so we override that parameter
\r
3143 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3148 if ( buffersAllocated )
\r
3149 ASIODisposeBuffers();
\r
3150 drivers.removeCurrentDriver();
\r
3153 CloseHandle( handle->condition );
\r
3154 if ( handle->bufferInfos )
\r
3155 free( handle->bufferInfos );
\r
3157 stream_.apiHandle = 0;
\r
3160 for ( int i=0; i<2; i++ ) {
\r
3161 if ( stream_.userBuffer[i] ) {
\r
3162 free( stream_.userBuffer[i] );
\r
3163 stream_.userBuffer[i] = 0;
\r
3167 if ( stream_.deviceBuffer ) {
\r
3168 free( stream_.deviceBuffer );
\r
3169 stream_.deviceBuffer = 0;
\r
3175 void RtApiAsio :: closeStream()
\r
3177 if ( stream_.state == STREAM_CLOSED ) {
\r
3178 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3179 error( RtAudioError::WARNING );
\r
3183 if ( stream_.state == STREAM_RUNNING ) {
\r
3184 stream_.state = STREAM_STOPPED;
\r
3187 ASIODisposeBuffers();
\r
3188 drivers.removeCurrentDriver();
\r
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3192 CloseHandle( handle->condition );
\r
3193 if ( handle->bufferInfos )
\r
3194 free( handle->bufferInfos );
\r
3196 stream_.apiHandle = 0;
\r
3199 for ( int i=0; i<2; i++ ) {
\r
3200 if ( stream_.userBuffer[i] ) {
\r
3201 free( stream_.userBuffer[i] );
\r
3202 stream_.userBuffer[i] = 0;
\r
3206 if ( stream_.deviceBuffer ) {
\r
3207 free( stream_.deviceBuffer );
\r
3208 stream_.deviceBuffer = 0;
\r
3211 stream_.mode = UNINITIALIZED;
\r
3212 stream_.state = STREAM_CLOSED;
\r
// NOTE(review): cleared by startStream(); presumably set when the stop
// thread has been spawned so the stream is not stopped twice — confirm
// against the code that sets it (not visible in this section).
bool stopThreadCalled = false;
\r
3217 void RtApiAsio :: startStream()
\r
3220 if ( stream_.state == STREAM_RUNNING ) {
\r
3221 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3222 error( RtAudioError::WARNING );
\r
3226 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3227 ASIOError result = ASIOStart();
\r
3228 if ( result != ASE_OK ) {
\r
3229 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3230 errorText_ = errorStream_.str();
\r
3234 handle->drainCounter = 0;
\r
3235 handle->internalDrain = false;
\r
3236 ResetEvent( handle->condition );
\r
3237 stream_.state = STREAM_RUNNING;
\r
3241 stopThreadCalled = false;
\r
3243 if ( result == ASE_OK ) return;
\r
3244 error( RtAudioError::SYSTEM_ERROR );
\r
3247 void RtApiAsio :: stopStream()
\r
3250 if ( stream_.state == STREAM_STOPPED ) {
\r
3251 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3252 error( RtAudioError::WARNING );
\r
3256 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3258 if ( handle->drainCounter == 0 ) {
\r
3259 handle->drainCounter = 2;
\r
3260 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3264 stream_.state = STREAM_STOPPED;
\r
3266 ASIOError result = ASIOStop();
\r
3267 if ( result != ASE_OK ) {
\r
3268 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3269 errorText_ = errorStream_.str();
\r
3272 if ( result == ASE_OK ) return;
\r
3273 error( RtAudioError::SYSTEM_ERROR );
\r
3276 void RtApiAsio :: abortStream()
\r
3279 if ( stream_.state == STREAM_STOPPED ) {
\r
3280 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3281 error( RtAudioError::WARNING );
\r
3285 // The following lines were commented-out because some behavior was
\r
3286 // noted where the device buffers need to be zeroed to avoid
\r
3287 // continuing sound, even when the device buffers are completely
\r
3288 // disposed. So now, calling abort is the same as calling stop.
\r
3289 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3290 // handle->drainCounter = 2;
\r
3294 // This function will be called by a spawned thread when the user
\r
3295 // callback function signals that the stream should be stopped or
\r
3296 // aborted. It is necessary to handle it this way because the
\r
3297 // callbackEvent() function must return before the ASIOStop()
\r
3298 // function will return.
\r
3299 static unsigned __stdcall asioStopStream( void *ptr )
\r
3301 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3302 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3304 object->stopStream();
\r
3305 _endthreadex( 0 );
\r
3309 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3311 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3312 if ( stream_.state == STREAM_CLOSED ) {
\r
3313 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3314 error( RtAudioError::WARNING );
\r
3318 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 // Check if we were draining the stream and signal if finished.
\r
3322 if ( handle->drainCounter > 3 ) {
\r
3324 stream_.state = STREAM_STOPPING;
\r
3325 if ( handle->internalDrain == false )
\r
3326 SetEvent( handle->condition );
\r
3327 else { // spawn a thread to stop the stream
\r
3328 unsigned threadId;
\r
3329 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3330 &stream_.callbackInfo, 0, &threadId );
\r
3335 // Invoke user callback to get fresh output data UNLESS we are
\r
3336 // draining stream.
\r
3337 if ( handle->drainCounter == 0 ) {
\r
3338 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3339 double streamTime = getStreamTime();
\r
3340 RtAudioStreamStatus status = 0;
\r
3341 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3342 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3345 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3346 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3349 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3350 stream_.bufferSize, streamTime, status, info->userData );
\r
3351 if ( cbReturnValue == 2 ) {
\r
3352 stream_.state = STREAM_STOPPING;
\r
3353 handle->drainCounter = 2;
\r
3354 unsigned threadId;
\r
3355 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3356 &stream_.callbackInfo, 0, &threadId );
\r
3359 else if ( cbReturnValue == 1 ) {
\r
3360 handle->drainCounter = 1;
\r
3361 handle->internalDrain = true;
\r
3365 unsigned int nChannels, bufferBytes, i, j;
\r
3366 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3369 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3371 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3373 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3374 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3375 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3379 else if ( stream_.doConvertBuffer[0] ) {
\r
3381 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3382 if ( stream_.doByteSwap[0] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3385 stream_.deviceFormat[0] );
\r
3387 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3388 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3389 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3390 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3396 if ( stream_.doByteSwap[0] )
\r
3397 byteSwapBuffer( stream_.userBuffer[0],
\r
3398 stream_.bufferSize * stream_.nUserChannels[0],
\r
3399 stream_.userFormat );
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3403 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3404 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3410 // Don't bother draining input
\r
3411 if ( handle->drainCounter ) {
\r
3412 handle->drainCounter++;
\r
3416 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3418 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3420 if (stream_.doConvertBuffer[1]) {
\r
3422 // Always interleave ASIO input data.
\r
3423 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3424 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3425 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3426 handle->bufferInfos[i].buffers[bufferIndex],
\r
3430 if ( stream_.doByteSwap[1] )
\r
3431 byteSwapBuffer( stream_.deviceBuffer,
\r
3432 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3433 stream_.deviceFormat[1] );
\r
3434 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3438 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3439 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3440 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3441 handle->bufferInfos[i].buffers[bufferIndex],
\r
3446 if ( stream_.doByteSwap[1] )
\r
3447 byteSwapBuffer( stream_.userBuffer[1],
\r
3448 stream_.bufferSize * stream_.nUserChannels[1],
\r
3449 stream_.userFormat );
\r
3454 // The following call was suggested by Malte Clasen. While the API
\r
3455 // documentation indicates it should not be required, some device
\r
3456 // drivers apparently do not function correctly without it.
\r
3457 ASIOOutputReady();
\r
3459 RtApi::tickStreamTime();
\r
3463 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3465 // The ASIO documentation says that this usually only happens during
\r
3466 // external sync. Audio processing is not stopped by the driver,
\r
3467 // actual sample rate might not have even changed, maybe only the
\r
3468 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3471 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3473 object->stopStream();
\r
3475 catch ( RtAudioError &exception ) {
\r
3476 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3480 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3483 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3487 switch( selector ) {
\r
3488 case kAsioSelectorSupported:
\r
3489 if ( value == kAsioResetRequest
\r
3490 || value == kAsioEngineVersion
\r
3491 || value == kAsioResyncRequest
\r
3492 || value == kAsioLatenciesChanged
\r
3493 // The following three were added for ASIO 2.0, you don't
\r
3494 // necessarily have to support them.
\r
3495 || value == kAsioSupportsTimeInfo
\r
3496 || value == kAsioSupportsTimeCode
\r
3497 || value == kAsioSupportsInputMonitor)
\r
3500 case kAsioResetRequest:
\r
3501 // Defer the task and perform the reset of the driver during the
\r
3502 // next "safe" situation. You cannot reset the driver right now,
\r
3503 // as this code is called from the driver. Reset the driver is
\r
3504 // done by completely destruct is. I.e. ASIOStop(),
\r
3505 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3507 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3510 case kAsioResyncRequest:
\r
3511 // This informs the application that the driver encountered some
\r
3512 // non-fatal data loss. It is used for synchronization purposes
\r
3513 // of different media. Added mainly to work around the Win16Mutex
\r
3514 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3515 // which could lose data because the Mutex was held too long by
\r
3516 // another thread. However a driver can issue it in other
\r
3517 // situations, too.
\r
3518 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3522 case kAsioLatenciesChanged:
\r
3523 // This will inform the host application that the drivers were
\r
3524 // latencies changed. Beware, it this does not mean that the
\r
3525 // buffer sizes have changed! You might need to update internal
\r
3527 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3530 case kAsioEngineVersion:
\r
3531 // Return the supported ASIO version of the host application. If
\r
3532 // a host application does not implement this selector, ASIO 1.0
\r
3533 // is assumed by the driver.
\r
3536 case kAsioSupportsTimeInfo:
\r
3537 // Informs the driver whether the
\r
3538 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3539 // For compatibility with ASIO 1.0 drivers the host application
\r
3540 // should always support the "old" bufferSwitch method, too.
\r
3543 case kAsioSupportsTimeCode:
\r
3544 // Informs the driver whether application is interested in time
\r
3545 // code info. If an application does not need to know about time
\r
3546 // code, the driver has less work to do.
\r
3553 static const char* getAsioErrorString( ASIOError result )
\r
3558 const char*message;
\r
3561 static const Messages m[] =
\r
3563 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3564 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3565 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3566 { ASE_InvalidMode, "Invalid mode." },
\r
3567 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3568 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3569 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3572 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3573 if ( m[i].value == result ) return m[i].message;
\r
3575 return "Unknown error.";
\r
3578 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3582 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3585 // - Introduces support for the Windows WASAPI API
\r
3586 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3587 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3588 // - Includes automatic internal conversion of sample rate, buffer size and channel count
\r
3593 #include <audioclient.h>
\r
3595 #include <mmdeviceapi.h>
\r
3596 #include <functiondiscoverykeys_devpkey.h>
\r
3598 //=============================================================================
\r
// Release a COM interface pointer if it is non-NULL, then reset it to
// NULL so a second SAFE_RELEASE on the same pointer is a harmless no-op.
// (Kept as a bare if-block, not do{}while(0), because some call sites in
// this file invoke it without a trailing semicolon.)
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3607 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3609 //-----------------------------------------------------------------------------
\r
3611 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3612 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3613 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3614 // provide intermediate storage for read / write synchronization.
\r
3615 class WasapiBuffer
\r
3619 : buffer_( NULL ),
\r
3628 // sets the length of the internal ring buffer
\r
3629 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3632 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3634 bufferSize_ = bufferSize;
\r
3639 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3640 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3642 if ( !buffer || // incoming buffer is NULL
\r
3643 bufferSize == 0 || // incoming buffer has no data
\r
3644 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3649 unsigned int relOutIndex = outIndex_;
\r
3650 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3651 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3652 relOutIndex += bufferSize_;
\r
3655 // "in" index can end on the "out" index but cannot begin at it
\r
3656 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3657 return false; // not enough space between "in" index and "out" index
\r
3660 // copy buffer from external to internal
\r
3661 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3662 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3663 int fromInSize = bufferSize - fromZeroSize;
\r
3667 case RTAUDIO_SINT8:
\r
3668 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3669 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3671 case RTAUDIO_SINT16:
\r
3672 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3673 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3675 case RTAUDIO_SINT24:
\r
3676 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3677 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3679 case RTAUDIO_SINT32:
\r
3680 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3681 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3683 case RTAUDIO_FLOAT32:
\r
3684 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3685 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3687 case RTAUDIO_FLOAT64:
\r
3688 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3689 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3693 // update "in" index
\r
3694 inIndex_ += bufferSize;
\r
3695 inIndex_ %= bufferSize_;
\r
3700 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3701 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3703 if ( !buffer || // incoming buffer is NULL
\r
3704 bufferSize == 0 || // incoming buffer has no data
\r
3705 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3710 unsigned int relInIndex = inIndex_;
\r
3711 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3712 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3713 relInIndex += bufferSize_;
\r
3716 // "out" index can begin at and end on the "in" index
\r
3717 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3718 return false; // not enough space between "out" index and "in" index
\r
3721 // copy buffer from internal to external
\r
3722 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3723 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3724 int fromOutSize = bufferSize - fromZeroSize;
\r
3728 case RTAUDIO_SINT8:
\r
3729 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3730 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3732 case RTAUDIO_SINT16:
\r
3733 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3734 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3736 case RTAUDIO_SINT24:
\r
3737 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3738 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3740 case RTAUDIO_SINT32:
\r
3741 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3742 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3744 case RTAUDIO_FLOAT32:
\r
3745 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3746 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3748 case RTAUDIO_FLOAT64:
\r
3749 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3750 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3754 // update "out" index
\r
3755 outIndex_ += bufferSize;
\r
3756 outIndex_ %= bufferSize_;
\r
3763 unsigned int bufferSize_;
\r
3764 unsigned int inIndex_;
\r
3765 unsigned int outIndex_;
\r
3768 //-----------------------------------------------------------------------------
\r
3770 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3771 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3772 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3773 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3774 // one rate and its multiple.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& inChannelCount,
\r
3778 const unsigned int& outChannelCount,
\r
3779 const unsigned int& inSampleRate,
\r
3780 const unsigned int& outSampleRate,
\r
3781 const unsigned int& inSampleCount,
\r
3782 unsigned int& outSampleCount,
\r
3783 const RtAudioFormat& format )
\r
3785 // calculate the new outSampleCount and relative sampleStep
\r
3786 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3787 float sampleStep = 1.0f / sampleRatio;
\r
3788 float inSampleFraction = 0.0f;
\r
3789 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3791 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3793 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3794 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3796 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3800 case RTAUDIO_SINT8:
\r
3801 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3803 case RTAUDIO_SINT16:
\r
3804 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3806 case RTAUDIO_SINT24:
\r
3807 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3809 case RTAUDIO_SINT32:
\r
3810 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3812 case RTAUDIO_FLOAT32:
\r
3813 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3815 case RTAUDIO_FLOAT64:
\r
3816 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3820 // jump to next in sample
\r
3821 inSampleFraction += sampleStep;
\r
3825 //-----------------------------------------------------------------------------
\r
3827 // A structure to hold various information related to the WASAPI implementation.
\r
3828 struct WasapiHandle
\r
3830 IAudioClient* captureAudioClient;
\r
3831 IAudioClient* renderAudioClient;
\r
3832 IAudioCaptureClient* captureClient;
\r
3833 IAudioRenderClient* renderClient;
\r
3834 HANDLE captureEvent;
\r
3835 HANDLE renderEvent;
\r
3838 : captureAudioClient( NULL ),
\r
3839 renderAudioClient( NULL ),
\r
3840 captureClient( NULL ),
\r
3841 renderClient( NULL ),
\r
3842 captureEvent( NULL ),
\r
3843 renderEvent( NULL ) {}
\r
3846 //=============================================================================
\r
3848 RtApiWasapi::RtApiWasapi()
\r
3849 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3851 // WASAPI can run either apartment or multi-threaded
\r
3852 HRESULT hr = CoInitialize( NULL );
\r
3853 if ( !FAILED( hr ) )
\r
3854 coInitialized_ = true;
\r
3856 // Instantiate device enumerator
\r
3857 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3858 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3859 ( void** ) &deviceEnumerator_ );
\r
3861 if ( FAILED( hr ) ) {
\r
3862 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3863 error( RtAudioError::DRIVER_ERROR );
\r
3867 //-----------------------------------------------------------------------------
\r
3869 RtApiWasapi::~RtApiWasapi()
\r
3871 if ( stream_.state != STREAM_CLOSED )
\r
3874 SAFE_RELEASE( deviceEnumerator_ );
\r
3876 // If this object previously called CoInitialize()
\r
3877 if ( coInitialized_ )
\r
3881 //=============================================================================
\r
3883 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3885 unsigned int captureDeviceCount = 0;
\r
3886 unsigned int renderDeviceCount = 0;
\r
3888 IMMDeviceCollection* captureDevices = NULL;
\r
3889 IMMDeviceCollection* renderDevices = NULL;
\r
3891 // Count capture devices
\r
3892 errorText_.clear();
\r
3893 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3894 if ( FAILED( hr ) ) {
\r
3895 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3899 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3900 if ( FAILED( hr ) ) {
\r
3901 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3905 // Count render devices
\r
3906 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3907 if ( FAILED( hr ) ) {
\r
3908 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3912 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3913 if ( FAILED( hr ) ) {
\r
3914 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3919 // release all references
\r
3920 SAFE_RELEASE( captureDevices );
\r
3921 SAFE_RELEASE( renderDevices );
\r
3923 if ( errorText_.empty() )
\r
3924 return captureDeviceCount + renderDeviceCount;
\r
3926 error( RtAudioError::DRIVER_ERROR );
\r
3930 //-----------------------------------------------------------------------------
\r
3932 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3934 RtAudio::DeviceInfo info;
\r
3935 unsigned int captureDeviceCount = 0;
\r
3936 unsigned int renderDeviceCount = 0;
\r
3937 std::wstring deviceName;
\r
3938 std::string defaultDeviceName;
\r
3939 bool isCaptureDevice = false;
\r
3941 PROPVARIANT deviceNameProp;
\r
3942 PROPVARIANT defaultDeviceNameProp;
\r
3944 IMMDeviceCollection* captureDevices = NULL;
\r
3945 IMMDeviceCollection* renderDevices = NULL;
\r
3946 IMMDevice* devicePtr = NULL;
\r
3947 IMMDevice* defaultDevicePtr = NULL;
\r
3948 IAudioClient* audioClient = NULL;
\r
3949 IPropertyStore* devicePropStore = NULL;
\r
3950 IPropertyStore* defaultDevicePropStore = NULL;
\r
3952 WAVEFORMATEX* deviceFormat = NULL;
\r
3953 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3956 info.probed = false;
\r
3958 // Count capture devices
\r
3959 errorText_.clear();
\r
3960 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3961 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3962 if ( FAILED( hr ) ) {
\r
3963 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3967 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3968 if ( FAILED( hr ) ) {
\r
3969 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3973 // Count render devices
\r
3974 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3980 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3981 if ( FAILED( hr ) ) {
\r
3982 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3986 // validate device index
\r
3987 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3989 errorType = RtAudioError::INVALID_USE;
\r
3993 // determine whether index falls within capture or render devices
\r
3994 if ( device >= renderDeviceCount ) {
\r
3995 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3996 if ( FAILED( hr ) ) {
\r
3997 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4000 isCaptureDevice = true;
\r
4003 hr = renderDevices->Item( device, &devicePtr );
\r
4004 if ( FAILED( hr ) ) {
\r
4005 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4008 isCaptureDevice = false;
\r
4011 // get default device name
\r
4012 if ( isCaptureDevice ) {
\r
4013 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4014 if ( FAILED( hr ) ) {
\r
4015 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4020 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4021 if ( FAILED( hr ) ) {
\r
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4027 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4028 if ( FAILED( hr ) ) {
\r
4029 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4032 PropVariantInit( &defaultDeviceNameProp );
\r
4034 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4040 deviceName = defaultDeviceNameProp.pwszVal;
\r
4041 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4044 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4045 if ( FAILED( hr ) ) {
\r
4046 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4050 PropVariantInit( &deviceNameProp );
\r
4052 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4053 if ( FAILED( hr ) ) {
\r
4054 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4058 deviceName = deviceNameProp.pwszVal;
\r
4059 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4062 if ( isCaptureDevice ) {
\r
4063 info.isDefaultInput = info.name == defaultDeviceName;
\r
4064 info.isDefaultOutput = false;
\r
4067 info.isDefaultInput = false;
\r
4068 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4072 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4073 if ( FAILED( hr ) ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4078 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4084 if ( isCaptureDevice ) {
\r
4085 info.inputChannels = deviceFormat->nChannels;
\r
4086 info.outputChannels = 0;
\r
4087 info.duplexChannels = 0;
\r
4090 info.inputChannels = 0;
\r
4091 info.outputChannels = deviceFormat->nChannels;
\r
4092 info.duplexChannels = 0;
\r
4096 info.sampleRates.clear();
\r
4098 // allow support for all sample rates as we have a built-in sample rate converter
\r
4099 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4100 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4104 info.nativeFormats = 0;
\r
4106 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4107 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4108 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4110 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4111 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4113 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4114 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4117 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4118 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4119 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4121 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4122 info.nativeFormats |= RTAUDIO_SINT8;
\r
4124 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4125 info.nativeFormats |= RTAUDIO_SINT16;
\r
4127 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4128 info.nativeFormats |= RTAUDIO_SINT24;
\r
4130 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4131 info.nativeFormats |= RTAUDIO_SINT32;
\r
4136 info.probed = true;
\r
4139 // release all references
\r
4140 PropVariantClear( &deviceNameProp );
\r
4141 PropVariantClear( &defaultDeviceNameProp );
\r
4143 SAFE_RELEASE( captureDevices );
\r
4144 SAFE_RELEASE( renderDevices );
\r
4145 SAFE_RELEASE( devicePtr );
\r
4146 SAFE_RELEASE( defaultDevicePtr );
\r
4147 SAFE_RELEASE( audioClient );
\r
4148 SAFE_RELEASE( devicePropStore );
\r
4149 SAFE_RELEASE( defaultDevicePropStore );
\r
4151 CoTaskMemFree( deviceFormat );
\r
4152 CoTaskMemFree( closestMatchFormat );
\r
4154 if ( !errorText_.empty() )
\r
4155 error( errorType );
\r
4159 //-----------------------------------------------------------------------------
\r
4161 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4163 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4164 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4172 //-----------------------------------------------------------------------------
\r
4174 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4176 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4177 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4185 //-----------------------------------------------------------------------------
\r
4187 void RtApiWasapi::closeStream( void )
\r
4189 if ( stream_.state == STREAM_CLOSED ) {
\r
4190 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4191 error( RtAudioError::WARNING );
\r
4195 if ( stream_.state != STREAM_STOPPED )
\r
4198 // clean up stream memory
\r
4199 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4200 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4202 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4205 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4206 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4208 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4209 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4211 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4212 stream_.apiHandle = NULL;
\r
4214 for ( int i = 0; i < 2; i++ ) {
\r
4215 if ( stream_.userBuffer[i] ) {
\r
4216 free( stream_.userBuffer[i] );
\r
4217 stream_.userBuffer[i] = 0;
\r
4221 if ( stream_.deviceBuffer ) {
\r
4222 free( stream_.deviceBuffer );
\r
4223 stream_.deviceBuffer = 0;
\r
4226 // update stream state
\r
4227 stream_.state = STREAM_CLOSED;
\r
4230 //-----------------------------------------------------------------------------
\r
4232 void RtApiWasapi::startStream( void )
\r
4236 if ( stream_.state == STREAM_RUNNING ) {
\r
4237 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4238 error( RtAudioError::WARNING );
\r
4242 // update stream state
\r
4243 stream_.state = STREAM_RUNNING;
\r
4245 // create WASAPI stream thread
\r
4246 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4248 if ( !stream_.callbackInfo.thread ) {
\r
4249 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4250 error( RtAudioError::THREAD_ERROR );
\r
4253 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4254 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4258 //-----------------------------------------------------------------------------
\r
4260 void RtApiWasapi::stopStream( void )
\r
4264 if ( stream_.state == STREAM_STOPPED ) {
\r
4265 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4266 error( RtAudioError::WARNING );
\r
4270 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4271 stream_.state = STREAM_STOPPING;
\r
4273 // wait until stream thread is stopped
\r
4274 while( stream_.state != STREAM_STOPPED ) {
\r
4278 // Wait for the last buffer to play before stopping.
\r
4279 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4281 // stop capture client if applicable
\r
4282 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4283 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4284 if ( FAILED( hr ) ) {
\r
4285 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4286 error( RtAudioError::DRIVER_ERROR );
\r
4291 // stop render client if applicable
\r
4292 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4293 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4294 if ( FAILED( hr ) ) {
\r
4295 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4296 error( RtAudioError::DRIVER_ERROR );
\r
4301 // close thread handle
\r
4302 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4303 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4304 error( RtAudioError::THREAD_ERROR );
\r
4308 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4311 //-----------------------------------------------------------------------------
\r
4313 void RtApiWasapi::abortStream( void )
\r
4317 if ( stream_.state == STREAM_STOPPED ) {
\r
4318 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4319 error( RtAudioError::WARNING );
\r
4323 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4324 stream_.state = STREAM_STOPPING;
\r
4326 // wait until stream thread is stopped
\r
4327 while ( stream_.state != STREAM_STOPPED ) {
\r
4331 // stop capture client if applicable
\r
4332 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4333 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4334 if ( FAILED( hr ) ) {
\r
4335 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4336 error( RtAudioError::DRIVER_ERROR );
\r
4341 // stop render client if applicable
\r
4342 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4343 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4344 if ( FAILED( hr ) ) {
\r
4345 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4346 error( RtAudioError::DRIVER_ERROR );
\r
4351 // close thread handle
\r
4352 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4353 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4354 error( RtAudioError::THREAD_ERROR );
\r
4358 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4361 //-----------------------------------------------------------------------------
\r
4363 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4364 unsigned int firstChannel, unsigned int sampleRate,
\r
4365 RtAudioFormat format, unsigned int* bufferSize,
\r
4366 RtAudio::StreamOptions* options )
\r
4368 bool methodResult = FAILURE;
\r
4369 unsigned int captureDeviceCount = 0;
\r
4370 unsigned int renderDeviceCount = 0;
\r
4372 IMMDeviceCollection* captureDevices = NULL;
\r
4373 IMMDeviceCollection* renderDevices = NULL;
\r
4374 IMMDevice* devicePtr = NULL;
\r
4375 WAVEFORMATEX* deviceFormat = NULL;
\r
4376 unsigned int bufferBytes;
\r
4377 stream_.state = STREAM_STOPPED;
\r
4379 // create API Handle if not already created
\r
4380 if ( !stream_.apiHandle )
\r
4381 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4383 // Count capture devices
\r
4384 errorText_.clear();
\r
4385 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4386 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4387 if ( FAILED( hr ) ) {
\r
4388 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4392 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4393 if ( FAILED( hr ) ) {
\r
4394 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4398 // Count render devices
\r
4399 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4400 if ( FAILED( hr ) ) {
\r
4401 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4405 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4406 if ( FAILED( hr ) ) {
\r
4407 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4411 // validate device index
\r
4412 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4413 errorType = RtAudioError::INVALID_USE;
\r
4414 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4418 // determine whether index falls within capture or render devices
\r
4419 if ( device >= renderDeviceCount ) {
\r
4420 if ( mode != INPUT ) {
\r
4421 errorType = RtAudioError::INVALID_USE;
\r
4422 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4426 // retrieve captureAudioClient from devicePtr
\r
4427 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4429 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4430 if ( FAILED( hr ) ) {
\r
4431 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4435 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4436 NULL, ( void** ) &captureAudioClient );
\r
4437 if ( FAILED( hr ) ) {
\r
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4442 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4443 if ( FAILED( hr ) ) {
\r
4444 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4448 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4449 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4452 if ( mode != OUTPUT ) {
\r
4453 errorType = RtAudioError::INVALID_USE;
\r
4454 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4458 // retrieve renderAudioClient from devicePtr
\r
4459 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4461 hr = renderDevices->Item( device, &devicePtr );
\r
4462 if ( FAILED( hr ) ) {
\r
4463 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4467 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4468 NULL, ( void** ) &renderAudioClient );
\r
4469 if ( FAILED( hr ) ) {
\r
4470 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4474 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4475 if ( FAILED( hr ) ) {
\r
4476 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4480 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4481 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4484 // fill stream data
\r
4485 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4486 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4487 stream_.mode = DUPLEX;
\r
4490 stream_.mode = mode;
\r
4493 stream_.device[mode] = device;
\r
4494 stream_.doByteSwap[mode] = false;
\r
4495 stream_.sampleRate = sampleRate;
\r
4496 stream_.bufferSize = *bufferSize;
\r
4497 stream_.nBuffers = 1;
\r
4498 stream_.nUserChannels[mode] = channels;
\r
4499 stream_.channelOffset[mode] = firstChannel;
\r
4500 stream_.userFormat = format;
\r
4501 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4503 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4504 stream_.userInterleaved = false;
\r
4506 stream_.userInterleaved = true;
\r
4507 stream_.deviceInterleaved[mode] = true;
\r
4509 // Set flags for buffer conversion.
\r
4510 stream_.doConvertBuffer[mode] = false;
\r
4511 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4512 stream_.doConvertBuffer[mode] = true;
\r
4513 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4514 stream_.nUserChannels[mode] > 1 )
\r
4515 stream_.doConvertBuffer[mode] = true;
\r
4517 if ( stream_.doConvertBuffer[mode] )
\r
4518 setConvertInfo( mode, 0 );
\r
4520 // Allocate necessary internal buffers
\r
4521 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4523 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4524 if ( !stream_.userBuffer[mode] ) {
\r
4525 errorType = RtAudioError::MEMORY_ERROR;
\r
4526 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4530 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4531 stream_.callbackInfo.priority = 15;
\r
4533 stream_.callbackInfo.priority = 0;
\r
4535 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4536 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4538 methodResult = SUCCESS;
\r
4542 SAFE_RELEASE( captureDevices );
\r
4543 SAFE_RELEASE( renderDevices );
\r
4544 SAFE_RELEASE( devicePtr );
\r
4545 CoTaskMemFree( deviceFormat );
\r
4547 // if method failed, close the stream
\r
4548 if ( methodResult == FAILURE )
\r
4551 if ( !errorText_.empty() )
\r
4552 error( errorType );
\r
4553 return methodResult;
\r
4556 //=============================================================================
\r
4558 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4561 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4566 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4569 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4574 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4577 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4582 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream thread.  Lazily initializes the capture/render audio
// clients on first entry, then loops until stream_.state leaves
// STREAM_STOPPING: pull captured frames -> run the user callback -> push
// rendered frames, with sample-rate/channel/format conversion between the
// device mix format and the user format on both sides.
// NOTE(review): this listing appears to have dropped lines relative to the
// original source (closing braces, 'goto Exit;' error paths, an 'Exit:'
// label before the cleanup section, guards around AvrtDll, and the final
// '}').  Reconcile against the canonical file before compiling.
4584 void RtApiWasapi::wasapiThread()
4586 // as this is a new thread, we must CoInitialize it
4587 CoInitialize( NULL );
4591 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4592 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4593 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4594 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4595 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4596 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4598 WAVEFORMATEX* captureFormat = NULL;
4599 WAVEFORMATEX* renderFormat = NULL;
4600 float captureSrRatio = 0.0f;
4601 float renderSrRatio = 0.0f;
4602 WasapiBuffer captureBuffer;
4603 WasapiBuffer renderBuffer;
4605 // declare local stream variables
4606 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4607 BYTE* streamBuffer = NULL;
4608 unsigned long captureFlags = 0;
4609 unsigned int bufferFrameCount = 0;
4610 unsigned int numFramesPadding = 0;
4611 unsigned int convBufferSize = 0;
4612 bool callbackPushed = false;
4613 bool callbackPulled = false;
4614 bool callbackStopped = false;
4615 int callbackResult = 0;
4617 // convBuffer is used to store converted buffers between WASAPI and the user
4618 char* convBuffer = NULL;
4619 unsigned int convBuffSize = 0;
4620 unsigned int deviceBuffSize = 0;
4622 errorText_.clear();
4623 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4625 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): casting a narrow string literal to LPCTSTR breaks under
// UNICODE builds (use LoadLibraryA or a TEXT() literal); the AvrtDll /
// GetProcAddress results are used unchecked here — the null guards may be
// among the lines dropped from this listing.  TODO confirm upstream.
4626 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4628 DWORD taskIndex = 0;
4629 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4630 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4631 FreeLibrary( AvrtDll );
4634 // start capture stream if applicable
4635 if ( captureAudioClient ) {
4636 hr = captureAudioClient->GetMixFormat( &captureFormat );
4637 if ( FAILED( hr ) ) {
4638 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of device mix rate to user rate; used to size buffers so a user
// bufferSize worth of frames always fits after resampling.
4642 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4644 // initialize capture stream according to desire buffer size
4645 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4646 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4648 if ( !captureClient ) {
4649 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4650 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4651 desiredBufferPeriod,
4652 desiredBufferPeriod,
4655 if ( FAILED( hr ) ) {
4656 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4660 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4661 ( void** ) &captureClient );
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4667 // configure captureEvent to trigger on every available capture buffer
4668 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4669 if ( !captureEvent ) {
4670 errorType = RtAudioError::SYSTEM_ERROR;
4671 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4675 hr = captureAudioClient->SetEventHandle( captureEvent );
4676 if ( FAILED( hr ) ) {
4677 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the lazily-created client/event back into the shared handle so
// stopStream()/closeStream() can release them.
4681 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4682 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4685 unsigned int inBufferSize = 0;
4686 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4687 if ( FAILED( hr ) ) {
4688 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4692 // scale outBufferSize according to stream->user sample rate ratio
4693 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4694 inBufferSize *= stream_.nDeviceChannels[INPUT];
4696 // set captureBuffer size
4697 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4699 // reset the capture stream
4700 hr = captureAudioClient->Reset();
4701 if ( FAILED( hr ) ) {
4702 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4706 // start the capture stream
4707 hr = captureAudioClient->Start();
4708 if ( FAILED( hr ) ) {
4709 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4714 // start render stream if applicable (mirrors the capture setup above)
4715 if ( renderAudioClient ) {
4716 hr = renderAudioClient->GetMixFormat( &renderFormat );
4717 if ( FAILED( hr ) ) {
4718 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4722 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4724 // initialize render stream according to desire buffer size
4725 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4726 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4728 if ( !renderClient ) {
4729 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4730 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4731 desiredBufferPeriod,
4732 desiredBufferPeriod,
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4740 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4741 ( void** ) &renderClient );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4747 // configure renderEvent to trigger on every available render buffer
4748 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4749 if ( !renderEvent ) {
4750 errorType = RtAudioError::SYSTEM_ERROR;
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4755 hr = renderAudioClient->SetEventHandle( renderEvent );
4756 if ( FAILED( hr ) ) {
4757 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4761 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4762 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4765 unsigned int outBufferSize = 0;
4766 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4767 if ( FAILED( hr ) ) {
4768 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4772 // scale inBufferSize according to user->stream sample rate ratio
4773 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4774 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4776 // set renderBuffer size
4777 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4779 // reset the render stream
4780 hr = renderAudioClient->Reset();
4781 if ( FAILED( hr ) ) {
4782 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4786 // start the render stream
4787 hr = renderAudioClient->Start();
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the conversion (convBuffer, device-rate samples) and staging
// (deviceBuffer, user-rate samples) scratch buffers; DUPLEX takes the
// worst case of the two directions.
4794 if ( stream_.mode == INPUT ) {
4795 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4796 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4798 else if ( stream_.mode == OUTPUT ) {
4799 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4800 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4802 else if ( stream_.mode == DUPLEX ) {
4803 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4804 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4805 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4806 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4809 convBuffer = ( char* ) malloc( convBuffSize );
4810 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4811 if ( !convBuffer || !stream_.deviceBuffer ) {
4812 errorType = RtAudioError::MEMORY_ERROR;
4813 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4817 // stream process loop: each iteration pulls capture data, runs the user
4818 while ( stream_.state != STREAM_STOPPING ) {
4819 if ( !callbackPulled ) {
4822 // 1. Pull callback buffer from inputBuffer
4823 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4824 // Convert callback buffer to user format
4826 if ( captureAudioClient ) {
4827 // Pull callback buffer from inputBuffer
4828 callbackPulled = captureBuffer.pullBuffer( convBuffer,
4829 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
4830 stream_.deviceFormat[INPUT] );
4832 if ( callbackPulled ) {
4833 // Convert callback buffer to user sample rate and channel count
4834 convertBufferWasapi( stream_.deviceBuffer,
4836 stream_.nDeviceChannels[INPUT],
4837 stream_.nUserChannels[INPUT],
4838 captureFormat->nSamplesPerSec,
4839 stream_.sampleRate,
4840 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
4842 stream_.deviceFormat[INPUT] );
4844 if ( stream_.doConvertBuffer[INPUT] ) {
4845 // Convert callback buffer to user format
4846 convertBuffer( stream_.userBuffer[INPUT],
4847 stream_.deviceBuffer,
4848 stream_.convertInfo[INPUT] );
4851 // no conversion, simple copy deviceBuffer to userBuffer
4852 memcpy( stream_.userBuffer[INPUT],
4853 stream_.deviceBuffer,
4854 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4859 // if there is no capture stream, set callbackPulled flag
4860 callbackPulled = true;
4863 // Execute Callback
4864 // ================
4865 // 1. Execute user callback method
4866 // 2. Handle return value from callback
4868 // if callback has not requested the stream to stop
4869 if ( callbackPulled && !callbackStopped ) {
4870 // Execute user callback method
4871 callbackResult = callback( stream_.userBuffer[OUTPUT],
4872 stream_.userBuffer[INPUT],
4873 stream_.bufferSize,
4875 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4876 stream_.callbackInfo.userData );
4878 // Handle return value from callback
4879 if ( callbackResult == 1 ) {
4880 // instantiate a thread to stop this thread
4881 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4882 if ( !threadHandle ) {
4883 errorType = RtAudioError::THREAD_ERROR;
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4887 else if ( !CloseHandle( threadHandle ) ) {
4888 errorType = RtAudioError::THREAD_ERROR;
4889 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4893 callbackStopped = true;
4895 else if ( callbackResult == 2 ) {
4896 // instantiate a thread to stop this thread
4897 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4898 if ( !threadHandle ) {
4899 errorType = RtAudioError::THREAD_ERROR;
4900 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4903 else if ( !CloseHandle( threadHandle ) ) {
4904 errorType = RtAudioError::THREAD_ERROR;
4905 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4909 callbackStopped = true;
4914 // Callback Output
4915 // ===============
4916 // 1. Convert callback buffer to stream format
4917 // 2. Convert callback buffer to stream sample rate and channel count
4918 // 3. Push callback buffer into outputBuffer
4920 if ( renderAudioClient && callbackPulled ) {
4921 if ( stream_.doConvertBuffer[OUTPUT] ) {
4922 // Convert callback buffer to stream format
4923 convertBuffer( stream_.deviceBuffer,
4924 stream_.userBuffer[OUTPUT],
4925 stream_.convertInfo[OUTPUT] );
4927 // Convert callback buffer to stream sample rate and channel count
4928 convertBufferWasapi( convBuffer,
4929 stream_.deviceBuffer,
4930 stream_.nUserChannels[OUTPUT],
4931 stream_.nDeviceChannels[OUTPUT],
4932 stream_.sampleRate,
4933 renderFormat->nSamplesPerSec,
4934 stream_.bufferSize,
4936 stream_.deviceFormat[OUTPUT] );
4939 // Convert callback buffer to stream sample rate and channel count
4940 convertBufferWasapi( convBuffer,
4941 stream_.userBuffer[OUTPUT],
4942 stream_.nUserChannels[OUTPUT],
4943 stream_.nDeviceChannels[OUTPUT],
4944 stream_.sampleRate,
4945 renderFormat->nSamplesPerSec,
4946 stream_.bufferSize,
4948 stream_.deviceFormat[OUTPUT] );
4951 // Push callback buffer into outputBuffer
// NOTE(review): 'convBufferSize' (sample count produced by
// convertBufferWasapi, presumably via an out-parameter on the elided
// argument lines) is distinct from 'convBuffSize' (byte capacity of
// convBuffer) — easy to confuse; verify against the full argument lists.
4952 callbackPushed = renderBuffer.pushBuffer( convBuffer,
4953 convBufferSize * stream_.nDeviceChannels[OUTPUT],
4954 stream_.deviceFormat[OUTPUT] );
4959 // 1. Get capture buffer from stream
4960 // 2. Push capture buffer into inputBuffer
4961 // 3. If 2. was successful: Release capture buffer
4963 if ( captureAudioClient ) {
4964 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4965 if ( !callbackPulled ) {
4966 WaitForSingleObject( captureEvent, INFINITE );
4969 // Get capture buffer from stream
4970 hr = captureClient->GetBuffer( &streamBuffer,
4971 &bufferFrameCount,
4972 &captureFlags, NULL, NULL );
4973 if ( FAILED( hr ) ) {
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4978 if ( bufferFrameCount != 0 ) {
4979 // Push capture buffer into inputBuffer
4980 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4981 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4982 stream_.deviceFormat[INPUT] ) )
4984 // Release capture buffer
4985 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4986 if ( FAILED( hr ) ) {
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4993 // Inform WASAPI that capture was unsuccessful
4994 hr = captureClient->ReleaseBuffer( 0 );
4995 if ( FAILED( hr ) ) {
4996 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5003 // Inform WASAPI that capture was unsuccessful
5004 hr = captureClient->ReleaseBuffer( 0 );
5005 if ( FAILED( hr ) ) {
5006 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5014 // 1. Get render buffer from stream
5015 // 2. Pull next buffer from outputBuffer
5016 // 3. If 2. was successful: Fill render buffer with next buffer
5017 // Release render buffer
5019 if ( renderAudioClient ) {
5020 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5021 if ( callbackPulled && !callbackPushed ) {
5022 WaitForSingleObject( renderEvent, INFINITE );
5025 // Get render buffer from stream
5026 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5027 if ( FAILED( hr ) ) {
5028 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5032 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5033 if ( FAILED( hr ) ) {
5034 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5038 bufferFrameCount -= numFramesPadding;
5040 if ( bufferFrameCount != 0 ) {
5041 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5042 if ( FAILED( hr ) ) {
5043 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5047 // Pull next buffer from outputBuffer
5048 // Fill render buffer with next buffer
5049 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5050 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5051 stream_.deviceFormat[OUTPUT] ) )
5053 // Release render buffer
5054 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5055 if ( FAILED( hr ) ) {
5056 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5062 // Inform WASAPI that render was unsuccessful
5063 hr = renderClient->ReleaseBuffer( 0, 0 );
5064 if ( FAILED( hr ) ) {
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5072 // Inform WASAPI that render was unsuccessful
5073 hr = renderClient->ReleaseBuffer( 0, 0 );
5074 if ( FAILED( hr ) ) {
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5081 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5082 if ( callbackPushed ) {
5083 callbackPulled = false;
5086 // tick stream time
5087 RtApi::tickStreamTime();
// Cleanup section.  NOTE(review): the 'Exit:' label that the error paths
// above jump to appears to be missing from this listing.
5092 CoTaskMemFree( captureFormat );
5093 CoTaskMemFree( renderFormat );
5095 free ( convBuffer );
5099 // update stream state
5100 stream_.state = STREAM_STOPPED;
5102 if ( errorText_.empty() )
5105 error( errorType );
\r
5108 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5112 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5114 // Modified by Robin Davies, October 2005
\r
5115 // - Improvements to DirectX pointer chasing.
\r
5116 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5117 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5118 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5119 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5121 #include <dsound.h>
\r
5122 #include <assert.h>
\r
5123 #include <algorithm>
\r
5125 #if defined(__MINGW32__)
\r
5126 // missing from latest mingw winapi
\r
5127 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5128 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5129 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5130 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5133 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5135 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5136 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5139 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5141 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5142 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5143 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5144 return pointer >= earlierPointer && pointer < laterPointer;
\r
5147 // A structure to hold various information related to the DirectSound
5148 // API implementation.
// NOTE(review): the 'struct DsHandle {' opening line and several member
// declarations (apparently the per-direction id/buffer handles and xrun
// flags referenced by the constructor initializer below) are missing from
// this listing — restore from the canonical file before compiling.
5150 unsigned int drainCounter; // Tracks callback counts when draining
5151 bool internalDrain; // Indicates if stop is initiated from callback or not.
5155 UINT bufferPointer[2];
5156 DWORD dsBufferSize[2];
5157 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters and the [output, input] slot pairs.
5161 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5164 // Declarations for utility functions, callbacks, and structures
\r
5165 // specific to the DirectSound implementation.
\r
5166 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5167 LPCTSTR description,
\r
5169 LPVOID lpContext );
\r
5171 static const char* getErrorString( int code );
\r
5173 static unsigned __stdcall callbackHandler( void *ptr );
\r
5182 : found(false) { validId[0] = false; validId[1] = false; }
\r
5185 struct DsProbeData {
\r
5187 std::vector<struct DsDevice>* dsDevices;
\r
5190 RtApiDs :: RtApiDs()
\r
5192 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5193 // accept whatever the mainline chose for a threading model.
\r
5194 coInitialized_ = false;
\r
5195 HRESULT hr = CoInitialize( NULL );
\r
5196 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5199 RtApiDs :: ~RtApiDs()
\r
5201 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5202 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5205 // The DirectSound default output is always the first device.
\r
5206 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5211 // The DirectSound default input is always the first input device,
\r
5212 // which is the first capture device enumerated.
\r
5213 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5218 unsigned int RtApiDs :: getDeviceCount( void )
\r
5220 // Set query flag for previously found devices to false, so that we
\r
5221 // can check for any devices that have disappeared.
\r
5222 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5223 dsDevices[i].found = false;
\r
5225 // Query DirectSound devices.
\r
5226 struct DsProbeData probeInfo;
\r
5227 probeInfo.isInput = false;
\r
5228 probeInfo.dsDevices = &dsDevices;
\r
5229 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5230 if ( FAILED( result ) ) {
\r
5231 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5232 errorText_ = errorStream_.str();
\r
5233 error( RtAudioError::WARNING );
\r
5236 // Query DirectSoundCapture devices.
\r
5237 probeInfo.isInput = true;
\r
5238 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5239 if ( FAILED( result ) ) {
\r
5240 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5241 errorText_ = errorStream_.str();
\r
5242 error( RtAudioError::WARNING );
\r
5245 // Clean out any devices that may have disappeared.
\r
5246 std::vector< int > indices;
\r
5247 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5248 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5249 //unsigned int nErased = 0;
\r
5250 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5251 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5252 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5254 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe a single DirectSound device (by index into the cached dsDevices
// list) and fill an RtAudio::DeviceInfo with its output/input channel
// counts, supported sample rates, and native sample formats.  Failures are
// reported as warnings and return the partially-filled info struct.
// NOTE(review): several lines are missing from this listing relative to the
// original source: the 'probeInput:' label targeted by the goto below, the
// 'HRESULT result;' and DSCAPS/DSCCAPS declarations, a call to getDeviceCount
// in the empty-list branch, 'input->Release();' calls, intermediate
// 'return info;' statements, and the final 'return info; }'.  Reconcile
// against the canonical file before compiling.
5257 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5259 RtAudio::DeviceInfo info;
5260 info.probed = false;
5262 if ( dsDevices.size() == 0 ) {
5263 // Force a query of all devices
5265 if ( dsDevices.size() == 0 ) {
5266 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5267 error( RtAudioError::INVALID_USE );
5272 if ( device >= dsDevices.size() ) {
5273 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5274 error( RtAudioError::INVALID_USE );
// Skip the output probe when this entry has no valid playback id.
5279 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5281 LPDIRECTSOUND output;
5283 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5284 if ( FAILED( result ) ) {
5285 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5286 errorText_ = errorStream_.str();
5287 error( RtAudioError::WARNING );
5291 outCaps.dwSize = sizeof( outCaps );
5292 result = output->GetCaps( &outCaps );
5293 if ( FAILED( result ) ) {
5294 output->Release();
5295 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5296 errorText_ = errorStream_.str();
5297 error( RtAudioError::WARNING );
5301 // Get output channel information.
5302 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5304 // Get sample rate information.
5305 info.sampleRates.clear();
5306 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5307 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5308 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
5309 info.sampleRates.push_back( SAMPLE_RATES[k] );
5312 // Get format information.
5313 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5314 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5316 output->Release();
5318 if ( getDefaultOutputDevice() == device )
5319 info.isDefaultOutput = true;
// No capture id on this entry: finish with output-only info.
5321 if ( dsDevices[ device ].validId[1] == false ) {
5322 info.name = dsDevices[ device ].name;
5323 info.probed = true;
// Input probe (the 'probeInput:' label belongs just above this line).
5329 LPDIRECTSOUNDCAPTURE input;
5330 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5331 if ( FAILED( result ) ) {
5332 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5333 errorText_ = errorStream_.str();
5334 error( RtAudioError::WARNING );
5339 inCaps.dwSize = sizeof( inCaps );
5340 result = input->GetCaps( &inCaps );
5341 if ( FAILED( result ) ) {
5343 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5344 errorText_ = errorStream_.str();
5345 error( RtAudioError::WARNING );
5349 // Get input channel information.
5350 info.inputChannels = inCaps.dwChannels;
5352 // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode (rate, channels, width) combos;
// stereo (..S..) and mono (..M..) variants are checked in separate branches.
5353 std::vector<unsigned int> rates;
5354 if ( inCaps.dwChannels >= 2 ) {
5355 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5356 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5357 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5358 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5359 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5360 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5361 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5362 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5364 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5365 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5366 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5367 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5368 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5370 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5371 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5372 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5373 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5374 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5377 else if ( inCaps.dwChannels == 1 ) {
5378 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5379 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5380 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5381 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5382 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5383 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5384 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5385 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5387 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5388 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5389 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5390 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5391 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5393 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5394 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5395 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5396 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5397 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5400 else info.inputChannels = 0; // technically, this would be an error
5404 if ( info.inputChannels == 0 ) return info;
5406 // Copy the supported rates to the info structure but avoid duplication.
// NOTE(review): the 'bool found = false;' declaration used below appears to
// be among the lines dropped from this listing.
5408 for ( unsigned int i=0; i<rates.size(); i++ ) {
5410 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5411 if ( rates[i] == info.sampleRates[j] ) {
5416 if ( found == false ) info.sampleRates.push_back( rates[i] );
5418 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5420 // If device opens for both playback and capture, we determine the channels.
5421 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5422 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5424 if ( device == 0 ) info.isDefaultInput = true;
5426 // Copy name and return.
5427 info.name = dsDevices[ device ].name;
5428 info.probed = true;
\r
// Open DirectSound device `device` for `mode` (OUTPUT or INPUT) with the
// requested channel count/offset, sample rate and sample format.  On success
// the stream_ structure and the per-API DsHandle are populated and the
// callback thread is started; on failure the already-acquired DirectSound
// objects/buffers are released and FAILURE is returned.
// NOTE(review): this excerpt elides many structural lines (braces, returns,
// the `error:` cleanup label) — comments below describe visible code only.
5432 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5433 unsigned int firstChannel, unsigned int sampleRate,

5434 RtAudioFormat format, unsigned int *bufferSize,

5435 RtAudio::StreamOptions *options )

// DirectSound devices handle at most two channels (mono or stereo).
5437 if ( channels + firstChannel > 2 ) {

5438 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5442 size_t nDevices = dsDevices.size();

5443 if ( nDevices == 0 ) {

5444 // This should not happen because a check is made before this function is called.

5445 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5449 if ( device >= nDevices ) {

5450 // This should not happen because a check is made before this function is called.

5451 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// validId[0] marks playback capability, validId[1] capture capability.
5455 if ( mode == OUTPUT ) {

5456 if ( dsDevices[ device ].validId[0] == false ) {

5457 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5458 errorText_ = errorStream_.str();

5462 else { // mode == INPUT

5463 if ( dsDevices[ device ].validId[1] == false ) {

5464 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5465 errorText_ = errorStream_.str();

5470 // According to a note in PortAudio, using GetDesktopWindow()

5471 // instead of GetForegroundWindow() is supposed to avoid problems

5472 // that occur when the application's window is not the foreground

5473 // window. Also, if the application window closes before the

5474 // DirectSound buffer, DirectSound can crash. In the past, I had

5475 // problems when using GetDesktopWindow() but it seems fine now

5476 // (January 2010). I'll leave it commented here.

5477 // HWND hWnd = GetForegroundWindow();

5478 HWND hWnd = GetDesktopWindow();

5480 // Check the numberOfBuffers parameter and limit the lowest value to

5481 // two. This is a judgement call and a value of two is probably too

5482 // low for capture, but it should work for playback.

5484 if ( options ) nBuffers = options->numberOfBuffers;

5485 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5486 if ( nBuffers < 2 ) nBuffers = 3;

5488 // Check the lower range of the user-specified buffer size and set

5489 // (arbitrarily) to a lower bound of 32.

5490 if ( *bufferSize < 32 ) *bufferSize = 32;

5492 // Create the wave format structure. The data format setting will

5493 // be determined later.

5494 WAVEFORMATEX waveFormat;

5495 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5496 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5497 waveFormat.nChannels = channels + firstChannel;

5498 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5500 // Determine the device buffer size. By default, we'll use the value

5501 // defined above (32K), but we will grow it to make allowances for

5502 // very large software buffer sizes.

5503 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5504 DWORD dsPointerLeadTime = 0;

// ohandle/bhandle carry the type-erased DirectSound object and buffer
// pointers into the shared DsHandle setup below.
5506 void *ohandle = 0, *bhandle = 0;

// ---- OUTPUT (playback) setup: create the IDirectSound object, set the
// primary buffer format, then create and zero the secondary buffer. ----
5508 if ( mode == OUTPUT ) {

5510 LPDIRECTSOUND output;

5511 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5512 if ( FAILED( result ) ) {

5513 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5514 errorText_ = errorStream_.str();

5519 outCaps.dwSize = sizeof( outCaps );

5520 result = output->GetCaps( &outCaps );

5521 if ( FAILED( result ) ) {

5522 output->Release();

5523 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5524 errorText_ = errorStream_.str();

5528 // Check channel information.

5529 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5530 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5531 errorText_ = errorStream_.str();

5535 // Check format information. Use 16-bit format unless not

5536 // supported or user requests 8-bit.

5537 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5538 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5539 waveFormat.wBitsPerSample = 16;

5540 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5543 waveFormat.wBitsPerSample = 8;

5544 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5546 stream_.userFormat = format;

5548 // Update wave format structure and buffer information.

5549 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5550 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time in bytes: how far ahead of the play cursor we will write.
5551 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5553 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5554 while ( dsPointerLeadTime * 2U > dsBufferSize )

5555 dsBufferSize *= 2;

5557 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5558 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5559 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5560 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5561 if ( FAILED( result ) ) {

5562 output->Release();

5563 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5564 errorText_ = errorStream_.str();

5568 // Even though we will write to the secondary buffer, we need to

5569 // access the primary buffer to set the correct output format

5570 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5571 // buffer description.

5572 DSBUFFERDESC bufferDescription;

5573 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5574 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5575 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5577 // Obtain the primary buffer

5578 LPDIRECTSOUNDBUFFER buffer;

5579 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5580 if ( FAILED( result ) ) {

5581 output->Release();

5582 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5583 errorText_ = errorStream_.str();

5587 // Set the primary DS buffer sound format.

5588 result = buffer->SetFormat( &waveFormat );

5589 if ( FAILED( result ) ) {

5590 output->Release();

5591 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5592 errorText_ = errorStream_.str();

5596 // Setup the secondary DS buffer description.

5597 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5598 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5599 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5600 DSBCAPS_GLOBALFOCUS |

5601 DSBCAPS_GETCURRENTPOSITION2 |

5602 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5603 bufferDescription.dwBufferBytes = dsBufferSize;

5604 bufferDescription.lpwfxFormat = &waveFormat;

5606 // Try to create the secondary DS buffer. If that doesn't work,

5607 // try to use software mixing. Otherwise, there's a problem.

5608 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5609 if ( FAILED( result ) ) {

5610 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5611 DSBCAPS_GLOBALFOCUS |

5612 DSBCAPS_GETCURRENTPOSITION2 |

5613 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5614 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5615 if ( FAILED( result ) ) {

5616 output->Release();

5617 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5618 errorText_ = errorStream_.str();

5623 // Get the buffer size ... might be different from what we specified.

// NOTE(review): the DSBCAPS dsbcaps declaration is elided in this excerpt.
5625 dsbcaps.dwSize = sizeof( DSBCAPS );

5626 result = buffer->GetCaps( &dsbcaps );

5627 if ( FAILED( result ) ) {

5628 output->Release();

5629 buffer->Release();

5630 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5631 errorText_ = errorStream_.str();

5635 dsBufferSize = dsbcaps.dwBufferBytes;

5637 // Lock the DS buffer

5640 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5641 if ( FAILED( result ) ) {

5642 output->Release();

5643 buffer->Release();

5644 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5645 errorText_ = errorStream_.str();

5649 // Zero the DS buffer

5650 ZeroMemory( audioPtr, dataLen );

5652 // Unlock the DS buffer

5653 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5654 if ( FAILED( result ) ) {

5655 output->Release();

5656 buffer->Release();

5657 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5658 errorText_ = errorStream_.str();

5662 ohandle = (void *) output;

5663 bhandle = (void *) buffer;

// ---- INPUT (capture) setup: create the IDirectSoundCapture object and a
// capture buffer, then zero it. ----
5666 if ( mode == INPUT ) {

5668 LPDIRECTSOUNDCAPTURE input;

5669 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5670 if ( FAILED( result ) ) {

5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5672 errorText_ = errorStream_.str();

5677 inCaps.dwSize = sizeof( inCaps );

5678 result = input->GetCaps( &inCaps );

5679 if ( FAILED( result ) ) {

5681 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5682 errorText_ = errorStream_.str();

5686 // Check channel information.

5687 if ( inCaps.dwChannels < channels + firstChannel ) {

5688 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5692 // Check format information. Use 16-bit format unless user

5693 // requests 8-bit.

5694 DWORD deviceFormats;

5695 if ( channels + firstChannel == 2 ) {

// Stereo 8-bit capture format mask (11/22/44.1/96 kHz).
5696 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5697 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5698 waveFormat.wBitsPerSample = 8;

5699 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5701 else { // assume 16-bit is supported

5702 waveFormat.wBitsPerSample = 16;

5703 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5706 else { // channel == 1

// Mono 8-bit capture format mask (11/22/44.1/96 kHz).
5707 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5708 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5709 waveFormat.wBitsPerSample = 8;

5710 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5712 else { // assume 16-bit is supported

5713 waveFormat.wBitsPerSample = 16;

5714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5717 stream_.userFormat = format;

5719 // Update wave format structure and buffer information.

5720 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5721 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5722 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5724 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5725 while ( dsPointerLeadTime * 2U > dsBufferSize )

5726 dsBufferSize *= 2;

5728 // Setup the secondary DS buffer description.

5729 DSCBUFFERDESC bufferDescription;

5730 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5731 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5732 bufferDescription.dwFlags = 0;

5733 bufferDescription.dwReserved = 0;

5734 bufferDescription.dwBufferBytes = dsBufferSize;

5735 bufferDescription.lpwfxFormat = &waveFormat;

5737 // Create the capture buffer.

5738 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5739 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5740 if ( FAILED( result ) ) {

5742 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5743 errorText_ = errorStream_.str();

5747 // Get the buffer size ... might be different from what we specified.

5748 DSCBCAPS dscbcaps;

5749 dscbcaps.dwSize = sizeof( DSCBCAPS );

5750 result = buffer->GetCaps( &dscbcaps );

5751 if ( FAILED( result ) ) {

5753 buffer->Release();

5754 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5755 errorText_ = errorStream_.str();

5759 dsBufferSize = dscbcaps.dwBufferBytes;

5761 // NOTE: We could have a problem here if this is a duplex stream

5762 // and the play and capture hardware buffer sizes are different

5763 // (I'm actually not sure if that is a problem or not).

5764 // Currently, we are not verifying that.

5766 // Lock the capture buffer

5769 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5770 if ( FAILED( result ) ) {

5772 buffer->Release();

5773 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5774 errorText_ = errorStream_.str();

5778 // Zero the buffer

5779 ZeroMemory( audioPtr, dataLen );

5781 // Unlock the buffer

5782 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5783 if ( FAILED( result ) ) {

5785 buffer->Release();

5786 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5787 errorText_ = errorStream_.str();

5791 ohandle = (void *) input;

5792 bhandle = (void *) buffer;

5795 // Set various stream parameters

5796 DsHandle *handle = 0;

5797 stream_.nDeviceChannels[mode] = channels + firstChannel;

5798 stream_.nUserChannels[mode] = channels;

5799 stream_.bufferSize = *bufferSize;

5800 stream_.channelOffset[mode] = firstChannel;

5801 stream_.deviceInterleaved[mode] = true;

5802 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5803 else stream_.userInterleaved = true;

5805 // Set flag for buffer conversion

// Conversion needed if channel count, sample format, or interleaving
// differ between the user-facing and device-facing sides.
5806 stream_.doConvertBuffer[mode] = false;

5807 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5808 stream_.doConvertBuffer[mode] = true;

5809 if (stream_.userFormat != stream_.deviceFormat[mode])

5810 stream_.doConvertBuffer[mode] = true;

5811 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5812 stream_.nUserChannels[mode] > 1 )

5813 stream_.doConvertBuffer[mode] = true;

5815 // Allocate necessary internal buffers

5816 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5817 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5818 if ( stream_.userBuffer[mode] == NULL ) {

5819 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5823 if ( stream_.doConvertBuffer[mode] ) {

5825 bool makeBuffer = true;

5826 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5827 if ( mode == INPUT ) {

// In duplex setups, reuse the output-side device buffer if it is big enough.
5828 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5829 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5830 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5834 if ( makeBuffer ) {

5835 bufferBytes *= *bufferSize;

5836 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5837 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5838 if ( stream_.deviceBuffer == NULL ) {

5839 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5845 // Allocate our DsHandle structures for the stream.

5846 if ( stream_.apiHandle == 0 ) {

5848 handle = new DsHandle;

5850 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but a DsHandle is allocated here
// — looks like a copy/paste from the ASIO backend; confirm before changing.
5851 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5855 // Create a manual-reset event.

5856 handle->condition = CreateEvent( NULL, // no security

5857 TRUE, // manual-reset

5858 FALSE, // non-signaled initially

5859 NULL ); // unnamed

5860 stream_.apiHandle = (void *) handle;

5863 handle = (DsHandle *) stream_.apiHandle;

5864 handle->id[mode] = ohandle;

5865 handle->buffer[mode] = bhandle;

5866 handle->dsBufferSize[mode] = dsBufferSize;

5867 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5869 stream_.device[mode] = device;

5870 stream_.state = STREAM_STOPPED;

5871 if ( stream_.mode == OUTPUT && mode == INPUT )

5872 // We had already set up an output stream.

5873 stream_.mode = DUPLEX;

5875 stream_.mode = mode;

5876 stream_.nBuffers = nBuffers;

5877 stream_.sampleRate = sampleRate;

5879 // Setup the buffer conversion information structure.

5880 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5882 // Setup the callback thread.

5883 if ( stream_.callbackInfo.isRunning == false ) {

5884 unsigned threadId;

5885 stream_.callbackInfo.isRunning = true;

5886 stream_.callbackInfo.object = (void *) this;

5887 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5888 &stream_.callbackInfo, 0, &threadId );

5889 if ( stream_.callbackInfo.thread == 0 ) {

5890 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5894 // Boost DS thread priority

5895 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// ---- Error cleanup path: release any DirectSound objects/buffers already
// acquired, close the condition event, and free the internal buffers.
// (The `error:` label itself is elided in this excerpt.) ----
5901 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5902 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5903 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5904 if ( buffer ) buffer->Release();

5905 object->Release();

5907 if ( handle->buffer[1] ) {

5908 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5909 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5910 if ( buffer ) buffer->Release();

5911 object->Release();

5913 CloseHandle( handle->condition );

5915 stream_.apiHandle = 0;

5918 for ( int i=0; i<2; i++ ) {

5919 if ( stream_.userBuffer[i] ) {

5920 free( stream_.userBuffer[i] );

5921 stream_.userBuffer[i] = 0;

5925 if ( stream_.deviceBuffer ) {

5926 free( stream_.deviceBuffer );

5927 stream_.deviceBuffer = 0;

5930 stream_.state = STREAM_CLOSED;
\r
// Close the currently open stream: stop and join the callback thread,
// release all DirectSound objects and buffers, free internal buffers, and
// reset the stream state to CLOSED.  Issues only a WARNING if no stream is
// open.  NOTE(review): braces/returns are elided in this excerpt.
5934 void RtApiDs :: closeStream()

5936 if ( stream_.state == STREAM_CLOSED ) {

5937 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5938 error( RtAudioError::WARNING );

5942 // Stop the callback thread.

// Clearing isRunning signals the thread to exit; wait for it, then close it.
5943 stream_.callbackInfo.isRunning = false;

5944 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5945 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5947 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release playback-side DirectSound buffer and object, if present.
5949 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5950 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5951 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5954 buffer->Release();

5956 object->Release();

// Release capture-side DirectSound buffer and object, if present.
5958 if ( handle->buffer[1] ) {

5959 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5960 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5963 buffer->Release();

5965 object->Release();

5967 CloseHandle( handle->condition );

5969 stream_.apiHandle = 0;

// Free the per-mode user buffers and the shared device buffer.
5972 for ( int i=0; i<2; i++ ) {

5973 if ( stream_.userBuffer[i] ) {

5974 free( stream_.userBuffer[i] );

5975 stream_.userBuffer[i] = 0;

5979 if ( stream_.deviceBuffer ) {

5980 free( stream_.deviceBuffer );

5981 stream_.deviceBuffer = 0;

5984 stream_.mode = UNINITIALIZED;

5985 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset drain bookkeeping, and mark the
// stream RUNNING.  Issues only a WARNING if the stream is already running.
// NOTE(review): braces/returns are elided in this excerpt.
5988 void RtApiDs :: startStream()

5991 if ( stream_.state == STREAM_RUNNING ) {

5992 errorText_ = "RtApiDs::startStream(): the stream is already running!";

5993 error( RtAudioError::WARNING );

5997 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5999 // Increase scheduler frequency on lesser windows (a side-effect of

6000 // increasing timer accuracy). On greater windows (Win2K or later),

6001 // this is already in effect.

// Paired with timeEndPeriod( 1 ) in stopStream().
6002 timeBeginPeriod( 1 );

6004 buffersRolling = false;

6005 duplexPrerollBytes = 0;

6007 if ( stream_.mode == DUPLEX ) {

6008 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6009 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6012 HRESULT result = 0;

6013 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6015 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// DSBPLAY_LOOPING: playback wraps continuously around the device buffer.
6016 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6017 if ( FAILED( result ) ) {

6018 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6019 errorText_ = errorStream_.str();

6024 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6026 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6027 result = buffer->Start( DSCBSTART_LOOPING );

6028 if ( FAILED( result ) ) {

6029 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6030 errorText_ = errorStream_.str();

6035 handle->drainCounter = 0;

6036 handle->internalDrain = false;

6037 ResetEvent( handle->condition );

6038 stream_.state = STREAM_RUNNING;

6041 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream, draining queued output first: waits (via the
// handle condition event) for the callback to finish draining, then stops
// each DirectSound buffer, zeros it so a restart does not replay stale data,
// and resets the buffer pointers.  Issues only a WARNING if already stopped.
// NOTE(review): braces/returns and some declarations (audioPtr/dataLen) are
// elided in this excerpt.
6044 void RtApiDs :: stopStream()

6047 if ( stream_.state == STREAM_STOPPED ) {

6048 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6049 error( RtAudioError::WARNING );

6053 HRESULT result = 0;

6056 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// ---- Output side: drain, stop, and clear the playback buffer. ----
6057 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter = 2 tells the callback to flush; then block on the event
// the callback signals when draining completes.
6058 if ( handle->drainCounter == 0 ) {

6059 handle->drainCounter = 2;

6060 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6063 stream_.state = STREAM_STOPPED;

6065 MUTEX_LOCK( &stream_.mutex );

6067 // Stop the buffer and clear memory

6068 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6069 result = buffer->Stop();

6070 if ( FAILED( result ) ) {

6071 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6072 errorText_ = errorStream_.str();

6076 // Lock the buffer and clear it so that if we start to play again,

6077 // we won't have old data playing.

6078 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6079 if ( FAILED( result ) ) {

6080 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6081 errorText_ = errorStream_.str();

6085 // Zero the DS buffer

6086 ZeroMemory( audioPtr, dataLen );

6088 // Unlock the DS buffer

6089 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6090 if ( FAILED( result ) ) {

6091 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6092 errorText_ = errorStream_.str();

6096 // If we start playing again, we must begin at beginning of buffer.

6097 handle->bufferPointer[0] = 0;

// ---- Input side: stop and clear the capture buffer. ----
6100 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6101 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6105 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6107 if ( stream_.mode != DUPLEX )

6108 MUTEX_LOCK( &stream_.mutex );

6110 result = buffer->Stop();

6111 if ( FAILED( result ) ) {

6112 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6113 errorText_ = errorStream_.str();

6117 // Lock the buffer and clear it so that if we start to play again,

6118 // we won't have old data playing.

6119 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6120 if ( FAILED( result ) ) {

6121 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6122 errorText_ = errorStream_.str();

6126 // Zero the DS buffer

6127 ZeroMemory( audioPtr, dataLen );

6129 // Unlock the DS buffer

6130 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6131 if ( FAILED( result ) ) {

6132 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6133 errorText_ = errorStream_.str();

6137 // If we start recording again, we must begin at beginning of buffer.

6138 handle->bufferPointer[1] = 0;

6142 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6143 MUTEX_UNLOCK( &stream_.mutex );

6145 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the running stream without draining user output: set drainCounter
// to 2 so the callback path treats the stream as draining (it writes zeros
// when drainCounter > 1 — see callbackEvent), then the stream is stopped.
// Issues only a WARNING if already stopped.  NOTE(review): the trailing
// statements of this function are elided in this excerpt.
6148 void RtApiDs :: abortStream()

6151 if ( stream_.state == STREAM_STOPPED ) {

6152 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6153 error( RtAudioError::WARNING );

6157 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6158 handle->drainCounter = 2;
\r
6163 void RtApiDs :: callbackEvent()
\r
6165 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6166 Sleep( 50 ); // sleep 50 milliseconds
\r
6170 if ( stream_.state == STREAM_CLOSED ) {
\r
6171 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6172 error( RtAudioError::WARNING );
\r
6176 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6177 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6179 // Check if we were draining the stream and signal is finished.
\r
6180 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6182 stream_.state = STREAM_STOPPING;
\r
6183 if ( handle->internalDrain == false )
\r
6184 SetEvent( handle->condition );
\r
6190 // Invoke user callback to get fresh output data UNLESS we are
\r
6191 // draining stream.
\r
6192 if ( handle->drainCounter == 0 ) {
\r
6193 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6194 double streamTime = getStreamTime();
\r
6195 RtAudioStreamStatus status = 0;
\r
6196 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6197 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6198 handle->xrun[0] = false;
\r
6200 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6201 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6202 handle->xrun[1] = false;
\r
6204 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6205 stream_.bufferSize, streamTime, status, info->userData );
\r
6206 if ( cbReturnValue == 2 ) {
\r
6207 stream_.state = STREAM_STOPPING;
\r
6208 handle->drainCounter = 2;
\r
6212 else if ( cbReturnValue == 1 ) {
\r
6213 handle->drainCounter = 1;
\r
6214 handle->internalDrain = true;
\r
6219 DWORD currentWritePointer, safeWritePointer;
\r
6220 DWORD currentReadPointer, safeReadPointer;
\r
6221 UINT nextWritePointer;
\r
6223 LPVOID buffer1 = NULL;
\r
6224 LPVOID buffer2 = NULL;
\r
6225 DWORD bufferSize1 = 0;
\r
6226 DWORD bufferSize2 = 0;
\r
6231 MUTEX_LOCK( &stream_.mutex );
\r
6232 if ( stream_.state == STREAM_STOPPED ) {
\r
6233 MUTEX_UNLOCK( &stream_.mutex );
\r
6237 if ( buffersRolling == false ) {
\r
6238 if ( stream_.mode == DUPLEX ) {
\r
6239 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6241 // It takes a while for the devices to get rolling. As a result,
\r
6242 // there's no guarantee that the capture and write device pointers
\r
6243 // will move in lockstep. Wait here for both devices to start
\r
6244 // rolling, and then set our buffer pointers accordingly.
\r
6245 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6246 // bytes later than the write buffer.
\r
6248 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6249 // take place between the two GetCurrentPosition calls... but I'm
\r
6250 // really not sure how to solve the problem. Temporarily boost to
\r
6251 // Realtime priority, maybe; but I'm not sure what priority the
\r
6252 // DirectSound service threads run at. We *should* be roughly
\r
6253 // within a ms or so of correct.
\r
6255 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6256 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6258 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6260 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6261 if ( FAILED( result ) ) {
\r
6262 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6263 errorText_ = errorStream_.str();
\r
6264 error( RtAudioError::SYSTEM_ERROR );
\r
6267 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6268 if ( FAILED( result ) ) {
\r
6269 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6270 errorText_ = errorStream_.str();
\r
6271 error( RtAudioError::SYSTEM_ERROR );
\r
6275 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6276 if ( FAILED( result ) ) {
\r
6277 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6278 errorText_ = errorStream_.str();
\r
6279 error( RtAudioError::SYSTEM_ERROR );
\r
6282 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6283 if ( FAILED( result ) ) {
\r
6284 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6285 errorText_ = errorStream_.str();
\r
6286 error( RtAudioError::SYSTEM_ERROR );
\r
6289 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6293 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6295 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6296 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6297 handle->bufferPointer[1] = safeReadPointer;
\r
6299 else if ( stream_.mode == OUTPUT ) {
\r
6301 // Set the proper nextWritePosition after initial startup.
\r
6302 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6303 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6304 if ( FAILED( result ) ) {
\r
6305 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6306 errorText_ = errorStream_.str();
\r
6307 error( RtAudioError::SYSTEM_ERROR );
\r
6310 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6311 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6314 buffersRolling = true;
\r
6317 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6319 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6321 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6322 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6323 bufferBytes *= formatBytes( stream_.userFormat );
\r
6324 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6327 // Setup parameters and do buffer conversion if necessary.
\r
6328 if ( stream_.doConvertBuffer[0] ) {
\r
6329 buffer = stream_.deviceBuffer;
\r
6330 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6331 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6332 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6335 buffer = stream_.userBuffer[0];
\r
6336 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6337 bufferBytes *= formatBytes( stream_.userFormat );
\r
6340 // No byte swapping necessary in DirectSound implementation.
\r
6342 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6343 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6345 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6346 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6348 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6349 nextWritePointer = handle->bufferPointer[0];
\r
6351 DWORD endWrite, leadPointer;
\r
6353 // Find out where the read and "safe write" pointers are.
\r
6354 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6355 if ( FAILED( result ) ) {
\r
6356 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6357 errorText_ = errorStream_.str();
\r
6358 error( RtAudioError::SYSTEM_ERROR );
\r
6362 // We will copy our output buffer into the region between
\r
6363 // safeWritePointer and leadPointer. If leadPointer is not
\r
6364 // beyond the next endWrite position, wait until it is.
\r
6365 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6366 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6367 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6368 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6369 endWrite = nextWritePointer + bufferBytes;
\r
6371 // Check whether the entire write region is behind the play pointer.
\r
6372 if ( leadPointer >= endWrite ) break;
\r
6374 // If we are here, then we must wait until the leadPointer advances
\r
6375 // beyond the end of our next write region. We use the
\r
6376 // Sleep() function to suspend operation until that happens.
\r
6377 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6378 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6379 if ( millis < 1.0 ) millis = 1.0;
\r
6380 Sleep( (DWORD) millis );
\r
6383 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6384 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6385 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6386 handle->xrun[0] = true;
\r
6387 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6388 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6389 handle->bufferPointer[0] = nextWritePointer;
\r
6390 endWrite = nextWritePointer + bufferBytes;
\r
6393 // Lock free space in the buffer
\r
6394 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6395 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6396 if ( FAILED( result ) ) {
\r
6397 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6398 errorText_ = errorStream_.str();
\r
6399 error( RtAudioError::SYSTEM_ERROR );
\r
6403 // Copy our buffer into the DS buffer
\r
6404 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6405 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6407 // Update our buffer offset and unlock sound buffer
\r
6408 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6409 if ( FAILED( result ) ) {
\r
6410 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6411 errorText_ = errorStream_.str();
\r
6412 error( RtAudioError::SYSTEM_ERROR );
\r
6415 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6416 handle->bufferPointer[0] = nextWritePointer;
\r
6419 // Don't bother draining input
\r
6420 if ( handle->drainCounter ) {
\r
6421 handle->drainCounter++;
\r
6425 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6427 // Setup parameters.
\r
6428 if ( stream_.doConvertBuffer[1] ) {
\r
6429 buffer = stream_.deviceBuffer;
\r
6430 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6431 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6434 buffer = stream_.userBuffer[1];
\r
6435 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6436 bufferBytes *= formatBytes( stream_.userFormat );
\r
6439 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6440 long nextReadPointer = handle->bufferPointer[1];
\r
6441 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6443 // Find out where the write and "safe read" pointers are.
\r
6444 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6445 if ( FAILED( result ) ) {
\r
6446 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6447 errorText_ = errorStream_.str();
\r
6448 error( RtAudioError::SYSTEM_ERROR );
\r
6452 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6453 DWORD endRead = nextReadPointer + bufferBytes;
\r
6455 // Handling depends on whether we are INPUT or DUPLEX.
\r
6456 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6457 // then a wait here will drag the write pointers into the forbidden zone.
\r
6459 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6460 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6461 // practical way to sync up the read and write pointers reliably, given the
\r
6462 // the very complex relationship between phase and increment of the read and write
\r
6465 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6466 // provide a pre-roll period of 0.5 seconds in which we return
\r
6467 // zeros from the read buffer while the pointers sync up.
\r
6469 if ( stream_.mode == DUPLEX ) {
\r
6470 if ( safeReadPointer < endRead ) {
\r
6471 if ( duplexPrerollBytes <= 0 ) {
\r
6472 // Pre-roll time over. Be more aggressive.
\r
6473 int adjustment = endRead-safeReadPointer;
\r
6475 handle->xrun[1] = true;
\r
6477 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6478 // and perform fine adjustments later.
\r
6479 // - small adjustments: back off by twice as much.
\r
6480 if ( adjustment >= 2*bufferBytes )
\r
6481 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6483 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6485 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6489 // In pre-roll time. Just do it.
\r
6490 nextReadPointer = safeReadPointer - bufferBytes;
\r
6491 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6493 endRead = nextReadPointer + bufferBytes;
\r
6496 else { // mode == INPUT
\r
6497 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6498 // See comments for playback.
\r
6499 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6500 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6501 if ( millis < 1.0 ) millis = 1.0;
\r
6502 Sleep( (DWORD) millis );
\r
6504 // Wake up and find out where we are now.
\r
6505 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6506 if ( FAILED( result ) ) {
\r
6507 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6508 errorText_ = errorStream_.str();
\r
6509 error( RtAudioError::SYSTEM_ERROR );
\r
6513 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6517 // Lock free space in the buffer
\r
6518 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6519 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6520 if ( FAILED( result ) ) {
\r
6521 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6522 errorText_ = errorStream_.str();
\r
6523 error( RtAudioError::SYSTEM_ERROR );
\r
6527 if ( duplexPrerollBytes <= 0 ) {
\r
6528 // Copy our buffer into the DS buffer
\r
6529 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6530 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6533 memset( buffer, 0, bufferSize1 );
\r
6534 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6535 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6538 // Update our buffer offset and unlock sound buffer
\r
6539 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6540 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6541 if ( FAILED( result ) ) {
\r
6542 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6543 errorText_ = errorStream_.str();
\r
6544 error( RtAudioError::SYSTEM_ERROR );
\r
6547 handle->bufferPointer[1] = nextReadPointer;
\r
6549 // No byte swapping necessary in DirectSound implementation.
\r
6551 // If necessary, convert 8-bit data from unsigned to signed.
\r
6552 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6553 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6555 // Do buffer conversion if necessary.
\r
6556 if ( stream_.doConvertBuffer[1] )
\r
6557 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6561 MUTEX_UNLOCK( &stream_.mutex );
\r
6562 RtApi::tickStreamTime();
\r
6565 // Definitions for utility functions and callbacks
\r
6566 // specific to the DirectSound implementation.
\r
6568 static unsigned __stdcall callbackHandler( void *ptr )
\r
6570 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6571 RtApiDs *object = (RtApiDs *) info->object;
\r
6572 bool* isRunning = &info->isRunning;
\r
6574 while ( *isRunning == true ) {
\r
6575 object->callbackEvent();
\r
6578 _endthreadex( 0 );
\r
6582 #include "tchar.h"
\r
6584 static std::string convertTChar( LPCTSTR name )
\r
6586 #if defined( UNICODE ) || defined( _UNICODE )
\r
6587 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6588 std::string s( length-1, '\0' );
\r
6589 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6591 std::string s( name );
\r
6597 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6598 LPCTSTR description,
\r
6599 LPCTSTR /*module*/,
\r
6600 LPVOID lpContext )
\r
6602 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6603 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6606 bool validDevice = false;
\r
6607 if ( probeInfo.isInput == true ) {
\r
6609 LPDIRECTSOUNDCAPTURE object;
\r
6611 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6612 if ( hr != DS_OK ) return TRUE;
\r
6614 caps.dwSize = sizeof(caps);
\r
6615 hr = object->GetCaps( &caps );
\r
6616 if ( hr == DS_OK ) {
\r
6617 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6618 validDevice = true;
\r
6620 object->Release();
\r
6624 LPDIRECTSOUND object;
\r
6625 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6626 if ( hr != DS_OK ) return TRUE;
\r
6628 caps.dwSize = sizeof(caps);
\r
6629 hr = object->GetCaps( &caps );
\r
6630 if ( hr == DS_OK ) {
\r
6631 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6632 validDevice = true;
\r
6634 object->Release();
\r
6637 // If good device, then save its name and guid.
\r
6638 std::string name = convertTChar( description );
\r
6639 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6640 if ( lpguid == NULL )
\r
6641 name = "Default Device";
\r
6642 if ( validDevice ) {
\r
6643 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6644 if ( dsDevices[i].name == name ) {
\r
6645 dsDevices[i].found = true;
\r
6646 if ( probeInfo.isInput ) {
\r
6647 dsDevices[i].id[1] = lpguid;
\r
6648 dsDevices[i].validId[1] = true;
\r
6651 dsDevices[i].id[0] = lpguid;
\r
6652 dsDevices[i].validId[0] = true;
\r
6659 device.name = name;
\r
6660 device.found = true;
\r
6661 if ( probeInfo.isInput ) {
\r
6662 device.id[1] = lpguid;
\r
6663 device.validId[1] = true;
\r
6666 device.id[0] = lpguid;
\r
6667 device.validId[0] = true;
\r
6669 dsDevices.push_back( device );
\r
6675 static const char* getErrorString( int code )
\r
6679 case DSERR_ALLOCATED:
\r
6680 return "Already allocated";
\r
6682 case DSERR_CONTROLUNAVAIL:
\r
6683 return "Control unavailable";
\r
6685 case DSERR_INVALIDPARAM:
\r
6686 return "Invalid parameter";
\r
6688 case DSERR_INVALIDCALL:
\r
6689 return "Invalid call";
\r
6691 case DSERR_GENERIC:
\r
6692 return "Generic error";
\r
6694 case DSERR_PRIOLEVELNEEDED:
\r
6695 return "Priority level needed";
\r
6697 case DSERR_OUTOFMEMORY:
\r
6698 return "Out of memory";
\r
6700 case DSERR_BADFORMAT:
\r
6701 return "The sample rate or the channel format is not supported";
\r
6703 case DSERR_UNSUPPORTED:
\r
6704 return "Not supported";
\r
6706 case DSERR_NODRIVER:
\r
6707 return "No driver";
\r
6709 case DSERR_ALREADYINITIALIZED:
\r
6710 return "Already initialized";
\r
6712 case DSERR_NOAGGREGATION:
\r
6713 return "No aggregation";
\r
6715 case DSERR_BUFFERLOST:
\r
6716 return "Buffer lost";
\r
6718 case DSERR_OTHERAPPHASPRIO:
\r
6719 return "Another application already has priority";
\r
6721 case DSERR_UNINITIALIZED:
\r
6722 return "Uninitialized";
\r
6725 return "DirectSound unknown error";
\r
6728 //******************** End of __WINDOWS_DS__ *********************//
\r
6732 #if defined(__LINUX_ALSA__)
\r
6734 #include <alsa/asoundlib.h>
\r
6735 #include <unistd.h>
\r
6737 // A structure to hold various information related to the ALSA API
\r
6738 // implementation.
\r
6739 struct AlsaHandle {
\r
6740 snd_pcm_t *handles[2];
\r
6741 bool synchronized;
\r
6743 pthread_cond_t runnable_cv;
\r
6747 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6750 static void *alsaCallbackHandler( void * ptr );
\r
6752 RtApiAlsa :: RtApiAlsa()
\r
6754 // Nothing to do here.
\r
6757 RtApiAlsa :: ~RtApiAlsa()
\r
6759 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6762 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6764 unsigned nDevices = 0;
\r
6765 int result, subdevice, card;
\r
6767 snd_ctl_t *handle;
\r
6769 // Count cards and devices
\r
6771 snd_card_next( &card );
\r
6772 while ( card >= 0 ) {
\r
6773 sprintf( name, "hw:%d", card );
\r
6774 result = snd_ctl_open( &handle, name, 0 );
\r
6775 if ( result < 0 ) {
\r
6776 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6777 errorText_ = errorStream_.str();
\r
6778 error( RtAudioError::WARNING );
\r
6783 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6784 if ( result < 0 ) {
\r
6785 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6786 errorText_ = errorStream_.str();
\r
6787 error( RtAudioError::WARNING );
\r
6790 if ( subdevice < 0 )
\r
6795 snd_ctl_close( handle );
\r
6796 snd_card_next( &card );
\r
6799 result = snd_ctl_open( &handle, "default", 0 );
\r
6800 if (result == 0) {
\r
6802 snd_ctl_close( handle );
\r
6808 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6810 RtAudio::DeviceInfo info;
\r
6811 info.probed = false;
\r
6813 unsigned nDevices = 0;
\r
6814 int result, subdevice, card;
\r
6816 snd_ctl_t *chandle;
\r
6818 // Count cards and devices
\r
6820 snd_card_next( &card );
\r
6821 while ( card >= 0 ) {
\r
6822 sprintf( name, "hw:%d", card );
\r
6823 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6824 if ( result < 0 ) {
\r
6825 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6826 errorText_ = errorStream_.str();
\r
6827 error( RtAudioError::WARNING );
\r
6832 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6833 if ( result < 0 ) {
\r
6834 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6835 errorText_ = errorStream_.str();
\r
6836 error( RtAudioError::WARNING );
\r
6839 if ( subdevice < 0 ) break;
\r
6840 if ( nDevices == device ) {
\r
6841 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6847 snd_ctl_close( chandle );
\r
6848 snd_card_next( &card );
\r
6851 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6852 if ( result == 0 ) {
\r
6853 if ( nDevices == device ) {
\r
6854 strcpy( name, "default" );
\r
6860 if ( nDevices == 0 ) {
\r
6861 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6862 error( RtAudioError::INVALID_USE );
\r
6866 if ( device >= nDevices ) {
\r
6867 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6868 error( RtAudioError::INVALID_USE );
\r
6874 // If a stream is already open, we cannot probe the stream devices.
\r
6875 // Thus, use the saved results.
\r
6876 if ( stream_.state != STREAM_CLOSED &&
\r
6877 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6878 snd_ctl_close( chandle );
\r
6879 if ( device >= devices_.size() ) {
\r
6880 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6881 error( RtAudioError::WARNING );
\r
6884 return devices_[ device ];
\r
6887 int openMode = SND_PCM_ASYNC;
\r
6888 snd_pcm_stream_t stream;
\r
6889 snd_pcm_info_t *pcminfo;
\r
6890 snd_pcm_info_alloca( &pcminfo );
\r
6891 snd_pcm_t *phandle;
\r
6892 snd_pcm_hw_params_t *params;
\r
6893 snd_pcm_hw_params_alloca( &params );
\r
6895 // First try for playback unless default device (which has subdev -1)
\r
6896 stream = SND_PCM_STREAM_PLAYBACK;
\r
6897 snd_pcm_info_set_stream( pcminfo, stream );
\r
6898 if ( subdevice != -1 ) {
\r
6899 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6900 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6902 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6903 if ( result < 0 ) {
\r
6904 // Device probably doesn't support playback.
\r
6905 goto captureProbe;
\r
6909 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6910 if ( result < 0 ) {
\r
6911 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6912 errorText_ = errorStream_.str();
\r
6913 error( RtAudioError::WARNING );
\r
6914 goto captureProbe;
\r
6917 // The device is open ... fill the parameter structure.
\r
6918 result = snd_pcm_hw_params_any( phandle, params );
\r
6919 if ( result < 0 ) {
\r
6920 snd_pcm_close( phandle );
\r
6921 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6922 errorText_ = errorStream_.str();
\r
6923 error( RtAudioError::WARNING );
\r
6924 goto captureProbe;
\r
6927 // Get output channel information.
\r
6928 unsigned int value;
\r
6929 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6930 if ( result < 0 ) {
\r
6931 snd_pcm_close( phandle );
\r
6932 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6933 errorText_ = errorStream_.str();
\r
6934 error( RtAudioError::WARNING );
\r
6935 goto captureProbe;
\r
6937 info.outputChannels = value;
\r
6938 snd_pcm_close( phandle );
\r
6941 stream = SND_PCM_STREAM_CAPTURE;
\r
6942 snd_pcm_info_set_stream( pcminfo, stream );
\r
6944 // Now try for capture unless default device (with subdev = -1)
\r
6945 if ( subdevice != -1 ) {
\r
6946 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6947 snd_ctl_close( chandle );
\r
6948 if ( result < 0 ) {
\r
6949 // Device probably doesn't support capture.
\r
6950 if ( info.outputChannels == 0 ) return info;
\r
6951 goto probeParameters;
\r
6955 snd_ctl_close( chandle );
\r
6957 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6958 if ( result < 0 ) {
\r
6959 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6960 errorText_ = errorStream_.str();
\r
6961 error( RtAudioError::WARNING );
\r
6962 if ( info.outputChannels == 0 ) return info;
\r
6963 goto probeParameters;
\r
6966 // The device is open ... fill the parameter structure.
\r
6967 result = snd_pcm_hw_params_any( phandle, params );
\r
6968 if ( result < 0 ) {
\r
6969 snd_pcm_close( phandle );
\r
6970 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6971 errorText_ = errorStream_.str();
\r
6972 error( RtAudioError::WARNING );
\r
6973 if ( info.outputChannels == 0 ) return info;
\r
6974 goto probeParameters;
\r
6977 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6978 if ( result < 0 ) {
\r
6979 snd_pcm_close( phandle );
\r
6980 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6981 errorText_ = errorStream_.str();
\r
6982 error( RtAudioError::WARNING );
\r
6983 if ( info.outputChannels == 0 ) return info;
\r
6984 goto probeParameters;
\r
6986 info.inputChannels = value;
\r
6987 snd_pcm_close( phandle );
\r
6989 // If device opens for both playback and capture, we determine the channels.
\r
6990 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6991 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6993 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6994 if ( device == 0 && info.outputChannels > 0 )
\r
6995 info.isDefaultOutput = true;
\r
6996 if ( device == 0 && info.inputChannels > 0 )
\r
6997 info.isDefaultInput = true;
\r
7000 // At this point, we just need to figure out the supported data
\r
7001 // formats and sample rates. We'll proceed by opening the device in
\r
7002 // the direction with the maximum number of channels, or playback if
\r
7003 // they are equal. This might limit our sample rate options, but so
\r
7006 if ( info.outputChannels >= info.inputChannels )
\r
7007 stream = SND_PCM_STREAM_PLAYBACK;
\r
7009 stream = SND_PCM_STREAM_CAPTURE;
\r
7010 snd_pcm_info_set_stream( pcminfo, stream );
\r
7012 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7013 if ( result < 0 ) {
\r
7014 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7015 errorText_ = errorStream_.str();
\r
7016 error( RtAudioError::WARNING );
\r
7020 // The device is open ... fill the parameter structure.
\r
7021 result = snd_pcm_hw_params_any( phandle, params );
\r
7022 if ( result < 0 ) {
\r
7023 snd_pcm_close( phandle );
\r
7024 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7025 errorText_ = errorStream_.str();
\r
7026 error( RtAudioError::WARNING );
\r
7030 // Test our discrete set of sample rate values.
\r
7031 info.sampleRates.clear();
\r
7032 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7033 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7034 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7036 if ( info.sampleRates.size() == 0 ) {
\r
7037 snd_pcm_close( phandle );
\r
7038 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7039 errorText_ = errorStream_.str();
\r
7040 error( RtAudioError::WARNING );
\r
7044 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7045 snd_pcm_format_t format;
\r
7046 info.nativeFormats = 0;
\r
7047 format = SND_PCM_FORMAT_S8;
\r
7048 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7049 info.nativeFormats |= RTAUDIO_SINT8;
\r
7050 format = SND_PCM_FORMAT_S16;
\r
7051 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7052 info.nativeFormats |= RTAUDIO_SINT16;
\r
7053 format = SND_PCM_FORMAT_S24;
\r
7054 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7055 info.nativeFormats |= RTAUDIO_SINT24;
\r
7056 format = SND_PCM_FORMAT_S32;
\r
7057 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7058 info.nativeFormats |= RTAUDIO_SINT32;
\r
7059 format = SND_PCM_FORMAT_FLOAT;
\r
7060 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7061 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7062 format = SND_PCM_FORMAT_FLOAT64;
\r
7063 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7064 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7066 // Check that we have at least one supported format
\r
7067 if ( info.nativeFormats == 0 ) {
\r
7068 snd_pcm_close( phandle );
\r
7069 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7070 errorText_ = errorStream_.str();
\r
7071 error( RtAudioError::WARNING );
\r
7075 // Get the device name
\r
7077 result = snd_card_get_name( card, &cardname );
\r
7078 if ( result >= 0 ) {
\r
7079 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7084 // That's all ... close the device and return
\r
7085 snd_pcm_close( phandle );
\r
7086 info.probed = true;
\r
7090 void RtApiAlsa :: saveDeviceInfo( void )
\r
7094 unsigned int nDevices = getDeviceCount();
\r
7095 devices_.resize( nDevices );
\r
7096 for ( unsigned int i=0; i<nDevices; i++ )
\r
7097 devices_[i] = getDeviceInfo( i );
\r
7100 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7101 unsigned int firstChannel, unsigned int sampleRate,
\r
7102 RtAudioFormat format, unsigned int *bufferSize,
\r
7103 RtAudio::StreamOptions *options )
\r
7106 #if defined(__RTAUDIO_DEBUG__)
\r
7107 snd_output_t *out;
\r
7108 snd_output_stdio_attach(&out, stderr, 0);
\r
7111 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7113 unsigned nDevices = 0;
\r
7114 int result, subdevice, card;
\r
7116 snd_ctl_t *chandle;
\r
7118 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7119 snprintf(name, sizeof(name), "%s", "default");
\r
7121 // Count cards and devices
\r
7123 snd_card_next( &card );
\r
7124 while ( card >= 0 ) {
\r
7125 sprintf( name, "hw:%d", card );
\r
7126 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7127 if ( result < 0 ) {
\r
7128 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7129 errorText_ = errorStream_.str();
\r
7134 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7135 if ( result < 0 ) break;
\r
7136 if ( subdevice < 0 ) break;
\r
7137 if ( nDevices == device ) {
\r
7138 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7139 snd_ctl_close( chandle );
\r
7144 snd_ctl_close( chandle );
\r
7145 snd_card_next( &card );
\r
7148 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7149 if ( result == 0 ) {
\r
7150 if ( nDevices == device ) {
\r
7151 strcpy( name, "default" );
\r
7157 if ( nDevices == 0 ) {
\r
7158 // This should not happen because a check is made before this function is called.
\r
7159 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7163 if ( device >= nDevices ) {
\r
7164 // This should not happen because a check is made before this function is called.
\r
7165 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7172 // The getDeviceInfo() function will not work for a device that is
\r
7173 // already open. Thus, we'll probe the system before opening a
\r
7174 // stream and save the results for use by getDeviceInfo().
\r
7175 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7176 this->saveDeviceInfo();
\r
7178 snd_pcm_stream_t stream;
\r
7179 if ( mode == OUTPUT )
\r
7180 stream = SND_PCM_STREAM_PLAYBACK;
\r
7182 stream = SND_PCM_STREAM_CAPTURE;
\r
7184 snd_pcm_t *phandle;
\r
7185 int openMode = SND_PCM_ASYNC;
\r
7186 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7187 if ( result < 0 ) {
\r
7188 if ( mode == OUTPUT )
\r
7189 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7191 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7192 errorText_ = errorStream_.str();
\r
7196 // Fill the parameter structure.
\r
7197 snd_pcm_hw_params_t *hw_params;
\r
7198 snd_pcm_hw_params_alloca( &hw_params );
\r
7199 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7200 if ( result < 0 ) {
\r
7201 snd_pcm_close( phandle );
\r
7202 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7203 errorText_ = errorStream_.str();
\r
7207 #if defined(__RTAUDIO_DEBUG__)
\r
7208 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7209 snd_pcm_hw_params_dump( hw_params, out );
\r
7212 // Set access ... check user preference.
\r
7213 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7214 stream_.userInterleaved = false;
\r
7215 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7216 if ( result < 0 ) {
\r
7217 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7218 stream_.deviceInterleaved[mode] = true;
\r
7221 stream_.deviceInterleaved[mode] = false;
\r
7224 stream_.userInterleaved = true;
\r
7225 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7226 if ( result < 0 ) {
\r
7227 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7228 stream_.deviceInterleaved[mode] = false;
\r
7231 stream_.deviceInterleaved[mode] = true;
\r
7234 if ( result < 0 ) {
\r
7235 snd_pcm_close( phandle );
\r
7236 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7237 errorText_ = errorStream_.str();
\r
7241 // Determine how to set the device format.
\r
7242 stream_.userFormat = format;
\r
7243 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7245 if ( format == RTAUDIO_SINT8 )
\r
7246 deviceFormat = SND_PCM_FORMAT_S8;
\r
7247 else if ( format == RTAUDIO_SINT16 )
\r
7248 deviceFormat = SND_PCM_FORMAT_S16;
\r
7249 else if ( format == RTAUDIO_SINT24 )
\r
7250 deviceFormat = SND_PCM_FORMAT_S24;
\r
7251 else if ( format == RTAUDIO_SINT32 )
\r
7252 deviceFormat = SND_PCM_FORMAT_S32;
\r
7253 else if ( format == RTAUDIO_FLOAT32 )
\r
7254 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7255 else if ( format == RTAUDIO_FLOAT64 )
\r
7256 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7258 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7259 stream_.deviceFormat[mode] = format;
\r
7263 // The user requested format is not natively supported by the device.
\r
7264 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7265 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7266 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7270 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7271 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7272 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7276 deviceFormat = SND_PCM_FORMAT_S32;
\r
7277 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7278 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7282 deviceFormat = SND_PCM_FORMAT_S24;
\r
7283 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7284 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7288 deviceFormat = SND_PCM_FORMAT_S16;
\r
7289 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7290 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7294 deviceFormat = SND_PCM_FORMAT_S8;
\r
7295 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7296 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7300 // If we get here, no supported format was found.
\r
7301 snd_pcm_close( phandle );
\r
7302 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7303 errorText_ = errorStream_.str();
\r
7307 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7308 if ( result < 0 ) {
\r
7309 snd_pcm_close( phandle );
\r
7310 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7311 errorText_ = errorStream_.str();
\r
7315 // Determine whether byte-swaping is necessary.
\r
7316 stream_.doByteSwap[mode] = false;
\r
7317 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7318 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7319 if ( result == 0 )
\r
7320 stream_.doByteSwap[mode] = true;
\r
7321 else if (result < 0) {
\r
7322 snd_pcm_close( phandle );
\r
7323 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7324 errorText_ = errorStream_.str();
\r
7329 // Set the sample rate.
\r
7330 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7331 if ( result < 0 ) {
\r
7332 snd_pcm_close( phandle );
\r
7333 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7334 errorText_ = errorStream_.str();
\r
7338 // Determine the number of channels for this device. We support a possible
\r
7339 // minimum device channel number > than the value requested by the user.
\r
7340 stream_.nUserChannels[mode] = channels;
\r
7341 unsigned int value;
\r
7342 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7343 unsigned int deviceChannels = value;
\r
7344 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7345 snd_pcm_close( phandle );
\r
7346 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7347 errorText_ = errorStream_.str();
\r
7351 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7352 if ( result < 0 ) {
\r
7353 snd_pcm_close( phandle );
\r
7354 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7355 errorText_ = errorStream_.str();
\r
7358 deviceChannels = value;
\r
7359 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7360 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7362 // Set the device channels.
\r
7363 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7364 if ( result < 0 ) {
\r
7365 snd_pcm_close( phandle );
\r
7366 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7367 errorText_ = errorStream_.str();
\r
7371 // Set the buffer (or period) size.
\r
7373 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7374 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7375 if ( result < 0 ) {
\r
7376 snd_pcm_close( phandle );
\r
7377 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7378 errorText_ = errorStream_.str();
\r
7381 *bufferSize = periodSize;
\r
7383 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7384 unsigned int periods = 0;
\r
7385 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7386 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7387 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7388 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7389 if ( result < 0 ) {
\r
7390 snd_pcm_close( phandle );
\r
7391 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7392 errorText_ = errorStream_.str();
\r
7396 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7397 // MUST be the same in both directions!
\r
7398 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7399 snd_pcm_close( phandle );
\r
7400 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7401 errorText_ = errorStream_.str();
\r
7405 stream_.bufferSize = *bufferSize;
\r
7407 // Install the hardware configuration
\r
7408 result = snd_pcm_hw_params( phandle, hw_params );
\r
7409 if ( result < 0 ) {
\r
7410 snd_pcm_close( phandle );
\r
7411 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7412 errorText_ = errorStream_.str();
\r
7416 #if defined(__RTAUDIO_DEBUG__)
\r
7417 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7418 snd_pcm_hw_params_dump( hw_params, out );
\r
7421 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7422 snd_pcm_sw_params_t *sw_params = NULL;
\r
7423 snd_pcm_sw_params_alloca( &sw_params );
\r
7424 snd_pcm_sw_params_current( phandle, sw_params );
\r
7425 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7426 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7427 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7429 // The following two settings were suggested by Theo Veenker
\r
7430 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7431 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7433 // here are two options for a fix
\r
7434 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7435 snd_pcm_uframes_t val;
\r
7436 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7437 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7439 result = snd_pcm_sw_params( phandle, sw_params );
\r
7440 if ( result < 0 ) {
\r
7441 snd_pcm_close( phandle );
\r
7442 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7443 errorText_ = errorStream_.str();
\r
7447 #if defined(__RTAUDIO_DEBUG__)
\r
7448 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7449 snd_pcm_sw_params_dump( sw_params, out );
\r
7452 // Set flags for buffer conversion
\r
7453 stream_.doConvertBuffer[mode] = false;
\r
7454 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7455 stream_.doConvertBuffer[mode] = true;
\r
7456 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7457 stream_.doConvertBuffer[mode] = true;
\r
7458 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7459 stream_.nUserChannels[mode] > 1 )
\r
7460 stream_.doConvertBuffer[mode] = true;
\r
7462 // Allocate the ApiHandle if necessary and then save.
\r
7463 AlsaHandle *apiInfo = 0;
\r
7464 if ( stream_.apiHandle == 0 ) {
\r
7466 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7468 catch ( std::bad_alloc& ) {
\r
7469 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7473 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7474 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7478 stream_.apiHandle = (void *) apiInfo;
\r
7479 apiInfo->handles[0] = 0;
\r
7480 apiInfo->handles[1] = 0;
\r
7483 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7485 apiInfo->handles[mode] = phandle;
\r
7488 // Allocate necessary internal buffers.
\r
7489 unsigned long bufferBytes;
\r
7490 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7491 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7492 if ( stream_.userBuffer[mode] == NULL ) {
\r
7493 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7497 if ( stream_.doConvertBuffer[mode] ) {
\r
7499 bool makeBuffer = true;
\r
7500 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7501 if ( mode == INPUT ) {
\r
7502 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7503 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7504 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7508 if ( makeBuffer ) {
\r
7509 bufferBytes *= *bufferSize;
\r
7510 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7511 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7512 if ( stream_.deviceBuffer == NULL ) {
\r
7513 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7519 stream_.sampleRate = sampleRate;
\r
7520 stream_.nBuffers = periods;
\r
7521 stream_.device[mode] = device;
\r
7522 stream_.state = STREAM_STOPPED;
\r
7524 // Setup the buffer conversion information structure.
\r
7525 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7527 // Setup thread if necessary.
\r
7528 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7529 // We had already set up an output stream.
\r
7530 stream_.mode = DUPLEX;
\r
7531 // Link the streams if possible.
\r
7532 apiInfo->synchronized = false;
\r
7533 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7534 apiInfo->synchronized = true;
\r
7536 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7537 error( RtAudioError::WARNING );
\r
7541 stream_.mode = mode;
\r
7543 // Setup callback thread.
\r
7544 stream_.callbackInfo.object = (void *) this;
\r
7546 // Set the thread attributes for joinable and realtime scheduling
\r
7547 // priority (optional). The higher priority will only take affect
\r
7548 // if the program is run as root or suid. Note, under Linux
\r
7549 // processes with CAP_SYS_NICE privilege, a user can change
\r
7550 // scheduling policy and priority (thus need not be root). See
\r
7551 // POSIX "capabilities".
\r
7552 pthread_attr_t attr;
\r
7553 pthread_attr_init( &attr );
\r
7554 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7556 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7557 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7558 // We previously attempted to increase the audio callback priority
\r
7559 // to SCHED_RR here via the attributes. However, while no errors
\r
7560 // were reported in doing so, it did not work. So, now this is
\r
7561 // done in the alsaCallbackHandler function.
\r
7562 stream_.callbackInfo.doRealtime = true;
\r
7563 int priority = options->priority;
\r
7564 int min = sched_get_priority_min( SCHED_RR );
\r
7565 int max = sched_get_priority_max( SCHED_RR );
\r
7566 if ( priority < min ) priority = min;
\r
7567 else if ( priority > max ) priority = max;
\r
7568 stream_.callbackInfo.priority = priority;
\r
7572 stream_.callbackInfo.isRunning = true;
\r
7573 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7574 pthread_attr_destroy( &attr );
\r
7576 stream_.callbackInfo.isRunning = false;
\r
7577 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7586 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7587 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7588 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7590 stream_.apiHandle = 0;
\r
7593 if ( phandle) snd_pcm_close( phandle );
\r
7595 for ( int i=0; i<2; i++ ) {
\r
7596 if ( stream_.userBuffer[i] ) {
\r
7597 free( stream_.userBuffer[i] );
\r
7598 stream_.userBuffer[i] = 0;
\r
7602 if ( stream_.deviceBuffer ) {
\r
7603 free( stream_.deviceBuffer );
\r
7604 stream_.deviceBuffer = 0;
\r
7607 stream_.state = STREAM_CLOSED;
\r
7611 void RtApiAlsa :: closeStream()
\r
7613 if ( stream_.state == STREAM_CLOSED ) {
\r
7614 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7615 error( RtAudioError::WARNING );
\r
7619 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7620 stream_.callbackInfo.isRunning = false;
\r
7621 MUTEX_LOCK( &stream_.mutex );
\r
7622 if ( stream_.state == STREAM_STOPPED ) {
\r
7623 apiInfo->runnable = true;
\r
7624 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7626 MUTEX_UNLOCK( &stream_.mutex );
\r
7627 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7629 if ( stream_.state == STREAM_RUNNING ) {
\r
7630 stream_.state = STREAM_STOPPED;
\r
7631 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7632 snd_pcm_drop( apiInfo->handles[0] );
\r
7633 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7634 snd_pcm_drop( apiInfo->handles[1] );
\r
7638 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7639 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7640 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7642 stream_.apiHandle = 0;
\r
7645 for ( int i=0; i<2; i++ ) {
\r
7646 if ( stream_.userBuffer[i] ) {
\r
7647 free( stream_.userBuffer[i] );
\r
7648 stream_.userBuffer[i] = 0;
\r
7652 if ( stream_.deviceBuffer ) {
\r
7653 free( stream_.deviceBuffer );
\r
7654 stream_.deviceBuffer = 0;
\r
7657 stream_.mode = UNINITIALIZED;
\r
7658 stream_.state = STREAM_CLOSED;
\r
7661 void RtApiAlsa :: startStream()
\r
7663 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7666 if ( stream_.state == STREAM_RUNNING ) {
\r
7667 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7668 error( RtAudioError::WARNING );
\r
7672 MUTEX_LOCK( &stream_.mutex );
\r
7675 snd_pcm_state_t state;
\r
7676 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7677 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7678 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7679 state = snd_pcm_state( handle[0] );
\r
7680 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7681 result = snd_pcm_prepare( handle[0] );
\r
7682 if ( result < 0 ) {
\r
7683 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7684 errorText_ = errorStream_.str();
\r
7690 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7691 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7692 state = snd_pcm_state( handle[1] );
\r
7693 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7694 result = snd_pcm_prepare( handle[1] );
\r
7695 if ( result < 0 ) {
\r
7696 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7697 errorText_ = errorStream_.str();
\r
7703 stream_.state = STREAM_RUNNING;
\r
7706 apiInfo->runnable = true;
\r
7707 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7708 MUTEX_UNLOCK( &stream_.mutex );
\r
7710 if ( result >= 0 ) return;
\r
7711 error( RtAudioError::SYSTEM_ERROR );
\r
7714 void RtApiAlsa :: stopStream()
\r
7717 if ( stream_.state == STREAM_STOPPED ) {
\r
7718 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7719 error( RtAudioError::WARNING );
\r
7723 stream_.state = STREAM_STOPPED;
\r
7724 MUTEX_LOCK( &stream_.mutex );
\r
7727 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7728 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7729 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7730 if ( apiInfo->synchronized )
\r
7731 result = snd_pcm_drop( handle[0] );
\r
7733 result = snd_pcm_drain( handle[0] );
\r
7734 if ( result < 0 ) {
\r
7735 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7736 errorText_ = errorStream_.str();
\r
7741 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7742 result = snd_pcm_drop( handle[1] );
\r
7743 if ( result < 0 ) {
\r
7744 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7745 errorText_ = errorStream_.str();
\r
7751 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7752 MUTEX_UNLOCK( &stream_.mutex );
\r
7754 if ( result >= 0 ) return;
\r
7755 error( RtAudioError::SYSTEM_ERROR );
\r
7758 void RtApiAlsa :: abortStream()
\r
7761 if ( stream_.state == STREAM_STOPPED ) {
\r
7762 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7763 error( RtAudioError::WARNING );
\r
7767 stream_.state = STREAM_STOPPED;
\r
7768 MUTEX_LOCK( &stream_.mutex );
\r
7771 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7772 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7773 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7774 result = snd_pcm_drop( handle[0] );
\r
7775 if ( result < 0 ) {
\r
7776 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7777 errorText_ = errorStream_.str();
\r
7782 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7783 result = snd_pcm_drop( handle[1] );
\r
7784 if ( result < 0 ) {
\r
7785 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7786 errorText_ = errorStream_.str();
\r
7792 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7793 MUTEX_UNLOCK( &stream_.mutex );
\r
7795 if ( result >= 0 ) return;
\r
7796 error( RtAudioError::SYSTEM_ERROR );
\r
7799 void RtApiAlsa :: callbackEvent()
\r
7801 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7802 if ( stream_.state == STREAM_STOPPED ) {
\r
7803 MUTEX_LOCK( &stream_.mutex );
\r
7804 while ( !apiInfo->runnable )
\r
7805 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7807 if ( stream_.state != STREAM_RUNNING ) {
\r
7808 MUTEX_UNLOCK( &stream_.mutex );
\r
7811 MUTEX_UNLOCK( &stream_.mutex );
\r
7814 if ( stream_.state == STREAM_CLOSED ) {
\r
7815 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7816 error( RtAudioError::WARNING );
\r
7820 int doStopStream = 0;
\r
7821 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7822 double streamTime = getStreamTime();
\r
7823 RtAudioStreamStatus status = 0;
\r
7824 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7825 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7826 apiInfo->xrun[0] = false;
\r
7828 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7829 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7830 apiInfo->xrun[1] = false;
\r
7832 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7833 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7835 if ( doStopStream == 2 ) {
\r
7840 MUTEX_LOCK( &stream_.mutex );
\r
7842 // The state might change while waiting on a mutex.
\r
7843 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7848 snd_pcm_t **handle;
\r
7849 snd_pcm_sframes_t frames;
\r
7850 RtAudioFormat format;
\r
7851 handle = (snd_pcm_t **) apiInfo->handles;
\r
7853 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7855 // Setup parameters.
\r
7856 if ( stream_.doConvertBuffer[1] ) {
\r
7857 buffer = stream_.deviceBuffer;
\r
7858 channels = stream_.nDeviceChannels[1];
\r
7859 format = stream_.deviceFormat[1];
\r
7862 buffer = stream_.userBuffer[1];
\r
7863 channels = stream_.nUserChannels[1];
\r
7864 format = stream_.userFormat;
\r
7867 // Read samples from device in interleaved/non-interleaved format.
\r
7868 if ( stream_.deviceInterleaved[1] )
\r
7869 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7871 void *bufs[channels];
\r
7872 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7873 for ( int i=0; i<channels; i++ )
\r
7874 bufs[i] = (void *) (buffer + (i * offset));
\r
7875 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7878 if ( result < (int) stream_.bufferSize ) {
\r
7879 // Either an error or overrun occured.
\r
7880 if ( result == -EPIPE ) {
\r
7881 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7882 if ( state == SND_PCM_STATE_XRUN ) {
\r
7883 apiInfo->xrun[1] = true;
\r
7884 result = snd_pcm_prepare( handle[1] );
\r
7885 if ( result < 0 ) {
\r
7886 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7887 errorText_ = errorStream_.str();
\r
7891 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7892 errorText_ = errorStream_.str();
\r
7896 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7897 errorText_ = errorStream_.str();
\r
7899 error( RtAudioError::WARNING );
\r
7903 // Do byte swapping if necessary.
\r
7904 if ( stream_.doByteSwap[1] )
\r
7905 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7907 // Do buffer conversion if necessary.
\r
7908 if ( stream_.doConvertBuffer[1] )
\r
7909 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7911 // Check stream latency
\r
7912 result = snd_pcm_delay( handle[1], &frames );
\r
7913 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7918 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7920 // Setup parameters and do buffer conversion if necessary.
\r
7921 if ( stream_.doConvertBuffer[0] ) {
\r
7922 buffer = stream_.deviceBuffer;
\r
7923 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7924 channels = stream_.nDeviceChannels[0];
\r
7925 format = stream_.deviceFormat[0];
\r
7928 buffer = stream_.userBuffer[0];
\r
7929 channels = stream_.nUserChannels[0];
\r
7930 format = stream_.userFormat;
\r
7933 // Do byte swapping if necessary.
\r
7934 if ( stream_.doByteSwap[0] )
\r
7935 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7937 // Write samples to device in interleaved/non-interleaved format.
\r
7938 if ( stream_.deviceInterleaved[0] )
\r
7939 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7941 void *bufs[channels];
\r
7942 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7943 for ( int i=0; i<channels; i++ )
\r
7944 bufs[i] = (void *) (buffer + (i * offset));
\r
7945 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7948 if ( result < (int) stream_.bufferSize ) {
\r
7949 // Either an error or underrun occured.
\r
7950 if ( result == -EPIPE ) {
\r
7951 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7952 if ( state == SND_PCM_STATE_XRUN ) {
\r
7953 apiInfo->xrun[0] = true;
\r
7954 result = snd_pcm_prepare( handle[0] );
\r
7955 if ( result < 0 ) {
\r
7956 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7957 errorText_ = errorStream_.str();
\r
7961 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7962 errorText_ = errorStream_.str();
\r
7966 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7967 errorText_ = errorStream_.str();
\r
7969 error( RtAudioError::WARNING );
\r
7973 // Check stream latency
\r
7974 result = snd_pcm_delay( handle[0], &frames );
\r
7975 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7979 MUTEX_UNLOCK( &stream_.mutex );
\r
7981 RtApi::tickStreamTime();
\r
7982 if ( doStopStream == 1 ) this->stopStream();
\r
7985 static void *alsaCallbackHandler( void *ptr )
\r
7987 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7988 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7989 bool *isRunning = &info->isRunning;
\r
7991 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7992 if ( &info->doRealtime ) {
\r
7993 pthread_t tID = pthread_self(); // ID of this thread
\r
7994 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7995 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7999 while ( *isRunning == true ) {
\r
8000 pthread_testcancel();
\r
8001 object->callbackEvent();
\r
8004 pthread_exit( NULL );
\r
8007 //******************** End of __LINUX_ALSA__ *********************//
\r
8010 #if defined(__LINUX_PULSE__)
\r
8012 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8013 // and Tristan Matthews.
\r
8015 #include <pulse/error.h>
\r
8016 #include <pulse/simple.h>
\r
8019 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8020 44100, 48000, 96000, 0};
\r
8022 struct rtaudio_pa_format_mapping_t {
\r
8023 RtAudioFormat rtaudio_format;
\r
8024 pa_sample_format_t pa_format;
\r
8027 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8028 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8029 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8030 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8031 {0, PA_SAMPLE_INVALID}};
\r
8033 struct PulseAudioHandle {
\r
8034 pa_simple *s_play;
\r
8037 pthread_cond_t runnable_cv;
\r
8039 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8042 RtApiPulse::~RtApiPulse()
\r
8044 if ( stream_.state != STREAM_CLOSED )
\r
8048 unsigned int RtApiPulse::getDeviceCount( void )
\r
8053 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8055 RtAudio::DeviceInfo info;
\r
8056 info.probed = true;
\r
8057 info.name = "PulseAudio";
\r
8058 info.outputChannels = 2;
\r
8059 info.inputChannels = 2;
\r
8060 info.duplexChannels = 2;
\r
8061 info.isDefaultOutput = true;
\r
8062 info.isDefaultInput = true;
\r
8064 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8065 info.sampleRates.push_back( *sr );
\r
8067 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8072 static void *pulseaudio_callback( void * user )
\r
8074 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8075 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8076 volatile bool *isRunning = &cbi->isRunning;
\r
8078 while ( *isRunning ) {
\r
8079 pthread_testcancel();
\r
8080 context->callbackEvent();
\r
8083 pthread_exit( NULL );
\r
8086 void RtApiPulse::closeStream( void )
\r
8088 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8090 stream_.callbackInfo.isRunning = false;
\r
8092 MUTEX_LOCK( &stream_.mutex );
\r
8093 if ( stream_.state == STREAM_STOPPED ) {
\r
8094 pah->runnable = true;
\r
8095 pthread_cond_signal( &pah->runnable_cv );
\r
8097 MUTEX_UNLOCK( &stream_.mutex );
\r
8099 pthread_join( pah->thread, 0 );
\r
8100 if ( pah->s_play ) {
\r
8101 pa_simple_flush( pah->s_play, NULL );
\r
8102 pa_simple_free( pah->s_play );
\r
8105 pa_simple_free( pah->s_rec );
\r
8107 pthread_cond_destroy( &pah->runnable_cv );
\r
8109 stream_.apiHandle = 0;
\r
8112 if ( stream_.userBuffer[0] ) {
\r
8113 free( stream_.userBuffer[0] );
\r
8114 stream_.userBuffer[0] = 0;
\r
8116 if ( stream_.userBuffer[1] ) {
\r
8117 free( stream_.userBuffer[1] );
\r
8118 stream_.userBuffer[1] = 0;
\r
8121 stream_.state = STREAM_CLOSED;
\r
8122 stream_.mode = UNINITIALIZED;
\r
8125 void RtApiPulse::callbackEvent( void )
\r
8127 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8129 if ( stream_.state == STREAM_STOPPED ) {
\r
8130 MUTEX_LOCK( &stream_.mutex );
\r
8131 while ( !pah->runnable )
\r
8132 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8134 if ( stream_.state != STREAM_RUNNING ) {
\r
8135 MUTEX_UNLOCK( &stream_.mutex );
\r
8138 MUTEX_UNLOCK( &stream_.mutex );
\r
8141 if ( stream_.state == STREAM_CLOSED ) {
\r
8142 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8143 "this shouldn't happen!";
\r
8144 error( RtAudioError::WARNING );
\r
8148 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8149 double streamTime = getStreamTime();
\r
8150 RtAudioStreamStatus status = 0;
\r
8151 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8152 stream_.bufferSize, streamTime, status,
\r
8153 stream_.callbackInfo.userData );
\r
8155 if ( doStopStream == 2 ) {
\r
8160 MUTEX_LOCK( &stream_.mutex );
\r
8161 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8162 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8164 if ( stream_.state != STREAM_RUNNING )
\r
8169 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8170 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8171 convertBuffer( stream_.deviceBuffer,
\r
8172 stream_.userBuffer[OUTPUT],
\r
8173 stream_.convertInfo[OUTPUT] );
\r
8174 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8175 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8177 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8178 formatBytes( stream_.userFormat );
\r
8180 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8181 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8182 pa_strerror( pa_error ) << ".";
\r
8183 errorText_ = errorStream_.str();
\r
8184 error( RtAudioError::WARNING );
\r
8188 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8189 if ( stream_.doConvertBuffer[INPUT] )
\r
8190 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8191 formatBytes( stream_.deviceFormat[INPUT] );
\r
8193 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8194 formatBytes( stream_.userFormat );
\r
8196 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8197 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8198 pa_strerror( pa_error ) << ".";
\r
8199 errorText_ = errorStream_.str();
\r
8200 error( RtAudioError::WARNING );
\r
8202 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8203 convertBuffer( stream_.userBuffer[INPUT],
\r
8204 stream_.deviceBuffer,
\r
8205 stream_.convertInfo[INPUT] );
\r
8210 MUTEX_UNLOCK( &stream_.mutex );
\r
8211 RtApi::tickStreamTime();
\r
8213 if ( doStopStream == 1 )
\r
8217 void RtApiPulse::startStream( void )
\r
8219 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8221 if ( stream_.state == STREAM_CLOSED ) {
\r
8222 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8223 error( RtAudioError::INVALID_USE );
\r
8226 if ( stream_.state == STREAM_RUNNING ) {
\r
8227 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8228 error( RtAudioError::WARNING );
\r
8232 MUTEX_LOCK( &stream_.mutex );
\r
8234 stream_.state = STREAM_RUNNING;
\r
8236 pah->runnable = true;
\r
8237 pthread_cond_signal( &pah->runnable_cv );
\r
8238 MUTEX_UNLOCK( &stream_.mutex );
\r
8241 void RtApiPulse::stopStream( void )
\r
8243 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8245 if ( stream_.state == STREAM_CLOSED ) {
\r
8246 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8247 error( RtAudioError::INVALID_USE );
\r
8250 if ( stream_.state == STREAM_STOPPED ) {
\r
8251 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8252 error( RtAudioError::WARNING );
\r
8256 stream_.state = STREAM_STOPPED;
\r
8257 MUTEX_LOCK( &stream_.mutex );
\r
8259 if ( pah && pah->s_play ) {
\r
8261 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8262 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8263 pa_strerror( pa_error ) << ".";
\r
8264 errorText_ = errorStream_.str();
\r
8265 MUTEX_UNLOCK( &stream_.mutex );
\r
8266 error( RtAudioError::SYSTEM_ERROR );
\r
8271 stream_.state = STREAM_STOPPED;
\r
8272 MUTEX_UNLOCK( &stream_.mutex );
\r
8275 void RtApiPulse::abortStream( void )
\r
8277 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8279 if ( stream_.state == STREAM_CLOSED ) {
\r
8280 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8281 error( RtAudioError::INVALID_USE );
\r
8284 if ( stream_.state == STREAM_STOPPED ) {
\r
8285 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8286 error( RtAudioError::WARNING );
\r
8290 stream_.state = STREAM_STOPPED;
\r
8291 MUTEX_LOCK( &stream_.mutex );
\r
8293 if ( pah && pah->s_play ) {
\r
8295 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8296 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8297 pa_strerror( pa_error ) << ".";
\r
8298 errorText_ = errorStream_.str();
\r
8299 MUTEX_UNLOCK( &stream_.mutex );
\r
8300 error( RtAudioError::SYSTEM_ERROR );
\r
8305 stream_.state = STREAM_STOPPED;
\r
8306 MUTEX_UNLOCK( &stream_.mutex );
\r
8309 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8310 unsigned int channels, unsigned int firstChannel,
\r
8311 unsigned int sampleRate, RtAudioFormat format,
\r
8312 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8314 PulseAudioHandle *pah = 0;
\r
8315 unsigned long bufferBytes = 0;
\r
8316 pa_sample_spec ss;
\r
8318 if ( device != 0 ) return false;
\r
8319 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8320 if ( channels != 1 && channels != 2 ) {
\r
8321 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8324 ss.channels = channels;
\r
8326 if ( firstChannel != 0 ) return false;
\r
8328 bool sr_found = false;
\r
8329 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8330 if ( sampleRate == *sr ) {
\r
8332 stream_.sampleRate = sampleRate;
\r
8333 ss.rate = sampleRate;
\r
8337 if ( !sr_found ) {
\r
8338 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8342 bool sf_found = 0;
\r
8343 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8344 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8345 if ( format == sf->rtaudio_format ) {
\r
8347 stream_.userFormat = sf->rtaudio_format;
\r
8348 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8349 ss.format = sf->pa_format;
\r
8353 if ( !sf_found ) { // Use internal data format conversion.
\r
8354 stream_.userFormat = format;
\r
8355 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8356 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8359 // Set other stream parameters.
\r
8360 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8361 else stream_.userInterleaved = true;
\r
8362 stream_.deviceInterleaved[mode] = true;
\r
8363 stream_.nBuffers = 1;
\r
8364 stream_.doByteSwap[mode] = false;
\r
8365 stream_.nUserChannels[mode] = channels;
\r
8366 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8367 stream_.channelOffset[mode] = 0;
\r
8368 std::string streamName = "RtAudio";
\r
8370 // Set flags for buffer conversion.
\r
8371 stream_.doConvertBuffer[mode] = false;
\r
8372 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8373 stream_.doConvertBuffer[mode] = true;
\r
8374 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8375 stream_.doConvertBuffer[mode] = true;
\r
8377 // Allocate necessary internal buffers.
\r
8378 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8379 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8380 if ( stream_.userBuffer[mode] == NULL ) {
\r
8381 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8384 stream_.bufferSize = *bufferSize;
\r
8386 if ( stream_.doConvertBuffer[mode] ) {
\r
8388 bool makeBuffer = true;
\r
8389 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8390 if ( mode == INPUT ) {
\r
8391 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8392 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8393 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8397 if ( makeBuffer ) {
\r
8398 bufferBytes *= *bufferSize;
\r
8399 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8400 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8401 if ( stream_.deviceBuffer == NULL ) {
\r
8402 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8408 stream_.device[mode] = device;
\r
8410 // Setup the buffer conversion information structure.
\r
8411 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8413 if ( !stream_.apiHandle ) {
\r
8414 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8416 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8420 stream_.apiHandle = pah;
\r
8421 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8422 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8426 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8429 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8432 pa_buffer_attr buffer_attr;
\r
8433 buffer_attr.fragsize = bufferBytes;
\r
8434 buffer_attr.maxlength = -1;
\r
8436 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8437 if ( !pah->s_rec ) {
\r
8438 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8443 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8444 if ( !pah->s_play ) {
\r
8445 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8453 if ( stream_.mode == UNINITIALIZED )
\r
8454 stream_.mode = mode;
\r
8455 else if ( stream_.mode == mode )
\r
8458 stream_.mode = DUPLEX;
\r
8460 if ( !stream_.callbackInfo.isRunning ) {
\r
8461 stream_.callbackInfo.object = this;
\r
8462 stream_.callbackInfo.isRunning = true;
\r
8463 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8464 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8469 stream_.state = STREAM_STOPPED;
\r
8473 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8474 pthread_cond_destroy( &pah->runnable_cv );
\r
8476 stream_.apiHandle = 0;
\r
8479 for ( int i=0; i<2; i++ ) {
\r
8480 if ( stream_.userBuffer[i] ) {
\r
8481 free( stream_.userBuffer[i] );
\r
8482 stream_.userBuffer[i] = 0;
\r
8486 if ( stream_.deviceBuffer ) {
\r
8487 free( stream_.deviceBuffer );
\r
8488 stream_.deviceBuffer = 0;
\r
8494 //******************** End of __LINUX_PULSE__ *********************//
\r
8497 #if defined(__LINUX_OSS__)
\r
8499 #include <unistd.h>
\r
8500 #include <sys/ioctl.h>
\r
8501 #include <unistd.h>
\r
8502 #include <fcntl.h>
\r
8503 #include <sys/soundcard.h>
\r
8504 #include <errno.h>
\r
8507 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device file descriptors: [0] playback, [1] capture
  bool xrun[2];            // under/overflow flags, reported to the user callback
  bool triggered;          // true once output has been primed/triggered
  pthread_cond_t runnable; // signaled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8521 RtApiOss :: RtApiOss()
\r
8523 // Nothing to do here.
\r
8526 RtApiOss :: ~RtApiOss()
\r
8528 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8531 unsigned int RtApiOss :: getDeviceCount( void )
\r
8533 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8534 if ( mixerfd == -1 ) {
\r
8535 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8536 error( RtAudioError::WARNING );
\r
8540 oss_sysinfo sysinfo;
\r
8541 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8543 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8544 error( RtAudioError::WARNING );
\r
8549 return sysinfo.numaudios;
\r
8552 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8554 RtAudio::DeviceInfo info;
\r
8555 info.probed = false;
\r
8557 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8558 if ( mixerfd == -1 ) {
\r
8559 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8560 error( RtAudioError::WARNING );
\r
8564 oss_sysinfo sysinfo;
\r
8565 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8566 if ( result == -1 ) {
\r
8568 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8569 error( RtAudioError::WARNING );
\r
8573 unsigned nDevices = sysinfo.numaudios;
\r
8574 if ( nDevices == 0 ) {
\r
8576 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8577 error( RtAudioError::INVALID_USE );
\r
8581 if ( device >= nDevices ) {
\r
8583 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8584 error( RtAudioError::INVALID_USE );
\r
8588 oss_audioinfo ainfo;
\r
8589 ainfo.dev = device;
\r
8590 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8592 if ( result == -1 ) {
\r
8593 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8594 errorText_ = errorStream_.str();
\r
8595 error( RtAudioError::WARNING );
\r
8600 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8601 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8602 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8603 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8604 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8607 // Probe data formats ... do for input
\r
8608 unsigned long mask = ainfo.iformats;
\r
8609 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8610 info.nativeFormats |= RTAUDIO_SINT16;
\r
8611 if ( mask & AFMT_S8 )
\r
8612 info.nativeFormats |= RTAUDIO_SINT8;
\r
8613 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8614 info.nativeFormats |= RTAUDIO_SINT32;
\r
8615 if ( mask & AFMT_FLOAT )
\r
8616 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8617 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8618 info.nativeFormats |= RTAUDIO_SINT24;
\r
8620 // Check that we have at least one supported format
\r
8621 if ( info.nativeFormats == 0 ) {
\r
8622 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8623 errorText_ = errorStream_.str();
\r
8624 error( RtAudioError::WARNING );
\r
8628 // Probe the supported sample rates.
\r
8629 info.sampleRates.clear();
\r
8630 if ( ainfo.nrates ) {
\r
8631 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8632 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8633 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8634 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8641 // Check min and max rate values;
\r
8642 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8643 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8644 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8648 if ( info.sampleRates.size() == 0 ) {
\r
8649 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8650 errorText_ = errorStream_.str();
\r
8651 error( RtAudioError::WARNING );
\r
8654 info.probed = true;
\r
8655 info.name = ainfo.name;
\r
8662 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8663 unsigned int firstChannel, unsigned int sampleRate,
\r
8664 RtAudioFormat format, unsigned int *bufferSize,
\r
8665 RtAudio::StreamOptions *options )
\r
8667 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8668 if ( mixerfd == -1 ) {
\r
8669 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8673 oss_sysinfo sysinfo;
\r
8674 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8675 if ( result == -1 ) {
\r
8677 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8681 unsigned nDevices = sysinfo.numaudios;
\r
8682 if ( nDevices == 0 ) {
\r
8683 // This should not happen because a check is made before this function is called.
\r
8685 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8689 if ( device >= nDevices ) {
\r
8690 // This should not happen because a check is made before this function is called.
\r
8692 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8696 oss_audioinfo ainfo;
\r
8697 ainfo.dev = device;
\r
8698 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8700 if ( result == -1 ) {
\r
8701 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8702 errorText_ = errorStream_.str();
\r
8706 // Check if device supports input or output
\r
8707 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8708 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8709 if ( mode == OUTPUT )
\r
8710 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8712 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8713 errorText_ = errorStream_.str();
\r
8718 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8719 if ( mode == OUTPUT )
\r
8720 flags |= O_WRONLY;
\r
8721 else { // mode == INPUT
\r
8722 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8723 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8724 close( handle->id[0] );
\r
8725 handle->id[0] = 0;
\r
8726 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8727 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8728 errorText_ = errorStream_.str();
\r
8731 // Check that the number previously set channels is the same.
\r
8732 if ( stream_.nUserChannels[0] != channels ) {
\r
8733 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8734 errorText_ = errorStream_.str();
\r
8740 flags |= O_RDONLY;
\r
8743 // Set exclusive access if specified.
\r
8744 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8746 // Try to open the device.
\r
8748 fd = open( ainfo.devnode, flags, 0 );
\r
8750 if ( errno == EBUSY )
\r
8751 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8753 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8754 errorText_ = errorStream_.str();
\r
8758 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8760 if ( flags | O_RDWR ) {
\r
8761 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8762 if ( result == -1) {
\r
8763 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8764 errorText_ = errorStream_.str();
\r
8770 // Check the device channel support.
\r
8771 stream_.nUserChannels[mode] = channels;
\r
8772 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8774 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8775 errorText_ = errorStream_.str();
\r
8779 // Set the number of channels.
\r
8780 int deviceChannels = channels + firstChannel;
\r
8781 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8782 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8784 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8785 errorText_ = errorStream_.str();
\r
8788 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8790 // Get the data format mask
\r
8792 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8793 if ( result == -1 ) {
\r
8795 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8796 errorText_ = errorStream_.str();
\r
8800 // Determine how to set the device format.
\r
8801 stream_.userFormat = format;
\r
8802 int deviceFormat = -1;
\r
8803 stream_.doByteSwap[mode] = false;
\r
8804 if ( format == RTAUDIO_SINT8 ) {
\r
8805 if ( mask & AFMT_S8 ) {
\r
8806 deviceFormat = AFMT_S8;
\r
8807 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8810 else if ( format == RTAUDIO_SINT16 ) {
\r
8811 if ( mask & AFMT_S16_NE ) {
\r
8812 deviceFormat = AFMT_S16_NE;
\r
8813 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8815 else if ( mask & AFMT_S16_OE ) {
\r
8816 deviceFormat = AFMT_S16_OE;
\r
8817 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8818 stream_.doByteSwap[mode] = true;
\r
8821 else if ( format == RTAUDIO_SINT24 ) {
\r
8822 if ( mask & AFMT_S24_NE ) {
\r
8823 deviceFormat = AFMT_S24_NE;
\r
8824 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8826 else if ( mask & AFMT_S24_OE ) {
\r
8827 deviceFormat = AFMT_S24_OE;
\r
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8829 stream_.doByteSwap[mode] = true;
\r
8832 else if ( format == RTAUDIO_SINT32 ) {
\r
8833 if ( mask & AFMT_S32_NE ) {
\r
8834 deviceFormat = AFMT_S32_NE;
\r
8835 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8837 else if ( mask & AFMT_S32_OE ) {
\r
8838 deviceFormat = AFMT_S32_OE;
\r
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8840 stream_.doByteSwap[mode] = true;
\r
8844 if ( deviceFormat == -1 ) {
\r
8845 // The user requested format is not natively supported by the device.
\r
8846 if ( mask & AFMT_S16_NE ) {
\r
8847 deviceFormat = AFMT_S16_NE;
\r
8848 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8850 else if ( mask & AFMT_S32_NE ) {
\r
8851 deviceFormat = AFMT_S32_NE;
\r
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8854 else if ( mask & AFMT_S24_NE ) {
\r
8855 deviceFormat = AFMT_S24_NE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8858 else if ( mask & AFMT_S16_OE ) {
\r
8859 deviceFormat = AFMT_S16_OE;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8861 stream_.doByteSwap[mode] = true;
\r
8863 else if ( mask & AFMT_S32_OE ) {
\r
8864 deviceFormat = AFMT_S32_OE;
\r
8865 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8866 stream_.doByteSwap[mode] = true;
\r
8868 else if ( mask & AFMT_S24_OE ) {
\r
8869 deviceFormat = AFMT_S24_OE;
\r
8870 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8871 stream_.doByteSwap[mode] = true;
\r
8873 else if ( mask & AFMT_S8) {
\r
8874 deviceFormat = AFMT_S8;
\r
8875 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8879 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8880 // This really shouldn't happen ...
\r
8882 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8883 errorText_ = errorStream_.str();
\r
8887 // Set the data format.
\r
8888 int temp = deviceFormat;
\r
8889 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8890 if ( result == -1 || deviceFormat != temp ) {
\r
8892 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8893 errorText_ = errorStream_.str();
\r
8897 // Attempt to set the buffer size. According to OSS, the minimum
\r
8898 // number of buffers is two. The supposed minimum buffer size is 16
\r
8899 // bytes, so that will be our lower bound. The argument to this
\r
8900 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8901 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8902 // We'll check the actual value used near the end of the setup
\r
8904 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8905 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8907 if ( options ) buffers = options->numberOfBuffers;
\r
8908 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8909 if ( buffers < 2 ) buffers = 3;
\r
8910 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8911 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8912 if ( result == -1 ) {
\r
8914 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8915 errorText_ = errorStream_.str();
\r
8918 stream_.nBuffers = buffers;
\r
8920 // Save buffer size (in sample frames).
\r
8921 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8922 stream_.bufferSize = *bufferSize;
\r
8924 // Set the sample rate.
\r
8925 int srate = sampleRate;
\r
8926 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8927 if ( result == -1 ) {
\r
8929 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8930 errorText_ = errorStream_.str();
\r
8934 // Verify the sample rate setup worked.
\r
8935 if ( abs( srate - sampleRate ) > 100 ) {
\r
8937 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8938 errorText_ = errorStream_.str();
\r
8941 stream_.sampleRate = sampleRate;
\r
8943 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8944 // We're doing duplex setup here.
\r
8945 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8946 stream_.nDeviceChannels[0] = deviceChannels;
\r
8949 // Set interleaving parameters.
\r
8950 stream_.userInterleaved = true;
\r
8951 stream_.deviceInterleaved[mode] = true;
\r
8952 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8953 stream_.userInterleaved = false;
\r
8955 // Set flags for buffer conversion
\r
8956 stream_.doConvertBuffer[mode] = false;
\r
8957 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8958 stream_.doConvertBuffer[mode] = true;
\r
8959 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8960 stream_.doConvertBuffer[mode] = true;
\r
8961 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8962 stream_.nUserChannels[mode] > 1 )
\r
8963 stream_.doConvertBuffer[mode] = true;
\r
8965 // Allocate the stream handles if necessary and then save.
\r
8966 if ( stream_.apiHandle == 0 ) {
\r
8968 handle = new OssHandle;
\r
8970 catch ( std::bad_alloc& ) {
\r
8971 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8975 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8976 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8980 stream_.apiHandle = (void *) handle;
\r
8983 handle = (OssHandle *) stream_.apiHandle;
\r
8985 handle->id[mode] = fd;
\r
8987 // Allocate necessary internal buffers.
\r
8988 unsigned long bufferBytes;
\r
8989 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8990 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8991 if ( stream_.userBuffer[mode] == NULL ) {
\r
8992 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8996 if ( stream_.doConvertBuffer[mode] ) {
\r
8998 bool makeBuffer = true;
\r
8999 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9000 if ( mode == INPUT ) {
\r
9001 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9002 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9003 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9007 if ( makeBuffer ) {
\r
9008 bufferBytes *= *bufferSize;
\r
9009 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9010 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9011 if ( stream_.deviceBuffer == NULL ) {
\r
9012 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9018 stream_.device[mode] = device;
\r
9019 stream_.state = STREAM_STOPPED;
\r
9021 // Setup the buffer conversion information structure.
\r
9022 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9024 // Setup thread if necessary.
\r
9025 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9026 // We had already set up an output stream.
\r
9027 stream_.mode = DUPLEX;
\r
9028 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9031 stream_.mode = mode;
\r
9033 // Setup callback thread.
\r
9034 stream_.callbackInfo.object = (void *) this;
\r
9036 // Set the thread attributes for joinable and realtime scheduling
\r
9037 // priority. The higher priority will only take affect if the
\r
9038 // program is run as root or suid.
\r
9039 pthread_attr_t attr;
\r
9040 pthread_attr_init( &attr );
\r
9041 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9042 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9043 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9044 struct sched_param param;
\r
9045 int priority = options->priority;
\r
9046 int min = sched_get_priority_min( SCHED_RR );
\r
9047 int max = sched_get_priority_max( SCHED_RR );
\r
9048 if ( priority < min ) priority = min;
\r
9049 else if ( priority > max ) priority = max;
\r
9050 param.sched_priority = priority;
\r
9051 pthread_attr_setschedparam( &attr, ¶m );
\r
9052 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9055 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9057 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9060 stream_.callbackInfo.isRunning = true;
\r
9061 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9062 pthread_attr_destroy( &attr );
\r
9064 stream_.callbackInfo.isRunning = false;
\r
9065 errorText_ = "RtApiOss::error creating callback thread!";
\r
9074 pthread_cond_destroy( &handle->runnable );
\r
9075 if ( handle->id[0] ) close( handle->id[0] );
\r
9076 if ( handle->id[1] ) close( handle->id[1] );
\r
9078 stream_.apiHandle = 0;
\r
9081 for ( int i=0; i<2; i++ ) {
\r
9082 if ( stream_.userBuffer[i] ) {
\r
9083 free( stream_.userBuffer[i] );
\r
9084 stream_.userBuffer[i] = 0;
\r
9088 if ( stream_.deviceBuffer ) {
\r
9089 free( stream_.deviceBuffer );
\r
9090 stream_.deviceBuffer = 0;
\r
9096 void RtApiOss :: closeStream()
\r
9098 if ( stream_.state == STREAM_CLOSED ) {
\r
9099 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9100 error( RtAudioError::WARNING );
\r
9104 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9105 stream_.callbackInfo.isRunning = false;
\r
9106 MUTEX_LOCK( &stream_.mutex );
\r
9107 if ( stream_.state == STREAM_STOPPED )
\r
9108 pthread_cond_signal( &handle->runnable );
\r
9109 MUTEX_UNLOCK( &stream_.mutex );
\r
9110 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9112 if ( stream_.state == STREAM_RUNNING ) {
\r
9113 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9114 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9116 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9117 stream_.state = STREAM_STOPPED;
\r
9121 pthread_cond_destroy( &handle->runnable );
\r
9122 if ( handle->id[0] ) close( handle->id[0] );
\r
9123 if ( handle->id[1] ) close( handle->id[1] );
\r
9125 stream_.apiHandle = 0;
\r
9128 for ( int i=0; i<2; i++ ) {
\r
9129 if ( stream_.userBuffer[i] ) {
\r
9130 free( stream_.userBuffer[i] );
\r
9131 stream_.userBuffer[i] = 0;
\r
9135 if ( stream_.deviceBuffer ) {
\r
9136 free( stream_.deviceBuffer );
\r
9137 stream_.deviceBuffer = 0;
\r
9140 stream_.mode = UNINITIALIZED;
\r
9141 stream_.state = STREAM_CLOSED;
\r
9144 void RtApiOss :: startStream()
\r
9147 if ( stream_.state == STREAM_RUNNING ) {
\r
9148 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9149 error( RtAudioError::WARNING );
\r
9153 MUTEX_LOCK( &stream_.mutex );
\r
9155 stream_.state = STREAM_RUNNING;
\r
9157 // No need to do anything else here ... OSS automatically starts
\r
9158 // when fed samples.
\r
9160 MUTEX_UNLOCK( &stream_.mutex );
\r
9162 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9163 pthread_cond_signal( &handle->runnable );
\r
9166 void RtApiOss :: stopStream()
\r
9169 if ( stream_.state == STREAM_STOPPED ) {
\r
9170 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9171 error( RtAudioError::WARNING );
\r
9175 MUTEX_LOCK( &stream_.mutex );
\r
9177 // The state might change while waiting on a mutex.
\r
9178 if ( stream_.state == STREAM_STOPPED ) {
\r
9179 MUTEX_UNLOCK( &stream_.mutex );
\r
9184 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9185 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9187 // Flush the output with zeros a few times.
\r
9190 RtAudioFormat format;
\r
9192 if ( stream_.doConvertBuffer[0] ) {
\r
9193 buffer = stream_.deviceBuffer;
\r
9194 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9195 format = stream_.deviceFormat[0];
\r
9198 buffer = stream_.userBuffer[0];
\r
9199 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9200 format = stream_.userFormat;
\r
9203 memset( buffer, 0, samples * formatBytes(format) );
\r
9204 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9205 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9206 if ( result == -1 ) {
\r
9207 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9208 error( RtAudioError::WARNING );
\r
9212 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9213 if ( result == -1 ) {
\r
9214 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9215 errorText_ = errorStream_.str();
\r
9218 handle->triggered = false;
\r
9221 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9222 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9223 if ( result == -1 ) {
\r
9224 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9225 errorText_ = errorStream_.str();
\r
9231 stream_.state = STREAM_STOPPED;
\r
9232 MUTEX_UNLOCK( &stream_.mutex );
\r
9234 if ( result != -1 ) return;
\r
9235 error( RtAudioError::SYSTEM_ERROR );
\r
9238 void RtApiOss :: abortStream()
\r
9241 if ( stream_.state == STREAM_STOPPED ) {
\r
9242 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9243 error( RtAudioError::WARNING );
\r
9247 MUTEX_LOCK( &stream_.mutex );
\r
9249 // The state might change while waiting on a mutex.
\r
9250 if ( stream_.state == STREAM_STOPPED ) {
\r
9251 MUTEX_UNLOCK( &stream_.mutex );
\r
9256 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9258 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9259 if ( result == -1 ) {
\r
9260 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9261 errorText_ = errorStream_.str();
\r
9264 handle->triggered = false;
\r
9267 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9268 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9269 if ( result == -1 ) {
\r
9270 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9271 errorText_ = errorStream_.str();
\r
9277 stream_.state = STREAM_STOPPED;
\r
9278 MUTEX_UNLOCK( &stream_.mutex );
\r
9280 if ( result != -1 ) return;
\r
9281 error( RtAudioError::SYSTEM_ERROR );
\r
9284 void RtApiOss :: callbackEvent()
\r
9286 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9287 if ( stream_.state == STREAM_STOPPED ) {
\r
9288 MUTEX_LOCK( &stream_.mutex );
\r
9289 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9290 if ( stream_.state != STREAM_RUNNING ) {
\r
9291 MUTEX_UNLOCK( &stream_.mutex );
\r
9294 MUTEX_UNLOCK( &stream_.mutex );
\r
9297 if ( stream_.state == STREAM_CLOSED ) {
\r
9298 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9299 error( RtAudioError::WARNING );
\r
9303 // Invoke user callback to get fresh output data.
\r
9304 int doStopStream = 0;
\r
9305 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9306 double streamTime = getStreamTime();
\r
9307 RtAudioStreamStatus status = 0;
\r
9308 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9309 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9310 handle->xrun[0] = false;
\r
9312 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9313 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9314 handle->xrun[1] = false;
\r
9316 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9317 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9318 if ( doStopStream == 2 ) {
\r
9319 this->abortStream();
\r
9323 MUTEX_LOCK( &stream_.mutex );
\r
9325 // The state might change while waiting on a mutex.
\r
9326 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9331 RtAudioFormat format;
\r
9333 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9335 // Setup parameters and do buffer conversion if necessary.
\r
9336 if ( stream_.doConvertBuffer[0] ) {
\r
9337 buffer = stream_.deviceBuffer;
\r
9338 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9339 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9340 format = stream_.deviceFormat[0];
\r
9343 buffer = stream_.userBuffer[0];
\r
9344 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9345 format = stream_.userFormat;
\r
9348 // Do byte swapping if necessary.
\r
9349 if ( stream_.doByteSwap[0] )
\r
9350 byteSwapBuffer( buffer, samples, format );
\r
9352 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9354 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9355 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9356 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9357 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9358 handle->triggered = true;
\r
9361 // Write samples to device.
\r
9362 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9364 if ( result == -1 ) {
\r
9365 // We'll assume this is an underrun, though there isn't a
\r
9366 // specific means for determining that.
\r
9367 handle->xrun[0] = true;
\r
9368 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9369 error( RtAudioError::WARNING );
\r
9370 // Continue on to input section.
\r
9374 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9376 // Setup parameters.
\r
9377 if ( stream_.doConvertBuffer[1] ) {
\r
9378 buffer = stream_.deviceBuffer;
\r
9379 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9380 format = stream_.deviceFormat[1];
\r
9383 buffer = stream_.userBuffer[1];
\r
9384 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9385 format = stream_.userFormat;
\r
9388 // Read samples from device.
\r
9389 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9391 if ( result == -1 ) {
\r
9392 // We'll assume this is an overrun, though there isn't a
\r
9393 // specific means for determining that.
\r
9394 handle->xrun[1] = true;
\r
9395 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9396 error( RtAudioError::WARNING );
\r
9400 // Do byte swapping if necessary.
\r
9401 if ( stream_.doByteSwap[1] )
\r
9402 byteSwapBuffer( buffer, samples, format );
\r
9404 // Do buffer conversion if necessary.
\r
9405 if ( stream_.doConvertBuffer[1] )
\r
9406 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9410 MUTEX_UNLOCK( &stream_.mutex );
\r
9412 RtApi::tickStreamTime();
\r
9413 if ( doStopStream == 1 ) this->stopStream();
\r
9416 static void *ossCallbackHandler( void *ptr )
\r
9418 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9419 RtApiOss *object = (RtApiOss *) info->object;
\r
9420 bool *isRunning = &info->isRunning;
\r
9422 while ( *isRunning == true ) {
\r
9423 pthread_testcancel();
\r
9424 object->callbackEvent();
\r
9427 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
// This method can be modified to control the behavior of error
// message printing.
\r
9442 void RtApi :: error( RtAudioError::Type type )
\r
9444 errorStream_.str(""); // clear the ostringstream
\r
9446 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9447 if ( errorCallback ) {
\r
9448 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9450 if ( firstErrorOccurred_ )
\r
9453 firstErrorOccurred_ = true;
\r
9454 const std::string errorMessage = errorText_;
\r
9456 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9457 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9461 errorCallback( type, errorMessage );
\r
9462 firstErrorOccurred_ = false;
\r
9466 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9467 std::cerr << '\n' << errorText_ << "\n\n";
\r
9468 else if ( type != RtAudioError::WARNING )
\r
9469 throw( RtAudioError( errorText_, type ) );
\r
9472 void RtApi :: verifyStream()
\r
9474 if ( stream_.state == STREAM_CLOSED ) {
\r
9475 errorText_ = "RtApi:: a stream is not open!";
\r
9476 error( RtAudioError::INVALID_USE );
\r
9480 void RtApi :: clearStreamInfo()
\r
9482 stream_.mode = UNINITIALIZED;
\r
9483 stream_.state = STREAM_CLOSED;
\r
9484 stream_.sampleRate = 0;
\r
9485 stream_.bufferSize = 0;
\r
9486 stream_.nBuffers = 0;
\r
9487 stream_.userFormat = 0;
\r
9488 stream_.userInterleaved = true;
\r
9489 stream_.streamTime = 0.0;
\r
9490 stream_.apiHandle = 0;
\r
9491 stream_.deviceBuffer = 0;
\r
9492 stream_.callbackInfo.callback = 0;
\r
9493 stream_.callbackInfo.userData = 0;
\r
9494 stream_.callbackInfo.isRunning = false;
\r
9495 stream_.callbackInfo.errorCallback = 0;
\r
9496 for ( int i=0; i<2; i++ ) {
\r
9497 stream_.device[i] = 11111;
\r
9498 stream_.doConvertBuffer[i] = false;
\r
9499 stream_.deviceInterleaved[i] = true;
\r
9500 stream_.doByteSwap[i] = false;
\r
9501 stream_.nUserChannels[i] = 0;
\r
9502 stream_.nDeviceChannels[i] = 0;
\r
9503 stream_.channelOffset[i] = 0;
\r
9504 stream_.deviceFormat[i] = 0;
\r
9505 stream_.latency[i] = 0;
\r
9506 stream_.userBuffer[i] = 0;
\r
9507 stream_.convertInfo[i].channels = 0;
\r
9508 stream_.convertInfo[i].inJump = 0;
\r
9509 stream_.convertInfo[i].outJump = 0;
\r
9510 stream_.convertInfo[i].inFormat = 0;
\r
9511 stream_.convertInfo[i].outFormat = 0;
\r
9512 stream_.convertInfo[i].inOffset.clear();
\r
9513 stream_.convertInfo[i].outOffset.clear();
\r
9517 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9519 if ( format == RTAUDIO_SINT16 )
\r
9521 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9523 else if ( format == RTAUDIO_FLOAT64 )
\r
9525 else if ( format == RTAUDIO_SINT24 )
\r
9527 else if ( format == RTAUDIO_SINT8 )
\r
9530 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9531 error( RtAudioError::WARNING );
\r
9536 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9538 if ( mode == INPUT ) { // convert device to user buffer
\r
9539 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9540 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9541 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9542 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9544 else { // convert user to device buffer
\r
9545 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9546 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9547 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9548 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9551 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9552 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9554 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9556 // Set up the interleave/deinterleave offsets.
\r
9557 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9558 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9559 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9560 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9561 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9562 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9563 stream_.convertInfo[mode].inJump = 1;
\r
9567 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9568 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9569 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9570 stream_.convertInfo[mode].outJump = 1;
\r
9574 else { // no (de)interleaving
\r
9575 if ( stream_.userInterleaved ) {
\r
9576 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9577 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9578 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9582 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9583 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9584 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9585 stream_.convertInfo[mode].inJump = 1;
\r
9586 stream_.convertInfo[mode].outJump = 1;
\r
9591 // Add channel offset.
\r
9592 if ( firstChannel > 0 ) {
\r
9593 if ( stream_.deviceInterleaved[mode] ) {
\r
9594 if ( mode == OUTPUT ) {
\r
9595 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9596 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9600 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9604 if ( mode == OUTPUT ) {
\r
9605 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9606 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9609 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9610 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9616 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9618 // This function does format conversion, input/output channel compensation, and
\r
9619 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9620 // the lower three bytes of a 32-bit integer.
\r
9622 // Clear our device buffer when in/out duplex device channels are different
\r
9623 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9624 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9625 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9628 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9630 Float64 *out = (Float64 *)outBuffer;
\r
9632 if (info.inFormat == RTAUDIO_SINT8) {
\r
9633 signed char *in = (signed char *)inBuffer;
\r
9634 scale = 1.0 / 127.5;
\r
9635 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9636 for (j=0; j<info.channels; j++) {
\r
9637 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9638 out[info.outOffset[j]] += 0.5;
\r
9639 out[info.outOffset[j]] *= scale;
\r
9641 in += info.inJump;
\r
9642 out += info.outJump;
\r
9645 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9646 Int16 *in = (Int16 *)inBuffer;
\r
9647 scale = 1.0 / 32767.5;
\r
9648 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9649 for (j=0; j<info.channels; j++) {
\r
9650 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9651 out[info.outOffset[j]] += 0.5;
\r
9652 out[info.outOffset[j]] *= scale;
\r
9654 in += info.inJump;
\r
9655 out += info.outJump;
\r
9658 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9659 Int24 *in = (Int24 *)inBuffer;
\r
9660 scale = 1.0 / 8388607.5;
\r
9661 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9662 for (j=0; j<info.channels; j++) {
\r
9663 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9664 out[info.outOffset[j]] += 0.5;
\r
9665 out[info.outOffset[j]] *= scale;
\r
9667 in += info.inJump;
\r
9668 out += info.outJump;
\r
9671 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9672 Int32 *in = (Int32 *)inBuffer;
\r
9673 scale = 1.0 / 2147483647.5;
\r
9674 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9675 for (j=0; j<info.channels; j++) {
\r
9676 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9677 out[info.outOffset[j]] += 0.5;
\r
9678 out[info.outOffset[j]] *= scale;
\r
9680 in += info.inJump;
\r
9681 out += info.outJump;
\r
9684 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9685 Float32 *in = (Float32 *)inBuffer;
\r
9686 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9687 for (j=0; j<info.channels; j++) {
\r
9688 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9690 in += info.inJump;
\r
9691 out += info.outJump;
\r
9694 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9695 // Channel compensation and/or (de)interleaving only.
\r
9696 Float64 *in = (Float64 *)inBuffer;
\r
9697 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9698 for (j=0; j<info.channels; j++) {
\r
9699 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9701 in += info.inJump;
\r
9702 out += info.outJump;
\r
9706 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9708 Float32 *out = (Float32 *)outBuffer;
\r
9710 if (info.inFormat == RTAUDIO_SINT8) {
\r
9711 signed char *in = (signed char *)inBuffer;
\r
9712 scale = (Float32) ( 1.0 / 127.5 );
\r
9713 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9714 for (j=0; j<info.channels; j++) {
\r
9715 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9716 out[info.outOffset[j]] += 0.5;
\r
9717 out[info.outOffset[j]] *= scale;
\r
9719 in += info.inJump;
\r
9720 out += info.outJump;
\r
9723 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9724 Int16 *in = (Int16 *)inBuffer;
\r
9725 scale = (Float32) ( 1.0 / 32767.5 );
\r
9726 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9727 for (j=0; j<info.channels; j++) {
\r
9728 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9729 out[info.outOffset[j]] += 0.5;
\r
9730 out[info.outOffset[j]] *= scale;
\r
9732 in += info.inJump;
\r
9733 out += info.outJump;
\r
9736 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9737 Int24 *in = (Int24 *)inBuffer;
\r
9738 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9740 for (j=0; j<info.channels; j++) {
\r
9741 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9742 out[info.outOffset[j]] += 0.5;
\r
9743 out[info.outOffset[j]] *= scale;
\r
9745 in += info.inJump;
\r
9746 out += info.outJump;
\r
9749 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9750 Int32 *in = (Int32 *)inBuffer;
\r
9751 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9755 out[info.outOffset[j]] += 0.5;
\r
9756 out[info.outOffset[j]] *= scale;
\r
9758 in += info.inJump;
\r
9759 out += info.outJump;
\r
9762 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9763 // Channel compensation and/or (de)interleaving only.
\r
9764 Float32 *in = (Float32 *)inBuffer;
\r
9765 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9766 for (j=0; j<info.channels; j++) {
\r
9767 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9769 in += info.inJump;
\r
9770 out += info.outJump;
\r
9773 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9774 Float64 *in = (Float64 *)inBuffer;
\r
9775 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9776 for (j=0; j<info.channels; j++) {
\r
9777 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9779 in += info.inJump;
\r
9780 out += info.outJump;
\r
9784 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9785 Int32 *out = (Int32 *)outBuffer;
\r
9786 if (info.inFormat == RTAUDIO_SINT8) {
\r
9787 signed char *in = (signed char *)inBuffer;
\r
9788 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9789 for (j=0; j<info.channels; j++) {
\r
9790 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9791 out[info.outOffset[j]] <<= 24;
\r
9793 in += info.inJump;
\r
9794 out += info.outJump;
\r
9797 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9798 Int16 *in = (Int16 *)inBuffer;
\r
9799 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9800 for (j=0; j<info.channels; j++) {
\r
9801 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9802 out[info.outOffset[j]] <<= 16;
\r
9804 in += info.inJump;
\r
9805 out += info.outJump;
\r
9808 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9809 Int24 *in = (Int24 *)inBuffer;
\r
9810 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9811 for (j=0; j<info.channels; j++) {
\r
9812 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9813 out[info.outOffset[j]] <<= 8;
\r
9815 in += info.inJump;
\r
9816 out += info.outJump;
\r
9819 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9820 // Channel compensation and/or (de)interleaving only.
\r
9821 Int32 *in = (Int32 *)inBuffer;
\r
9822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9823 for (j=0; j<info.channels; j++) {
\r
9824 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9826 in += info.inJump;
\r
9827 out += info.outJump;
\r
9830 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9831 Float32 *in = (Float32 *)inBuffer;
\r
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9833 for (j=0; j<info.channels; j++) {
\r
9834 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9840 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9841 Float64 *in = (Float64 *)inBuffer;
\r
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9843 for (j=0; j<info.channels; j++) {
\r
9844 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9851 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9852 Int24 *out = (Int24 *)outBuffer;
\r
9853 if (info.inFormat == RTAUDIO_SINT8) {
\r
9854 signed char *in = (signed char *)inBuffer;
\r
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9856 for (j=0; j<info.channels; j++) {
\r
9857 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9858 //out[info.outOffset[j]] <<= 16;
\r
9860 in += info.inJump;
\r
9861 out += info.outJump;
\r
9864 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9865 Int16 *in = (Int16 *)inBuffer;
\r
9866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9867 for (j=0; j<info.channels; j++) {
\r
9868 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9869 //out[info.outOffset[j]] <<= 8;
\r
9871 in += info.inJump;
\r
9872 out += info.outJump;
\r
9875 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9876 // Channel compensation and/or (de)interleaving only.
\r
9877 Int24 *in = (Int24 *)inBuffer;
\r
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9879 for (j=0; j<info.channels; j++) {
\r
9880 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9882 in += info.inJump;
\r
9883 out += info.outJump;
\r
9886 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9887 Int32 *in = (Int32 *)inBuffer;
\r
9888 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9889 for (j=0; j<info.channels; j++) {
\r
9890 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9891 //out[info.outOffset[j]] >>= 8;
\r
9893 in += info.inJump;
\r
9894 out += info.outJump;
\r
9897 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9898 Float32 *in = (Float32 *)inBuffer;
\r
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9900 for (j=0; j<info.channels; j++) {
\r
9901 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9903 in += info.inJump;
\r
9904 out += info.outJump;
\r
9907 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9908 Float64 *in = (Float64 *)inBuffer;
\r
9909 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9910 for (j=0; j<info.channels; j++) {
\r
9911 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9913 in += info.inJump;
\r
9914 out += info.outJump;
\r
9918 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9919 Int16 *out = (Int16 *)outBuffer;
\r
9920 if (info.inFormat == RTAUDIO_SINT8) {
\r
9921 signed char *in = (signed char *)inBuffer;
\r
9922 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9923 for (j=0; j<info.channels; j++) {
\r
9924 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9925 out[info.outOffset[j]] <<= 8;
\r
9927 in += info.inJump;
\r
9928 out += info.outJump;
\r
9931 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9932 // Channel compensation and/or (de)interleaving only.
\r
9933 Int16 *in = (Int16 *)inBuffer;
\r
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9935 for (j=0; j<info.channels; j++) {
\r
9936 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9938 in += info.inJump;
\r
9939 out += info.outJump;
\r
9942 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9943 Int24 *in = (Int24 *)inBuffer;
\r
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9945 for (j=0; j<info.channels; j++) {
\r
9946 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9953 Int32 *in = (Int32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9962 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9963 Float32 *in = (Float32 *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9968 in += info.inJump;
\r
9969 out += info.outJump;
\r
9972 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9973 Float64 *in = (Float64 *)inBuffer;
\r
9974 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9975 for (j=0; j<info.channels; j++) {
\r
9976 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9978 in += info.inJump;
\r
9979 out += info.outJump;
\r
9983 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9984 signed char *out = (signed char *)outBuffer;
\r
9985 if (info.inFormat == RTAUDIO_SINT8) {
\r
9986 // Channel compensation and/or (de)interleaving only.
\r
9987 signed char *in = (signed char *)inBuffer;
\r
9988 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9989 for (j=0; j<info.channels; j++) {
\r
9990 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9992 in += info.inJump;
\r
9993 out += info.outJump;
\r
9996 if (info.inFormat == RTAUDIO_SINT16) {
\r
9997 Int16 *in = (Int16 *)inBuffer;
\r
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9999 for (j=0; j<info.channels; j++) {
\r
10000 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10007 Int24 *in = (Int24 *)inBuffer;
\r
10008 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10009 for (j=0; j<info.channels; j++) {
\r
10010 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10012 in += info.inJump;
\r
10013 out += info.outJump;
\r
10016 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10017 Int32 *in = (Int32 *)inBuffer;
\r
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10019 for (j=0; j<info.channels; j++) {
\r
10020 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10022 in += info.inJump;
\r
10023 out += info.outJump;
\r
10026 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10027 Float32 *in = (Float32 *)inBuffer;
\r
10028 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10029 for (j=0; j<info.channels; j++) {
\r
10030 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10032 in += info.inJump;
\r
10033 out += info.outJump;
\r
10036 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10037 Float64 *in = (Float64 *)inBuffer;
\r
10038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10039 for (j=0; j<info.channels; j++) {
\r
10040 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10042 in += info.inJump;
\r
10043 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10053 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10055 register char val;
\r
10056 register char *ptr;
\r
10059 if ( format == RTAUDIO_SINT16 ) {
\r
10060 for ( unsigned int i=0; i<samples; i++ ) {
\r
10061 // Swap 1st and 2nd bytes.
\r
10063 *(ptr) = *(ptr+1);
\r
10066 // Increment 2 bytes.
\r
10070 else if ( format == RTAUDIO_SINT32 ||
\r
10071 format == RTAUDIO_FLOAT32 ) {
\r
10072 for ( unsigned int i=0; i<samples; i++ ) {
\r
10073 // Swap 1st and 4th bytes.
\r
10075 *(ptr) = *(ptr+3);
\r
10078 // Swap 2nd and 3rd bytes.
\r
10081 *(ptr) = *(ptr+1);
\r
10084 // Increment 3 more bytes.
\r
10088 else if ( format == RTAUDIO_SINT24 ) {
\r
10089 for ( unsigned int i=0; i<samples; i++ ) {
\r
10090 // Swap 1st and 3rd bytes.
\r
10092 *(ptr) = *(ptr+2);
\r
10095 // Increment 2 more bytes.
\r
10099 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10100 for ( unsigned int i=0; i<samples; i++ ) {
\r
10101 // Swap 1st and 8th bytes
\r
10103 *(ptr) = *(ptr+7);
\r
10106 // Swap 2nd and 7th bytes
\r
10109 *(ptr) = *(ptr+5);
\r
10112 // Swap 3rd and 6th bytes
\r
10115 *(ptr) = *(ptr+3);
\r
10118 // Swap 4th and 5th bytes
\r
10121 *(ptr) = *(ptr+1);
\r
10124 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r