/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1pre
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex wrappers: Windows critical sections for the
// Windows backends, pthread mutexes for the POSIX backends, and no-op
// dummies when no audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1687 // Don't bother draining input
\r
1688 if ( handle->drainCounter ) {
\r
1689 handle->drainCounter++;
\r
1693 AudioDeviceID inputDevice;
\r
1694 inputDevice = handle->id[1];
\r
1695 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1697 if ( handle->nStreams[1] == 1 ) {
\r
1698 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1699 convertBuffer( stream_.userBuffer[1],
\r
1700 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1701 stream_.convertInfo[1] );
\r
1703 else { // copy to user buffer
\r
1704 memcpy( stream_.userBuffer[1],
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1706 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1709 else { // read from multiple streams
\r
1710 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1711 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1713 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1714 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1715 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1716 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1717 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1720 else { // read from multiple multi-channel streams
\r
1721 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1722 Float32 *out, *in;
\r
1724 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1725 UInt32 outChannels = stream_.nUserChannels[1];
\r
1726 if ( stream_.doConvertBuffer[1] ) {
\r
1727 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1728 outChannels = stream_.nDeviceChannels[1];
\r
1731 if ( outInterleaved ) outOffset = 1;
\r
1732 else outOffset = stream_.bufferSize;
\r
1734 channelsLeft = outChannels;
\r
1735 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1737 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1738 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1741 // Account for possible channel offset in first stream
\r
1742 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1743 streamChannels -= stream_.channelOffset[1];
\r
1744 inJump = stream_.channelOffset[1];
\r
1748 // Account for possible unread channels at end of the last stream
\r
1749 if ( streamChannels > channelsLeft ) {
\r
1750 inJump = streamChannels - channelsLeft;
\r
1751 streamChannels = channelsLeft;
\r
1754 // Determine output buffer offsets and skips
\r
1755 if ( outInterleaved ) {
\r
1756 outJump = outChannels;
\r
1757 out += outChannels - channelsLeft;
\r
1761 out += (outChannels - channelsLeft) * outOffset;
\r
1764 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1765 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1766 out[j*outOffset] = *in++;
\r
1771 channelsLeft -= streamChannels;
\r
1775 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1776 convertBuffer( stream_.userBuffer[1],
\r
1777 stream_.deviceBuffer,
\r
1778 stream_.convertInfo[1] );
\r
1784 //MUTEX_UNLOCK( &stream_.mutex );
\r
1786 RtApi::tickStreamTime();
\r
1790 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1794 case kAudioHardwareNotRunningError:
\r
1795 return "kAudioHardwareNotRunningError";
\r
1797 case kAudioHardwareUnspecifiedError:
\r
1798 return "kAudioHardwareUnspecifiedError";
\r
1800 case kAudioHardwareUnknownPropertyError:
\r
1801 return "kAudioHardwareUnknownPropertyError";
\r
1803 case kAudioHardwareBadPropertySizeError:
\r
1804 return "kAudioHardwareBadPropertySizeError";
\r
1806 case kAudioHardwareIllegalOperationError:
\r
1807 return "kAudioHardwareIllegalOperationError";
\r
1809 case kAudioHardwareBadObjectError:
\r
1810 return "kAudioHardwareBadObjectError";
\r
1812 case kAudioHardwareBadDeviceError:
\r
1813 return "kAudioHardwareBadDeviceError";
\r
1815 case kAudioHardwareBadStreamError:
\r
1816 return "kAudioHardwareBadStreamError";
\r
1818 case kAudioHardwareUnsupportedOperationError:
\r
1819 return "kAudioHardwareUnsupportedOperationError";
\r
1821 case kAudioDeviceUnsupportedFormatError:
\r
1822 return "kAudioDeviceUnsupportedFormatError";
\r
1824 case kAudioDevicePermissionsError:
\r
1825 return "kAudioDevicePermissionsError";
\r
1828 return "CoreAudio unknown error";
\r
1832 //******************** End of __MACOSX_CORE__ *********************//
\r
1835 #if defined(__UNIX_JACK__)
\r
1837 // JACK is a low-latency audio server, originally written for the
\r
1838 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1839 // connect a number of different applications to an audio device, as
\r
1840 // well as allowing them to share audio between themselves.
\r
1842 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1843 // have ports connected to the server. The JACK server is typically
\r
1844 // started in a terminal as follows:
\r
1846 // .jackd -d alsa -d hw:0
\r
1848 // or through an interface program such as qjackctl. Many of the
\r
1849 // parameters normally set for a stream are fixed by the JACK server
\r
1850 // and can be specified when the JACK server is started. In
\r
1853 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1855 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1856 // frames, and number of buffers = 4. Once the server is running, it
\r
1857 // is not possible to override these values. If the values are not
\r
1858 // specified in the command-line, the JACK server uses default values.
\r
1860 // The JACK server does not have to be running when an instance of
\r
1861 // RtApiJack is created, though the function getDeviceCount() will
\r
1862 // report 0 devices found until JACK has been started. When no
\r
1863 // devices are available (i.e., the JACK server is not running), a
\r
1864 // stream cannot be opened.
\r
1866 #include <jack/jack.h>
\r
1867 #include <unistd.h>
\r
1870 // A structure to hold various information related to the Jack API
\r
1871 // implementation.
\r
1872 struct JackHandle {
\r
1873 jack_client_t *client;
\r
1874 jack_port_t **ports[2];
\r
1875 std::string deviceName[2];
\r
1877 pthread_cond_t condition;
\r
1878 int drainCounter; // Tracks callback counts when draining
\r
1879 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1882 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1885 static void jackSilentError( const char * ) {};
\r
1887 RtApiJack :: RtApiJack()
\r
1889 // Nothing to do here.
\r
1890 #if !defined(__RTAUDIO_DEBUG__)
\r
1891 // Turn off Jack's internal error reporting.
\r
1892 jack_set_error_function( &jackSilentError );
\r
1896 RtApiJack :: ~RtApiJack()
\r
1898 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1901 unsigned int RtApiJack :: getDeviceCount( void )
\r
1903 // See if we can become a jack client.
\r
1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1905 jack_status_t *status = NULL;
\r
1906 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1907 if ( client == 0 ) return 0;
\r
1909 const char **ports;
\r
1910 std::string port, previousPort;
\r
1911 unsigned int nChannels = 0, nDevices = 0;
\r
1912 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1914 // Parse the port names up to the first colon (:).
\r
1915 size_t iColon = 0;
\r
1917 port = (char *) ports[ nChannels ];
\r
1918 iColon = port.find(":");
\r
1919 if ( iColon != std::string::npos ) {
\r
1920 port = port.substr( 0, iColon + 1 );
\r
1921 if ( port != previousPort ) {
\r
1923 previousPort = port;
\r
1926 } while ( ports[++nChannels] );
\r
1930 jack_client_close( client );
\r
1934 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1936 RtAudio::DeviceInfo info;
\r
1937 info.probed = false;
\r
1939 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1940 jack_status_t *status = NULL;
\r
1941 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1942 if ( client == 0 ) {
\r
1943 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1944 error( RtAudioError::WARNING );
\r
1948 const char **ports;
\r
1949 std::string port, previousPort;
\r
1950 unsigned int nPorts = 0, nDevices = 0;
\r
1951 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1953 // Parse the port names up to the first colon (:).
\r
1954 size_t iColon = 0;
\r
1956 port = (char *) ports[ nPorts ];
\r
1957 iColon = port.find(":");
\r
1958 if ( iColon != std::string::npos ) {
\r
1959 port = port.substr( 0, iColon );
\r
1960 if ( port != previousPort ) {
\r
1961 if ( nDevices == device ) info.name = port;
\r
1963 previousPort = port;
\r
1966 } while ( ports[++nPorts] );
\r
1970 if ( device >= nDevices ) {
\r
1971 jack_client_close( client );
\r
1972 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1973 error( RtAudioError::INVALID_USE );
\r
1977 // Get the current jack server sample rate.
\r
1978 info.sampleRates.clear();
\r
1979 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1981 // Count the available ports containing the client name as device
\r
1982 // channels. Jack "input ports" equal RtAudio output channels.
\r
1983 unsigned int nChannels = 0;
\r
1984 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1986 while ( ports[ nChannels ] ) nChannels++;
\r
1988 info.outputChannels = nChannels;
\r
1991 // Jack "output ports" equal RtAudio input channels.
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.inputChannels = nChannels;
\r
2000 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2001 jack_client_close(client);
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 // If device opens for both playback and capture, we determine the channels.
\r
2008 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2009 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2011 // Jack always uses 32-bit floats.
\r
2012 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2014 // Jack doesn't provide default devices so we'll use the first available one.
\r
2015 if ( device == 0 && info.outputChannels > 0 )
\r
2016 info.isDefaultOutput = true;
\r
2017 if ( device == 0 && info.inputChannels > 0 )
\r
2018 info.isDefaultInput = true;
\r
2020 jack_client_close(client);
\r
2021 info.probed = true;
\r
2025 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2027 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2029 RtApiJack *object = (RtApiJack *) info->object;
\r
2030 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2035 // This function will be called by a spawned thread when the Jack
\r
2036 // server signals that it is shutting down. It is necessary to handle
\r
2037 // it this way because the jackShutdown() function must return before
\r
2038 // the jack_deactivate() function (in closeStream()) will return.
\r
2039 static void *jackCloseStream( void *ptr )
\r
2041 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2042 RtApiJack *object = (RtApiJack *) info->object;
\r
2044 object->closeStream();
\r
2046 pthread_exit( NULL );
\r
2048 static void jackShutdown( void *infoPointer )
\r
2050 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 // Check current stream state. If stopped, then we'll assume this
\r
2054 // was called as a result of a call to RtApiJack::stopStream (the
\r
2055 // deactivation of a client handle causes this function to be called).
\r
2056 // If not, we'll assume the Jack server is shutting down or some
\r
2057 // other problem occurred and we should close the stream.
\r
2058 if ( object->isStreamRunning() == false ) return;
\r
2060 ThreadHandle threadId;
\r
2061 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2062 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2065 static int jackXrun( void *infoPointer )
\r
2067 JackHandle *handle = (JackHandle *) infoPointer;
\r
2069 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2070 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2075 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2076 unsigned int firstChannel, unsigned int sampleRate,
\r
2077 RtAudioFormat format, unsigned int *bufferSize,
\r
2078 RtAudio::StreamOptions *options )
\r
2080 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2082 // Look for jack server and try to become a client (only do once per stream).
\r
2083 jack_client_t *client = 0;
\r
2084 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2085 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2086 jack_status_t *status = NULL;
\r
2087 if ( options && !options->streamName.empty() )
\r
2088 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2090 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2091 if ( client == 0 ) {
\r
2092 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2093 error( RtAudioError::WARNING );
\r
2098 // The handle must have been created on an earlier pass.
\r
2099 client = handle->client;
\r
2102 const char **ports;
\r
2103 std::string port, previousPort, deviceName;
\r
2104 unsigned int nPorts = 0, nDevices = 0;
\r
2105 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2107 // Parse the port names up to the first colon (:).
\r
2108 size_t iColon = 0;
\r
2110 port = (char *) ports[ nPorts ];
\r
2111 iColon = port.find(":");
\r
2112 if ( iColon != std::string::npos ) {
\r
2113 port = port.substr( 0, iColon );
\r
2114 if ( port != previousPort ) {
\r
2115 if ( nDevices == device ) deviceName = port;
\r
2117 previousPort = port;
\r
2120 } while ( ports[++nPorts] );
\r
2124 if ( device >= nDevices ) {
\r
2125 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2129 // Count the available ports containing the client name as device
\r
2130 // channels. Jack "input ports" equal RtAudio output channels.
\r
2131 unsigned int nChannels = 0;
\r
2132 unsigned long flag = JackPortIsInput;
\r
2133 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2134 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2136 while ( ports[ nChannels ] ) nChannels++;
\r
2140 // Compare the jack ports for specified client to the requested number of channels.
\r
2141 if ( nChannels < (channels + firstChannel) ) {
\r
2142 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2143 errorText_ = errorStream_.str();
\r
2147 // Check the jack server sample rate.
\r
2148 unsigned int jackRate = jack_get_sample_rate( client );
\r
2149 if ( sampleRate != jackRate ) {
\r
2150 jack_client_close( client );
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2155 stream_.sampleRate = jackRate;
\r
2157 // Get the latency of the JACK port.
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2159 if ( ports[ firstChannel ] ) {
\r
2160 // Added by Ge Wang
\r
2161 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2162 // the range (usually the min and max are equal)
\r
2163 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2164 // get the latency range
\r
2165 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2166 // be optimistic, use the min!
\r
2167 stream_.latency[mode] = latrange.min;
\r
2168 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2172 // The jack server always uses 32-bit floating-point data.
\r
2173 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2174 stream_.userFormat = format;
\r
2176 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2177 else stream_.userInterleaved = true;
\r
2179 // Jack always uses non-interleaved buffers.
\r
2180 stream_.deviceInterleaved[mode] = false;
\r
2182 // Jack always provides host byte-ordered data.
\r
2183 stream_.doByteSwap[mode] = false;
\r
2185 // Get the buffer size. The buffer size and number of buffers
\r
2186 // (periods) is set when the jack server is started.
\r
2187 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2188 *bufferSize = stream_.bufferSize;
\r
2190 stream_.nDeviceChannels[mode] = channels;
\r
2191 stream_.nUserChannels[mode] = channels;
\r
2193 // Set flags for buffer conversion.
\r
2194 stream_.doConvertBuffer[mode] = false;
\r
2195 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2197 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2198 stream_.nUserChannels[mode] > 1 )
\r
2199 stream_.doConvertBuffer[mode] = true;
\r
2201 // Allocate our JackHandle structure for the stream.
\r
2202 if ( handle == 0 ) {
\r
2204 handle = new JackHandle;
\r
2206 catch ( std::bad_alloc& ) {
\r
2207 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2211 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2215 stream_.apiHandle = (void *) handle;
\r
2216 handle->client = client;
\r
2218 handle->deviceName[mode] = deviceName;
\r
2220 // Allocate necessary internal buffers.
\r
2221 unsigned long bufferBytes;
\r
2222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2224 if ( stream_.userBuffer[mode] == NULL ) {
\r
2225 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2229 if ( stream_.doConvertBuffer[mode] ) {
\r
2231 bool makeBuffer = true;
\r
2232 if ( mode == OUTPUT )
\r
2233 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2234 else { // mode == INPUT
\r
2235 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2236 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2237 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2238 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2242 if ( makeBuffer ) {
\r
2243 bufferBytes *= *bufferSize;
\r
2244 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2245 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2246 if ( stream_.deviceBuffer == NULL ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2253 // Allocate memory for the Jack ports (channels) identifiers.
\r
2254 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2255 if ( handle->ports[mode] == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2260 stream_.device[mode] = device;
\r
2261 stream_.channelOffset[mode] = firstChannel;
\r
2262 stream_.state = STREAM_STOPPED;
\r
2263 stream_.callbackInfo.object = (void *) this;
\r
2265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2266 // We had already set up the stream for output.
\r
2267 stream_.mode = DUPLEX;
\r
2269 stream_.mode = mode;
\r
2270 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2271 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2272 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2275 // Register our ports.
\r
2277 if ( mode == OUTPUT ) {
\r
2278 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2279 snprintf( label, 64, "outport %d", i );
\r
2280 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2281 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2285 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2286 snprintf( label, 64, "inport %d", i );
\r
2287 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2288 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2292 // Setup the buffer conversion information structure. We don't use
\r
2293 // buffers to do channel offsets, so we override that parameter
\r
2295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2301 pthread_cond_destroy( &handle->condition );
\r
2302 jack_client_close( handle->client );
\r
2304 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2305 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2308 stream_.apiHandle = 0;
\r
2311 for ( int i=0; i<2; i++ ) {
\r
2312 if ( stream_.userBuffer[i] ) {
\r
2313 free( stream_.userBuffer[i] );
\r
2314 stream_.userBuffer[i] = 0;
\r
2318 if ( stream_.deviceBuffer ) {
\r
2319 free( stream_.deviceBuffer );
\r
2320 stream_.deviceBuffer = 0;
\r
2326 void RtApiJack :: closeStream( void )
\r
2328 if ( stream_.state == STREAM_CLOSED ) {
\r
2329 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2330 error( RtAudioError::WARNING );
\r
2334 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2337 if ( stream_.state == STREAM_RUNNING )
\r
2338 jack_deactivate( handle->client );
\r
2340 jack_client_close( handle->client );
\r
2344 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2345 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2346 pthread_cond_destroy( &handle->condition );
\r
2348 stream_.apiHandle = 0;
\r
2351 for ( int i=0; i<2; i++ ) {
\r
2352 if ( stream_.userBuffer[i] ) {
\r
2353 free( stream_.userBuffer[i] );
\r
2354 stream_.userBuffer[i] = 0;
\r
2358 if ( stream_.deviceBuffer ) {
\r
2359 free( stream_.deviceBuffer );
\r
2360 stream_.deviceBuffer = 0;
\r
2363 stream_.mode = UNINITIALIZED;
\r
2364 stream_.state = STREAM_CLOSED;
\r
2367 void RtApiJack :: startStream( void )
\r
2370 if ( stream_.state == STREAM_RUNNING ) {
\r
2371 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2372 error( RtAudioError::WARNING );
\r
2376 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2377 int result = jack_activate( handle->client );
\r
2379 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2383 const char **ports;
\r
2385 // Get the list of available ports.
\r
2386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2388 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2389 if ( ports == NULL) {
\r
2390 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2394 // Now make the port connections. Since RtAudio wasn't designed to
\r
2395 // allow the user to select particular channels of a device, we'll
\r
2396 // just open the first "nChannels" ports with offset.
\r
2397 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2399 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2400 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2403 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2410 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2418 // Now make the port connections. See note above.
\r
2419 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2421 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2422 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2425 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2432 handle->drainCounter = 0;
\r
2433 handle->internalDrain = false;
\r
2434 stream_.state = STREAM_RUNNING;
\r
2437 if ( result == 0 ) return;
\r
2438 error( RtAudioError::SYSTEM_ERROR );
\r
2441 void RtApiJack :: stopStream( void )
\r
2444 if ( stream_.state == STREAM_STOPPED ) {
\r
2445 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2446 error( RtAudioError::WARNING );
\r
2450 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2453 if ( handle->drainCounter == 0 ) {
\r
2454 handle->drainCounter = 2;
\r
2455 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2459 jack_deactivate( handle->client );
\r
2460 stream_.state = STREAM_STOPPED;
\r
2463 void RtApiJack :: abortStream( void )
\r
2466 if ( stream_.state == STREAM_STOPPED ) {
\r
2467 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2468 error( RtAudioError::WARNING );
\r
2472 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2473 handle->drainCounter = 2;
\r
2478 // This function will be called by a spawned thread when the user
\r
2479 // callback function signals that the stream should be stopped or
\r
2480 // aborted. It is necessary to handle it this way because the
\r
2481 // callbackEvent() function must return before the jack_deactivate()
\r
2482 // function will return.
\r
2483 static void *jackStopStream( void *ptr )
\r
2485 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2486 RtApiJack *object = (RtApiJack *) info->object;
\r
2488 object->stopStream();
\r
2489 pthread_exit( NULL );
\r
2492 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2494 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2495 if ( stream_.state == STREAM_CLOSED ) {
\r
2496 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2497 error( RtAudioError::WARNING );
\r
2500 if ( stream_.bufferSize != nframes ) {
\r
2501 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2502 error( RtAudioError::WARNING );
\r
2506 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 // Check if we were draining the stream and signal is finished.
\r
2510 if ( handle->drainCounter > 3 ) {
\r
2511 ThreadHandle threadId;
\r
2513 stream_.state = STREAM_STOPPING;
\r
2514 if ( handle->internalDrain == true )
\r
2515 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2517 pthread_cond_signal( &handle->condition );
\r
2521 // Invoke user callback first, to get fresh output data.
\r
2522 if ( handle->drainCounter == 0 ) {
\r
2523 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2524 double streamTime = getStreamTime();
\r
2525 RtAudioStreamStatus status = 0;
\r
2526 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2527 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2528 handle->xrun[0] = false;
\r
2530 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2531 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2532 handle->xrun[1] = false;
\r
2534 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2535 stream_.bufferSize, streamTime, status, info->userData );
\r
2536 if ( cbReturnValue == 2 ) {
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 handle->drainCounter = 2;
\r
2540 pthread_create( &id, NULL, jackStopStream, info );
\r
2543 else if ( cbReturnValue == 1 ) {
\r
2544 handle->drainCounter = 1;
\r
2545 handle->internalDrain = true;
\r
2549 jack_default_audio_sample_t *jackbuffer;
\r
2550 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2553 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memset( jackbuffer, 0, bufferBytes );
\r
2561 else if ( stream_.doConvertBuffer[0] ) {
\r
2563 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2565 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2566 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2567 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2570 else { // no buffer conversion
\r
2571 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2572 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2573 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2578 // Don't bother draining input
\r
2579 if ( handle->drainCounter ) {
\r
2580 handle->drainCounter++;
\r
2584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2586 if ( stream_.doConvertBuffer[1] ) {
\r
2587 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2588 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2589 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2593 else { // no buffer conversion
\r
2594 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2595 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2596 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2602 RtApi::tickStreamTime();
\r
2605 //******************** End of __UNIX_JACK__ *********************//
\r
2608 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2610 // The ASIO API is designed around a callback scheme, so this
\r
2611 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2612 // Jack. The primary constraint with ASIO is that it only allows
\r
2613 // access to a single driver at a time. Thus, it is not possible to
\r
2614 // have more than one simultaneous RtAudio stream.
\r
2616 // This implementation also requires a number of external ASIO files
\r
2617 // and a few global variables. The ASIO callback scheme does not
\r
2618 // allow for the passing of user data, so we must create a global
\r
2619 // pointer to our callbackInfo structure.
\r
2621 // On unix systems, we make use of a pthread condition variable.
\r
2622 // Since there is no equivalent in Windows, I hacked something based
\r
2623 // on information found in
\r
2624 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2626 #include "asiosys.h"
\r
2628 #include "iasiothiscallresolver.h"
\r
2629 #include "asiodrivers.h"
\r
2632 static AsioDrivers drivers;
\r
2633 static ASIOCallbacks asioCallbacks;
\r
2634 static ASIODriverInfo driverInfo;
\r
2635 static CallbackInfo *asioCallbackInfo;
\r
2636 static bool asioXRun;
\r
2638 struct AsioHandle {
\r
2639 int drainCounter; // Tracks callback counts when draining
\r
2640 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2641 ASIOBufferInfo *bufferInfos;
\r
2645 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2648 // Function declarations (definitions at end of section)
\r
2649 static const char* getAsioErrorString( ASIOError result );
\r
2650 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2651 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2653 RtApiAsio :: RtApiAsio()
\r
2655 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2656 // CoInitialize beforehand, but it must be for appartment threading
\r
2657 // (in which case, CoInitilialize will return S_FALSE here).
\r
2658 coInitialized_ = false;
\r
2659 HRESULT hr = CoInitialize( NULL );
\r
2660 if ( FAILED(hr) ) {
\r
2661 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2662 error( RtAudioError::WARNING );
\r
2664 coInitialized_ = true;
\r
2666 drivers.removeCurrentDriver();
\r
2667 driverInfo.asioVersion = 2;
\r
2669 // See note in DirectSound implementation about GetDesktopWindow().
\r
2670 driverInfo.sysRef = GetForegroundWindow();
\r
2673 RtApiAsio :: ~RtApiAsio()
\r
2675 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2676 if ( coInitialized_ ) CoUninitialize();
\r
2679 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2681 return (unsigned int) drivers.asioGetNumDev();
\r
2684 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2686 RtAudio::DeviceInfo info;
\r
2687 info.probed = false;
\r
2690 unsigned int nDevices = getDeviceCount();
\r
2691 if ( nDevices == 0 ) {
\r
2692 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2693 error( RtAudioError::INVALID_USE );
\r
2697 if ( device >= nDevices ) {
\r
2698 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2699 error( RtAudioError::INVALID_USE );
\r
2703 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2704 if ( stream_.state != STREAM_CLOSED ) {
\r
2705 if ( device >= devices_.size() ) {
\r
2706 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2707 error( RtAudioError::WARNING );
\r
2710 return devices_[ device ];
\r
2713 char driverName[32];
\r
2714 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2715 if ( result != ASE_OK ) {
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtAudioError::WARNING );
\r
2722 info.name = driverName;
\r
2724 if ( !drivers.loadDriver( driverName ) ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 result = ASIOInit( &driverInfo );
\r
2732 if ( result != ASE_OK ) {
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 // Determine the device channel information.
\r
2740 long inputChannels, outputChannels;
\r
2741 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2742 if ( result != ASE_OK ) {
\r
2743 drivers.removeCurrentDriver();
\r
2744 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2745 errorText_ = errorStream_.str();
\r
2746 error( RtAudioError::WARNING );
\r
2750 info.outputChannels = outputChannels;
\r
2751 info.inputChannels = inputChannels;
\r
2752 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2753 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2755 // Determine the supported sample rates.
\r
2756 info.sampleRates.clear();
\r
2757 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2758 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2759 if ( result == ASE_OK )
\r
2760 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2763 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2764 ASIOChannelInfo channelInfo;
\r
2765 channelInfo.channel = 0;
\r
2766 channelInfo.isInput = true;
\r
2767 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2768 result = ASIOGetChannelInfo( &channelInfo );
\r
2769 if ( result != ASE_OK ) {
\r
2770 drivers.removeCurrentDriver();
\r
2771 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2772 errorText_ = errorStream_.str();
\r
2773 error( RtAudioError::WARNING );
\r
2777 info.nativeFormats = 0;
\r
2778 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT16;
\r
2780 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_SINT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2784 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2785 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2786 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2787 info.nativeFormats |= RTAUDIO_SINT24;
\r
2789 if ( info.outputChannels > 0 )
\r
2790 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2791 if ( info.inputChannels > 0 )
\r
2792 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2794 info.probed = true;
\r
2795 drivers.removeCurrentDriver();
\r
2799 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2801 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2802 object->callbackEvent( index );
\r
2805 void RtApiAsio :: saveDeviceInfo( void )
\r
2809 unsigned int nDevices = getDeviceCount();
\r
2810 devices_.resize( nDevices );
\r
2811 for ( unsigned int i=0; i<nDevices; i++ )
\r
2812 devices_[i] = getDeviceInfo( i );
\r
2815 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2816 unsigned int firstChannel, unsigned int sampleRate,
\r
2817 RtAudioFormat format, unsigned int *bufferSize,
\r
2818 RtAudio::StreamOptions *options )
\r
2820 // For ASIO, a duplex stream MUST use the same driver.
\r
2821 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2822 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2826 char driverName[32];
\r
2827 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2834 // Only load the driver once for duplex stream.
\r
2835 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2836 // The getDeviceInfo() function will not work when a stream is open
\r
2837 // because ASIO does not allow multiple devices to run at the same
\r
2838 // time. Thus, we'll probe the system before opening a stream and
\r
2839 // save the results for use by getDeviceInfo().
\r
2840 this->saveDeviceInfo();
\r
2842 if ( !drivers.loadDriver( driverName ) ) {
\r
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2844 errorText_ = errorStream_.str();
\r
2848 result = ASIOInit( &driverInfo );
\r
2849 if ( result != ASE_OK ) {
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2856 // Check the device channel count.
\r
2857 long inputChannels, outputChannels;
\r
2858 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2859 if ( result != ASE_OK ) {
\r
2860 drivers.removeCurrentDriver();
\r
2861 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2862 errorText_ = errorStream_.str();
\r
2866 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2867 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2873 stream_.nDeviceChannels[mode] = channels;
\r
2874 stream_.nUserChannels[mode] = channels;
\r
2875 stream_.channelOffset[mode] = firstChannel;
\r
2877 // Verify the sample rate is supported.
\r
2878 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2879 if ( result != ASE_OK ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Get the current sample rate
\r
2887 ASIOSampleRate currentRate;
\r
2888 result = ASIOGetSampleRate( ¤tRate );
\r
2889 if ( result != ASE_OK ) {
\r
2890 drivers.removeCurrentDriver();
\r
2891 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2892 errorText_ = errorStream_.str();
\r
2896 // Set the sample rate only if necessary
\r
2897 if ( currentRate != sampleRate ) {
\r
2898 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2899 if ( result != ASE_OK ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2902 errorText_ = errorStream_.str();
\r
2907 // Determine the driver data type.
\r
2908 ASIOChannelInfo channelInfo;
\r
2909 channelInfo.channel = 0;
\r
2910 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2911 else channelInfo.isInput = true;
\r
2912 result = ASIOGetChannelInfo( &channelInfo );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Assuming WINDOWS host is always little-endian.
\r
2921 stream_.doByteSwap[mode] = false;
\r
2922 stream_.userFormat = format;
\r
2923 stream_.deviceFormat[mode] = 0;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2926 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2930 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2934 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2938 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2942 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2946 drivers.removeCurrentDriver();
\r
2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2948 errorText_ = errorStream_.str();
\r
2952 // Set the buffer size. For a duplex stream, this will end up
\r
2953 // setting the buffer size based on the input constraints, which
\r
2955 long minSize, maxSize, preferSize, granularity;
\r
2956 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2957 if ( result != ASE_OK ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2960 errorText_ = errorStream_.str();
\r
2964 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2965 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2966 else if ( granularity == -1 ) {
\r
2967 // Make sure bufferSize is a power of two.
\r
2968 int log2_of_min_size = 0;
\r
2969 int log2_of_max_size = 0;
\r
2971 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2972 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2973 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2976 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2977 int min_delta_num = log2_of_min_size;
\r
2979 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2980 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2981 if (current_delta < min_delta) {
\r
2982 min_delta = current_delta;
\r
2983 min_delta_num = i;
\r
2987 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2991 else if ( granularity != 0 ) {
\r
2992 // Set to an even multiple of granularity, rounding up.
\r
2993 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2997 drivers.removeCurrentDriver();
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3002 stream_.bufferSize = *bufferSize;
\r
3003 stream_.nBuffers = 2;
\r
3005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3006 else stream_.userInterleaved = true;
\r
3008 // ASIO always uses non-interleaved buffers.
\r
3009 stream_.deviceInterleaved[mode] = false;
\r
3011 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3013 if ( handle == 0 ) {
\r
3015 handle = new AsioHandle;
\r
3017 catch ( std::bad_alloc& ) {
\r
3018 //if ( handle == NULL ) {
\r
3019 drivers.removeCurrentDriver();
\r
3020 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3023 handle->bufferInfos = 0;
\r
3025 // Create a manual-reset event.
\r
3026 handle->condition = CreateEvent( NULL, // no security
\r
3027 TRUE, // manual-reset
\r
3028 FALSE, // non-signaled initially
\r
3029 NULL ); // unnamed
\r
3030 stream_.apiHandle = (void *) handle;
\r
3033 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3034 // and output separately, we'll have to dispose of previously
\r
3035 // created output buffers for a duplex stream.
\r
3036 long inputLatency, outputLatency;
\r
3037 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3038 ASIODisposeBuffers();
\r
3039 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3042 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3043 bool buffersAllocated = false;
\r
3044 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3045 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3046 if ( handle->bufferInfos == NULL ) {
\r
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3048 errorText_ = errorStream_.str();
\r
3052 ASIOBufferInfo *infos;
\r
3053 infos = handle->bufferInfos;
\r
3054 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3055 infos->isInput = ASIOFalse;
\r
3056 infos->channelNum = i + stream_.channelOffset[0];
\r
3057 infos->buffers[0] = infos->buffers[1] = 0;
\r
3059 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3060 infos->isInput = ASIOTrue;
\r
3061 infos->channelNum = i + stream_.channelOffset[1];
\r
3062 infos->buffers[0] = infos->buffers[1] = 0;
\r
3065 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3066 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3067 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3068 asioCallbacks.asioMessage = &asioMessages;
\r
3069 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3070 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3071 if ( result != ASE_OK ) {
\r
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3073 errorText_ = errorStream_.str();
\r
3076 buffersAllocated = true;
\r
3078 // Set flags for buffer conversion.
\r
3079 stream_.doConvertBuffer[mode] = false;
\r
3080 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3081 stream_.doConvertBuffer[mode] = true;
\r
3082 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3083 stream_.nUserChannels[mode] > 1 )
\r
3084 stream_.doConvertBuffer[mode] = true;
\r
3086 // Allocate necessary internal buffers
\r
3087 unsigned long bufferBytes;
\r
3088 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3089 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3090 if ( stream_.userBuffer[mode] == NULL ) {
\r
3091 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3095 if ( stream_.doConvertBuffer[mode] ) {
\r
3097 bool makeBuffer = true;
\r
3098 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3099 if ( mode == INPUT ) {
\r
3100 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3101 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3102 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3106 if ( makeBuffer ) {
\r
3107 bufferBytes *= *bufferSize;
\r
3108 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3109 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3110 if ( stream_.deviceBuffer == NULL ) {
\r
3111 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.state = STREAM_STOPPED;
\r
3120 asioCallbackInfo = &stream_.callbackInfo;
\r
3121 stream_.callbackInfo.object = (void *) this;
\r
3122 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3123 // We had already set up an output stream.
\r
3124 stream_.mode = DUPLEX;
\r
3126 stream_.mode = mode;
\r
3128 // Determine device latencies
\r
3129 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3130 if ( result != ASE_OK ) {
\r
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3132 errorText_ = errorStream_.str();
\r
3133 error( RtAudioError::WARNING); // warn but don't fail
\r
3136 stream_.latency[0] = outputLatency;
\r
3137 stream_.latency[1] = inputLatency;
\r
3140 // Setup the buffer conversion information structure. We don't use
\r
3141 // buffers to do channel offsets, so we override that parameter
\r
3143 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3148 if ( buffersAllocated )
\r
3149 ASIODisposeBuffers();
\r
3150 drivers.removeCurrentDriver();
\r
3153 CloseHandle( handle->condition );
\r
3154 if ( handle->bufferInfos )
\r
3155 free( handle->bufferInfos );
\r
3157 stream_.apiHandle = 0;
\r
3160 for ( int i=0; i<2; i++ ) {
\r
3161 if ( stream_.userBuffer[i] ) {
\r
3162 free( stream_.userBuffer[i] );
\r
3163 stream_.userBuffer[i] = 0;
\r
3167 if ( stream_.deviceBuffer ) {
\r
3168 free( stream_.deviceBuffer );
\r
3169 stream_.deviceBuffer = 0;
\r
3175 void RtApiAsio :: closeStream()
\r
3177 if ( stream_.state == STREAM_CLOSED ) {
\r
3178 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3179 error( RtAudioError::WARNING );
\r
3183 if ( stream_.state == STREAM_RUNNING ) {
\r
3184 stream_.state = STREAM_STOPPED;
\r
3187 ASIODisposeBuffers();
\r
3188 drivers.removeCurrentDriver();
\r
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3192 CloseHandle( handle->condition );
\r
3193 if ( handle->bufferInfos )
\r
3194 free( handle->bufferInfos );
\r
3196 stream_.apiHandle = 0;
\r
3199 for ( int i=0; i<2; i++ ) {
\r
3200 if ( stream_.userBuffer[i] ) {
\r
3201 free( stream_.userBuffer[i] );
\r
3202 stream_.userBuffer[i] = 0;
\r
3206 if ( stream_.deviceBuffer ) {
\r
3207 free( stream_.deviceBuffer );
\r
3208 stream_.deviceBuffer = 0;
\r
3211 stream_.mode = UNINITIALIZED;
\r
3212 stream_.state = STREAM_CLOSED;
\r
// Guards against re-entering stopStream() from the drain/stop thread.
// static: file-local, consistent with the other ASIO globals above.
static bool stopThreadCalled = false;
3217 void RtApiAsio :: startStream()
\r
3220 if ( stream_.state == STREAM_RUNNING ) {
\r
3221 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3222 error( RtAudioError::WARNING );
\r
3226 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3227 ASIOError result = ASIOStart();
\r
3228 if ( result != ASE_OK ) {
\r
3229 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3230 errorText_ = errorStream_.str();
\r
3234 handle->drainCounter = 0;
\r
3235 handle->internalDrain = false;
\r
3236 ResetEvent( handle->condition );
\r
3237 stream_.state = STREAM_RUNNING;
\r
3241 stopThreadCalled = false;
\r
3243 if ( result == ASE_OK ) return;
\r
3244 error( RtAudioError::SYSTEM_ERROR );
\r
3247 void RtApiAsio :: stopStream()
\r
3250 if ( stream_.state == STREAM_STOPPED ) {
\r
3251 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3252 error( RtAudioError::WARNING );
\r
3256 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3258 if ( handle->drainCounter == 0 ) {
\r
3259 handle->drainCounter = 2;
\r
3260 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3264 stream_.state = STREAM_STOPPED;
\r
3266 ASIOError result = ASIOStop();
\r
3267 if ( result != ASE_OK ) {
\r
3268 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3269 errorText_ = errorStream_.str();
\r
3272 if ( result == ASE_OK ) return;
\r
3273 error( RtAudioError::SYSTEM_ERROR );
\r
3276 void RtApiAsio :: abortStream()
\r
3279 if ( stream_.state == STREAM_STOPPED ) {
\r
3280 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3281 error( RtAudioError::WARNING );
\r
3285 // The following lines were commented-out because some behavior was
\r
3286 // noted where the device buffers need to be zeroed to avoid
\r
3287 // continuing sound, even when the device buffers are completely
\r
3288 // disposed. So now, calling abort is the same as calling stop.
\r
3289 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3290 // handle->drainCounter = 2;
\r
3294 // This function will be called by a spawned thread when the user
\r
3295 // callback function signals that the stream should be stopped or
\r
3296 // aborted. It is necessary to handle it this way because the
\r
3297 // callbackEvent() function must return before the ASIOStop()
\r
3298 // function will return.
\r
3299 static unsigned __stdcall asioStopStream( void *ptr )
\r
3301 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3302 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3304 object->stopStream();
\r
3305 _endthreadex( 0 );
\r
3309 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3311 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3312 if ( stream_.state == STREAM_CLOSED ) {
\r
3313 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3314 error( RtAudioError::WARNING );
\r
3318 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 // Check if we were draining the stream and signal if finished.
\r
3322 if ( handle->drainCounter > 3 ) {
\r
3324 stream_.state = STREAM_STOPPING;
\r
3325 if ( handle->internalDrain == false )
\r
3326 SetEvent( handle->condition );
\r
3327 else { // spawn a thread to stop the stream
\r
3328 unsigned threadId;
\r
3329 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3330 &stream_.callbackInfo, 0, &threadId );
\r
3335 // Invoke user callback to get fresh output data UNLESS we are
\r
3336 // draining stream.
\r
3337 if ( handle->drainCounter == 0 ) {
\r
3338 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3339 double streamTime = getStreamTime();
\r
3340 RtAudioStreamStatus status = 0;
\r
3341 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3342 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3345 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3346 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3349 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3350 stream_.bufferSize, streamTime, status, info->userData );
\r
3351 if ( cbReturnValue == 2 ) {
\r
3352 stream_.state = STREAM_STOPPING;
\r
3353 handle->drainCounter = 2;
\r
3354 unsigned threadId;
\r
3355 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3356 &stream_.callbackInfo, 0, &threadId );
\r
3359 else if ( cbReturnValue == 1 ) {
\r
3360 handle->drainCounter = 1;
\r
3361 handle->internalDrain = true;
\r
3365 unsigned int nChannels, bufferBytes, i, j;
\r
3366 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3369 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3371 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3373 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3374 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3375 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3379 else if ( stream_.doConvertBuffer[0] ) {
\r
3381 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3382 if ( stream_.doByteSwap[0] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3385 stream_.deviceFormat[0] );
\r
3387 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3388 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3389 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3390 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3396 if ( stream_.doByteSwap[0] )
\r
3397 byteSwapBuffer( stream_.userBuffer[0],
\r
3398 stream_.bufferSize * stream_.nUserChannels[0],
\r
3399 stream_.userFormat );
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3403 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3404 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3410 // Don't bother draining input
\r
3411 if ( handle->drainCounter ) {
\r
3412 handle->drainCounter++;
\r
3416 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3418 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3420 if (stream_.doConvertBuffer[1]) {
\r
3422 // Always interleave ASIO input data.
\r
3423 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3424 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3425 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3426 handle->bufferInfos[i].buffers[bufferIndex],
\r
3430 if ( stream_.doByteSwap[1] )
\r
3431 byteSwapBuffer( stream_.deviceBuffer,
\r
3432 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3433 stream_.deviceFormat[1] );
\r
3434 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3438 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3439 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3440 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3441 handle->bufferInfos[i].buffers[bufferIndex],
\r
3446 if ( stream_.doByteSwap[1] )
\r
3447 byteSwapBuffer( stream_.userBuffer[1],
\r
3448 stream_.bufferSize * stream_.nUserChannels[1],
\r
3449 stream_.userFormat );
\r
3454 // The following call was suggested by Malte Clasen. While the API
\r
3455 // documentation indicates it should not be required, some device
\r
3456 // drivers apparently do not function correctly without it.
\r
3457 ASIOOutputReady();
\r
3459 RtApi::tickStreamTime();
\r
3463 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3465 // The ASIO documentation says that this usually only happens during
\r
3466 // external sync. Audio processing is not stopped by the driver,
\r
3467 // actual sample rate might not have even changed, maybe only the
\r
3468 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3471 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3473 object->stopStream();
\r
3475 catch ( RtAudioError &exception ) {
\r
3476 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3480 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3483 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3487 switch( selector ) {
\r
3488 case kAsioSelectorSupported:
\r
3489 if ( value == kAsioResetRequest
\r
3490 || value == kAsioEngineVersion
\r
3491 || value == kAsioResyncRequest
\r
3492 || value == kAsioLatenciesChanged
\r
3493 // The following three were added for ASIO 2.0, you don't
\r
3494 // necessarily have to support them.
\r
3495 || value == kAsioSupportsTimeInfo
\r
3496 || value == kAsioSupportsTimeCode
\r
3497 || value == kAsioSupportsInputMonitor)
\r
3500 case kAsioResetRequest:
\r
3501 // Defer the task and perform the reset of the driver during the
\r
3502 // next "safe" situation. You cannot reset the driver right now,
\r
3503 // as this code is called from the driver. Reset the driver is
\r
3504 // done by completely destruct is. I.e. ASIOStop(),
\r
3505 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3507 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3510 case kAsioResyncRequest:
\r
3511 // This informs the application that the driver encountered some
\r
3512 // non-fatal data loss. It is used for synchronization purposes
\r
3513 // of different media. Added mainly to work around the Win16Mutex
\r
3514 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3515 // which could lose data because the Mutex was held too long by
\r
3516 // another thread. However a driver can issue it in other
\r
3517 // situations, too.
\r
3518 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3522 case kAsioLatenciesChanged:
\r
3523 // This will inform the host application that the drivers were
\r
3524 // latencies changed. Beware, it this does not mean that the
\r
3525 // buffer sizes have changed! You might need to update internal
\r
3527 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3530 case kAsioEngineVersion:
\r
3531 // Return the supported ASIO version of the host application. If
\r
3532 // a host application does not implement this selector, ASIO 1.0
\r
3533 // is assumed by the driver.
\r
3536 case kAsioSupportsTimeInfo:
\r
3537 // Informs the driver whether the
\r
3538 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3539 // For compatibility with ASIO 1.0 drivers the host application
\r
3540 // should always support the "old" bufferSwitch method, too.
\r
3543 case kAsioSupportsTimeCode:
\r
3544 // Informs the driver whether application is interested in time
\r
3545 // code info. If an application does not need to know about time
\r
3546 // code, the driver has less work to do.
\r
3553 static const char* getAsioErrorString( ASIOError result )
\r
3558 const char*message;
\r
3561 static const Messages m[] =
\r
3563 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3564 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3565 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3566 { ASE_InvalidMode, "Invalid mode." },
\r
3567 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3568 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3569 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3572 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3573 if ( m[i].value == result ) return m[i].message;
\r
3575 return "Unknown error.";
\r
3578 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3582 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3585 // - Introduces support for the Windows WASAPI API
\r
3586 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3587 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3588 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3593 #include <audioclient.h>
\r
3595 #include <mmdeviceapi.h>
\r
3596 #include <functiondiscoverykeys_devpkey.h>
\r
3598 //=============================================================================
\r
3600 #define SAFE_RELEASE( objectPtr )\
\r
3603 objectPtr->Release();\
\r
3604 objectPtr = NULL;\
\r
3607 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3609 //-----------------------------------------------------------------------------
\r
3611 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3612 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3613 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3614 // provide intermediate storage for read / write synchronization.
\r
3615 class WasapiBuffer
\r
3619 : buffer_( NULL ),
\r
3628 // sets the length of the internal ring buffer
\r
3629 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3632 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3634 bufferSize_ = bufferSize;
\r
3639 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3640 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3642 if ( !buffer || // incoming buffer is NULL
\r
3643 bufferSize == 0 || // incoming buffer has no data
\r
3644 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3649 unsigned int relOutIndex = outIndex_;
\r
3650 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3651 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3652 relOutIndex += bufferSize_;
\r
3655 // "in" index can end on the "out" index but cannot begin at it
\r
3656 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3657 return false; // not enough space between "in" index and "out" index
\r
3660 // copy buffer from external to internal
\r
3661 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3662 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3663 int fromInSize = bufferSize - fromZeroSize;
\r
3667 case RTAUDIO_SINT8:
\r
3668 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3669 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3671 case RTAUDIO_SINT16:
\r
3672 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3673 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3675 case RTAUDIO_SINT24:
\r
3676 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3677 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3679 case RTAUDIO_SINT32:
\r
3680 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3681 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3683 case RTAUDIO_FLOAT32:
\r
3684 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3685 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3687 case RTAUDIO_FLOAT64:
\r
3688 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3689 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3693 // update "in" index
\r
3694 inIndex_ += bufferSize;
\r
3695 inIndex_ %= bufferSize_;
\r
3700 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3701 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3703 if ( !buffer || // incoming buffer is NULL
\r
3704 bufferSize == 0 || // incoming buffer has no data
\r
3705 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3710 unsigned int relInIndex = inIndex_;
\r
3711 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3712 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3713 relInIndex += bufferSize_;
\r
3716 // "out" index can begin at and end on the "in" index
\r
3717 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3718 return false; // not enough space between "out" index and "in" index
\r
3721 // copy buffer from internal to external
\r
3722 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3723 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3724 int fromOutSize = bufferSize - fromZeroSize;
\r
3728 case RTAUDIO_SINT8:
\r
3729 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3730 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3732 case RTAUDIO_SINT16:
\r
3733 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3734 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3736 case RTAUDIO_SINT24:
\r
3737 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3738 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3740 case RTAUDIO_SINT32:
\r
3741 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3742 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3744 case RTAUDIO_FLOAT32:
\r
3745 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3746 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3748 case RTAUDIO_FLOAT64:
\r
3749 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3750 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3754 // update "out" index
\r
3755 outIndex_ += bufferSize;
\r
3756 outIndex_ %= bufferSize_;
\r
3763 unsigned int bufferSize_;
\r
3764 unsigned int inIndex_;
\r
3765 unsigned int outIndex_;
\r
3768 //-----------------------------------------------------------------------------
\r
3770 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3771 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3772 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3773 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3774 // one rate and its multiple.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& channelCount,
\r
3778 const unsigned int& inSampleRate,
\r
3779 const unsigned int& outSampleRate,
\r
3780 const unsigned int& inSampleCount,
\r
3781 unsigned int& outSampleCount,
\r
3782 const RtAudioFormat& format )
\r
3784 // calculate the new outSampleCount and relative sampleStep
\r
3785 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3786 float sampleStep = 1.0f / sampleRatio;
\r
3787 float inSampleFraction = 0.0f;
\r
3789 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3791 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3792 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3794 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3798 case RTAUDIO_SINT8:
\r
3799 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3801 case RTAUDIO_SINT16:
\r
3802 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3804 case RTAUDIO_SINT24:
\r
3805 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3807 case RTAUDIO_SINT32:
\r
3808 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3810 case RTAUDIO_FLOAT32:
\r
3811 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3813 case RTAUDIO_FLOAT64:
\r
3814 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3818 // jump to next in sample
\r
3819 inSampleFraction += sampleStep;
\r
3823 //-----------------------------------------------------------------------------
\r
3825 // A structure to hold various information related to the WASAPI implementation.
\r
3826 struct WasapiHandle
\r
3828 IAudioClient* captureAudioClient;
\r
3829 IAudioClient* renderAudioClient;
\r
3830 IAudioCaptureClient* captureClient;
\r
3831 IAudioRenderClient* renderClient;
\r
3832 HANDLE captureEvent;
\r
3833 HANDLE renderEvent;
\r
3836 : captureAudioClient( NULL ),
\r
3837 renderAudioClient( NULL ),
\r
3838 captureClient( NULL ),
\r
3839 renderClient( NULL ),
\r
3840 captureEvent( NULL ),
\r
3841 renderEvent( NULL ) {}
\r
3844 //=============================================================================
\r
3846 RtApiWasapi::RtApiWasapi()
\r
3847 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3849 // WASAPI can run either apartment or multi-threaded
\r
3850 HRESULT hr = CoInitialize( NULL );
\r
3852 if ( !FAILED( hr ) )
\r
3853 coInitialized_ = true;
\r
3855 // Instantiate device enumerator
\r
3856 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3857 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3858 ( void** ) &deviceEnumerator_ );
\r
3860 if ( FAILED( hr ) ) {
\r
3861 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3862 error( RtAudioError::DRIVER_ERROR );
\r
3866 //-----------------------------------------------------------------------------
\r
3868 RtApiWasapi::~RtApiWasapi()
\r
3870 // if this object previously called CoInitialize()
\r
3871 if ( coInitialized_ ) {
\r
3875 if ( stream_.state != STREAM_CLOSED ) {
\r
3879 SAFE_RELEASE( deviceEnumerator_ );
\r
3882 //=============================================================================
\r
3884 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3886 unsigned int captureDeviceCount = 0;
\r
3887 unsigned int renderDeviceCount = 0;
\r
3889 IMMDeviceCollection* captureDevices = NULL;
\r
3890 IMMDeviceCollection* renderDevices = NULL;
\r
3892 // Count capture devices
\r
3893 errorText_.clear();
\r
3894 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3895 if ( FAILED( hr ) ) {
\r
3896 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3900 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3901 if ( FAILED( hr ) ) {
\r
3902 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3906 // Count render devices
\r
3907 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3908 if ( FAILED( hr ) ) {
\r
3909 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3913 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3914 if ( FAILED( hr ) ) {
\r
3915 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3920 // release all references
\r
3921 SAFE_RELEASE( captureDevices );
\r
3922 SAFE_RELEASE( renderDevices );
\r
3924 if ( errorText_.empty() )
\r
3925 return captureDeviceCount + renderDeviceCount;
\r
3927 error( RtAudioError::DRIVER_ERROR );
\r
3931 //-----------------------------------------------------------------------------
\r
3933 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3935 RtAudio::DeviceInfo info;
\r
3936 unsigned int captureDeviceCount = 0;
\r
3937 unsigned int renderDeviceCount = 0;
\r
3938 std::wstring deviceName;
\r
3939 std::string defaultDeviceName;
\r
3940 bool isCaptureDevice = false;
\r
3942 PROPVARIANT deviceNameProp;
\r
3943 PROPVARIANT defaultDeviceNameProp;
\r
3945 IMMDeviceCollection* captureDevices = NULL;
\r
3946 IMMDeviceCollection* renderDevices = NULL;
\r
3947 IMMDevice* devicePtr = NULL;
\r
3948 IMMDevice* defaultDevicePtr = NULL;
\r
3949 IAudioClient* audioClient = NULL;
\r
3950 IPropertyStore* devicePropStore = NULL;
\r
3951 IPropertyStore* defaultDevicePropStore = NULL;
\r
3953 WAVEFORMATEX* deviceFormat = NULL;
\r
3954 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3957 info.probed = false;
\r
3959 // Count capture devices
\r
3960 errorText_.clear();
\r
3961 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3962 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3963 if ( FAILED( hr ) ) {
\r
3964 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3968 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3974 // Count render devices
\r
3975 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3976 if ( FAILED( hr ) ) {
\r
3977 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3981 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3987 // validate device index
\r
3988 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3990 errorType = RtAudioError::INVALID_USE;
\r
3994 // determine whether index falls within capture or render devices
\r
3995 if ( device >= renderDeviceCount ) {
\r
3996 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3997 if ( FAILED( hr ) ) {
\r
3998 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4001 isCaptureDevice = true;
\r
4004 hr = renderDevices->Item( device, &devicePtr );
\r
4005 if ( FAILED( hr ) ) {
\r
4006 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4009 isCaptureDevice = false;
\r
4012 // get default device name
\r
4013 if ( isCaptureDevice ) {
\r
4014 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4015 if ( FAILED( hr ) ) {
\r
4016 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4021 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4022 if ( FAILED( hr ) ) {
\r
4023 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4028 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4029 if ( FAILED( hr ) ) {
\r
4030 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4033 PropVariantInit( &defaultDeviceNameProp );
\r
4035 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4036 if ( FAILED( hr ) ) {
\r
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4041 deviceName = defaultDeviceNameProp.pwszVal;
\r
4042 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4045 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4046 if ( FAILED( hr ) ) {
\r
4047 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4051 PropVariantInit( &deviceNameProp );
\r
4053 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4054 if ( FAILED( hr ) ) {
\r
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4059 deviceName = deviceNameProp.pwszVal;
\r
4060 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4063 if ( isCaptureDevice ) {
\r
4064 info.isDefaultInput = info.name == defaultDeviceName;
\r
4065 info.isDefaultOutput = false;
\r
4068 info.isDefaultInput = false;
\r
4069 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4073 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4074 if ( FAILED( hr ) ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4079 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4080 if ( FAILED( hr ) ) {
\r
4081 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4085 if ( isCaptureDevice ) {
\r
4086 info.inputChannels = deviceFormat->nChannels;
\r
4087 info.outputChannels = 0;
\r
4088 info.duplexChannels = 0;
\r
4091 info.inputChannels = 0;
\r
4092 info.outputChannels = deviceFormat->nChannels;
\r
4093 info.duplexChannels = 0;
\r
4097 info.sampleRates.clear();
\r
4099 // allow support for all sample rates as we have a built-in sample rate converter
\r
4100 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4101 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4105 info.nativeFormats = 0;
\r
4107 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4108 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4109 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4111 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4112 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4114 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4115 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4118 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4119 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4120 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4122 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4123 info.nativeFormats |= RTAUDIO_SINT8;
\r
4125 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4126 info.nativeFormats |= RTAUDIO_SINT16;
\r
4128 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT24;
\r
4131 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4132 info.nativeFormats |= RTAUDIO_SINT32;
\r
4137 info.probed = true;
\r
4140 // release all references
\r
4141 PropVariantClear( &deviceNameProp );
\r
4142 PropVariantClear( &defaultDeviceNameProp );
\r
4144 SAFE_RELEASE( captureDevices );
\r
4145 SAFE_RELEASE( renderDevices );
\r
4146 SAFE_RELEASE( devicePtr );
\r
4147 SAFE_RELEASE( defaultDevicePtr );
\r
4148 SAFE_RELEASE( audioClient );
\r
4149 SAFE_RELEASE( devicePropStore );
\r
4150 SAFE_RELEASE( defaultDevicePropStore );
\r
4152 CoTaskMemFree( deviceFormat );
\r
4153 CoTaskMemFree( closestMatchFormat );
\r
4155 if ( !errorText_.empty() )
\r
4156 error( errorType );
\r
4160 //-----------------------------------------------------------------------------
\r
4162 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4164 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4165 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4173 //-----------------------------------------------------------------------------
\r
4175 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4177 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4178 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4186 //-----------------------------------------------------------------------------
\r
4188 void RtApiWasapi::closeStream( void )
\r
4190 if ( stream_.state == STREAM_CLOSED ) {
\r
4191 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4192 error( RtAudioError::WARNING );
\r
4196 if ( stream_.state != STREAM_STOPPED )
\r
4199 // clean up stream memory
\r
4200 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4201 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4204 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4206 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4207 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4209 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4210 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4212 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4213 stream_.apiHandle = NULL;
\r
4215 for ( int i = 0; i < 2; i++ ) {
\r
4216 if ( stream_.userBuffer[i] ) {
\r
4217 free( stream_.userBuffer[i] );
\r
4218 stream_.userBuffer[i] = 0;
\r
4222 if ( stream_.deviceBuffer ) {
\r
4223 free( stream_.deviceBuffer );
\r
4224 stream_.deviceBuffer = 0;
\r
4227 // update stream state
\r
4228 stream_.state = STREAM_CLOSED;
\r
4231 //-----------------------------------------------------------------------------
\r
4233 void RtApiWasapi::startStream( void )
\r
4237 if ( stream_.state == STREAM_RUNNING ) {
\r
4238 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4239 error( RtAudioError::WARNING );
\r
4243 // update stream state
\r
4244 stream_.state = STREAM_RUNNING;
\r
4246 // create WASAPI stream thread
\r
4247 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4249 if ( !stream_.callbackInfo.thread ) {
\r
4250 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4251 error( RtAudioError::THREAD_ERROR );
\r
4254 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4255 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4259 //-----------------------------------------------------------------------------
\r
4261 void RtApiWasapi::stopStream( void )
\r
4265 if ( stream_.state == STREAM_STOPPED ) {
\r
4266 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4267 error( RtAudioError::WARNING );
\r
4271 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4272 stream_.state = STREAM_STOPPING;
\r
4274 // wait until stream thread is stopped
\r
4275 while( stream_.state != STREAM_STOPPED ) {
\r
4279 // Wait for the last buffer to play before stopping.
\r
4280 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4282 // stop capture client if applicable
\r
4283 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4284 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4285 if ( FAILED( hr ) ) {
\r
4286 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4287 error( RtAudioError::DRIVER_ERROR );
\r
4292 // stop render client if applicable
\r
4293 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4294 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4295 if ( FAILED( hr ) ) {
\r
4296 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4297 error( RtAudioError::DRIVER_ERROR );
\r
4302 // close thread handle
\r
4303 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4304 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4305 error( RtAudioError::THREAD_ERROR );
\r
4309 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4312 //-----------------------------------------------------------------------------
\r
4314 void RtApiWasapi::abortStream( void )
\r
4318 if ( stream_.state == STREAM_STOPPED ) {
\r
4319 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4320 error( RtAudioError::WARNING );
\r
4324 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4325 stream_.state = STREAM_STOPPING;
\r
4327 // wait until stream thread is stopped
\r
4328 while ( stream_.state != STREAM_STOPPED ) {
\r
4332 // stop capture client if applicable
\r
4333 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4334 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4335 if ( FAILED( hr ) ) {
\r
4336 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4337 error( RtAudioError::DRIVER_ERROR );
\r
4342 // stop render client if applicable
\r
4343 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4344 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4345 if ( FAILED( hr ) ) {
\r
4346 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4347 error( RtAudioError::DRIVER_ERROR );
\r
4352 // close thread handle
\r
4353 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4354 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4355 error( RtAudioError::THREAD_ERROR );
\r
4359 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4362 //-----------------------------------------------------------------------------
\r
4364 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4365 unsigned int firstChannel, unsigned int sampleRate,
\r
4366 RtAudioFormat format, unsigned int* bufferSize,
\r
4367 RtAudio::StreamOptions* options )
\r
4369 bool methodResult = FAILURE;
\r
4370 unsigned int captureDeviceCount = 0;
\r
4371 unsigned int renderDeviceCount = 0;
\r
4373 IMMDeviceCollection* captureDevices = NULL;
\r
4374 IMMDeviceCollection* renderDevices = NULL;
\r
4375 IMMDevice* devicePtr = NULL;
\r
4376 WAVEFORMATEX* deviceFormat = NULL;
\r
4377 unsigned int bufferBytes;
\r
4378 stream_.state = STREAM_STOPPED;
\r
4380 // create API Handle if not already created
\r
4381 if ( !stream_.apiHandle )
\r
4382 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4384 // Count capture devices
\r
4385 errorText_.clear();
\r
4386 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4387 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4388 if ( FAILED( hr ) ) {
\r
4389 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4393 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4399 // Count render devices
\r
4400 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4401 if ( FAILED( hr ) ) {
\r
4402 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4406 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4407 if ( FAILED( hr ) ) {
\r
4408 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4412 // validate device index
\r
4413 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4414 errorType = RtAudioError::INVALID_USE;
\r
4415 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4419 // determine whether index falls within capture or render devices
\r
4420 if ( device >= renderDeviceCount ) {
\r
4421 if ( mode != INPUT ) {
\r
4422 errorType = RtAudioError::INVALID_USE;
\r
4423 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4427 // retrieve captureAudioClient from devicePtr
\r
4428 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4430 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4431 if ( FAILED( hr ) ) {
\r
4432 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4436 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4437 NULL, ( void** ) &captureAudioClient );
\r
4438 if ( FAILED( hr ) ) {
\r
4439 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4443 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4444 if ( FAILED( hr ) ) {
\r
4445 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4449 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4450 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4453 if ( mode != OUTPUT ) {
\r
4454 errorType = RtAudioError::INVALID_USE;
\r
4455 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4459 // retrieve renderAudioClient from devicePtr
\r
4460 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4462 hr = renderDevices->Item( device, &devicePtr );
\r
4463 if ( FAILED( hr ) ) {
\r
4464 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4468 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4469 NULL, ( void** ) &renderAudioClient );
\r
4470 if ( FAILED( hr ) ) {
\r
4471 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4475 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4476 if ( FAILED( hr ) ) {
\r
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4481 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4482 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4485 // fill stream data
\r
4486 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4487 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4488 stream_.mode = DUPLEX;
\r
4491 stream_.mode = mode;
\r
4494 stream_.device[mode] = device;
\r
4495 stream_.doByteSwap[mode] = false;
\r
4496 stream_.sampleRate = sampleRate;
\r
4497 stream_.bufferSize = *bufferSize;
\r
4498 stream_.nBuffers = 1;
\r
4499 stream_.nUserChannels[mode] = channels;
\r
4500 stream_.channelOffset[mode] = firstChannel;
\r
4501 stream_.userFormat = format;
\r
4502 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4504 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4505 stream_.userInterleaved = false;
\r
4507 stream_.userInterleaved = true;
\r
4508 stream_.deviceInterleaved[mode] = true;
\r
4510 // Set flags for buffer conversion.
\r
4511 stream_.doConvertBuffer[mode] = false;
\r
4512 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4513 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4514 stream_.doConvertBuffer[mode] = true;
\r
4515 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4516 stream_.nUserChannels[mode] > 1 )
\r
4517 stream_.doConvertBuffer[mode] = true;
\r
4519 if ( stream_.doConvertBuffer[mode] )
\r
4520 setConvertInfo( mode, 0 );
\r
4522 // Allocate necessary internal buffers
\r
4523 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4525 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4526 if ( !stream_.userBuffer[mode] ) {
\r
4527 errorType = RtAudioError::MEMORY_ERROR;
\r
4528 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4532 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4533 stream_.callbackInfo.priority = 15;
\r
4535 stream_.callbackInfo.priority = 0;
\r
4537 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4538 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4540 methodResult = SUCCESS;
\r
4544 SAFE_RELEASE( captureDevices );
\r
4545 SAFE_RELEASE( renderDevices );
\r
4546 SAFE_RELEASE( devicePtr );
\r
4547 CoTaskMemFree( deviceFormat );
\r
4549 // if method failed, close the stream
\r
4550 if ( methodResult == FAILURE )
\r
4553 if ( !errorText_.empty() )
\r
4554 error( errorType );
\r
4555 return methodResult;
\r
4558 //=============================================================================
\r
4560 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4563 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4568 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4571 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4576 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4579 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4584 //-----------------------------------------------------------------------------
\r
// NOTE(review): this chunk of the file is a mangled extraction -- the
// original file's own line numbers are fused onto each statement and many
// lines (opening/closing braces, some argument tails, the error-exit label)
// are missing. The code below is deliberately left byte-identical; only
// comments are added. Restore from a canonical RtAudio.cpp before building.
//
// wasapiThread(): body of the WASAPI stream-servicing thread. CoInitializes
// the thread, lazily initializes and starts the shared-mode capture and/or
// render audio clients (event-driven), then loops while
// stream_.state != STREAM_STOPPING: pull captured frames, sample-rate/format
// convert, run the user callback, convert and push rendered frames. On exit
// it frees the mix formats and conversion buffer, sets STREAM_STOPPED, and
// reports any accumulated errorText_.
4586 void RtApiWasapi::wasapiThread()

4588 // as this is a new thread, we must CoInitialize it
4589 CoInitialize( NULL );

// Cached handles from the stream's WasapiHandle (set up by probeDeviceOpen).
4593 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4594 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4595 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4596 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4597 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4598 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4600 WAVEFORMATEX* captureFormat = NULL;

4601 WAVEFORMATEX* renderFormat = NULL;

// Device-rate / user-rate ratios used to size and drive resampling.
4602 float captureSrRatio = 0.0f;

4603 float renderSrRatio = 0.0f;

4604 WasapiBuffer captureBuffer;

4605 WasapiBuffer renderBuffer;

4607 // declare local stream variables

4608 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4609 BYTE* streamBuffer = NULL;

4610 unsigned long captureFlags = 0;

4611 unsigned int bufferFrameCount = 0;

4612 unsigned int numFramesPadding = 0;

4613 unsigned int convBufferSize = 0;

4614 bool callbackPushed = false;

4615 bool callbackPulled = false;

4616 bool callbackStopped = false;

4617 int callbackResult = 0;

4619 // convBuffer is used to store converted buffers between WASAPI and the user

4620 char* convBuffer = NULL;

4621 unsigned int convBuffSize = 0;

4622 unsigned int deviceBuffSize = 0;

4624 errorText_.clear();

4625 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4627 // Attempt to assign "Pro Audio" characteristic to thread

4628 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4630 DWORD taskIndex = 0;

4631 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4632 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4633 FreeLibrary( AvrtDll );

4636 // start capture stream if applicable

4637 if ( captureAudioClient ) {

4638 hr = captureAudioClient->GetMixFormat( &captureFormat );

4639 if ( FAILED( hr ) ) {

4640 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4644 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4646 // initialize capture stream according to desire buffer size

4647 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// REFERENCE_TIME is in 100-ns units, hence the 10,000,000 scale factor.
4648 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

// First pass only: probeDeviceOpen leaves captureClient NULL, so the
// shared-mode, event-callback client is initialized lazily here.
4650 if ( !captureClient ) {

4651 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4652 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4653 desiredBufferPeriod,

4654 desiredBufferPeriod,

4657 if ( FAILED( hr ) ) {

4658 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4662 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4663 ( void** ) &captureClient );

4664 if ( FAILED( hr ) ) {

4665 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4669 // configure captureEvent to trigger on every available capture buffer

4670 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4671 if ( !captureEvent ) {

4672 errorType = RtAudioError::SYSTEM_ERROR;

4673 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4677 hr = captureAudioClient->SetEventHandle( captureEvent );

4678 if ( FAILED( hr ) ) {

4679 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Publish the lazily-created objects back into the shared WasapiHandle so
// stopStream()/closeStream() can release them.
4683 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4684 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4687 unsigned int inBufferSize = 0;

4688 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4689 if ( FAILED( hr ) ) {

4690 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4694 // scale outBufferSize according to stream->user sample rate ratio

4695 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4696 inBufferSize *= stream_.nDeviceChannels[INPUT];

4698 // set captureBuffer size

4699 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4701 // reset the capture stream

4702 hr = captureAudioClient->Reset();

4703 if ( FAILED( hr ) ) {

4704 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4708 // start the capture stream

4709 hr = captureAudioClient->Start();

4710 if ( FAILED( hr ) ) {

4711 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4716 // start render stream if applicable

4717 if ( renderAudioClient ) {

4718 hr = renderAudioClient->GetMixFormat( &renderFormat );

4719 if ( FAILED( hr ) ) {

4720 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4724 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4726 // initialize render stream according to desire buffer size

4727 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4728 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4730 if ( !renderClient ) {

4731 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4732 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4733 desiredBufferPeriod,

4734 desiredBufferPeriod,

4737 if ( FAILED( hr ) ) {

4738 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4742 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4743 ( void** ) &renderClient );

4744 if ( FAILED( hr ) ) {

4745 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4749 // configure renderEvent to trigger on every available render buffer

4750 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4751 if ( !renderEvent ) {

4752 errorType = RtAudioError::SYSTEM_ERROR;

4753 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4757 hr = renderAudioClient->SetEventHandle( renderEvent );

4758 if ( FAILED( hr ) ) {

4759 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4763 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4764 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4767 unsigned int outBufferSize = 0;

4768 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4769 if ( FAILED( hr ) ) {

4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4774 // scale inBufferSize according to user->stream sample rate ratio

4775 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4776 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4778 // set renderBuffer size

4779 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4781 // reset the render stream

4782 hr = renderAudioClient->Reset();

4783 if ( FAILED( hr ) ) {

4784 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4788 // start the render stream

4789 hr = renderAudioClient->Start();

4790 if ( FAILED( hr ) ) {

4791 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the larger of the two requirements so one buffer serves both.
4796 if ( stream_.mode == INPUT ) {

4797 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4798 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4800 else if ( stream_.mode == OUTPUT ) {

4801 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4802 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4804 else if ( stream_.mode == DUPLEX ) {

4805 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4806 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4807 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4808 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4811 convBuffer = ( char* ) malloc( convBuffSize );

4812 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4813 if ( !convBuffer || !stream_.deviceBuffer ) {

4814 errorType = RtAudioError::MEMORY_ERROR;

4815 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4819 // stream process loop

4820 while ( stream_.state != STREAM_STOPPING ) {

4821 if ( !callbackPulled ) {

4824 // 1. Pull callback buffer from inputBuffer

4825 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4826 // Convert callback buffer to user format

4828 if ( captureAudioClient ) {

4829 // Pull callback buffer from inputBuffer

4830 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4831 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4832 stream_.deviceFormat[INPUT] );

4834 if ( callbackPulled ) {

4835 // Convert callback buffer to user sample rate

4836 convertBufferWasapi( stream_.deviceBuffer,

4838 stream_.nDeviceChannels[INPUT],

4839 captureFormat->nSamplesPerSec,

4840 stream_.sampleRate,

4841 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4843 stream_.deviceFormat[INPUT] );

4845 if ( stream_.doConvertBuffer[INPUT] ) {

4846 // Convert callback buffer to user format

4847 convertBuffer( stream_.userBuffer[INPUT],

4848 stream_.deviceBuffer,

4849 stream_.convertInfo[INPUT] );

4852 // no further conversion, simple copy deviceBuffer to userBuffer

4853 memcpy( stream_.userBuffer[INPUT],

4854 stream_.deviceBuffer,

4855 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4860 // if there is no capture stream, set callbackPulled flag

4861 callbackPulled = true;

4864 // Execute Callback

4865 // ================

4866 // 1. Execute user callback method

4867 // 2. Handle return value from callback

4869 // if callback has not requested the stream to stop

4870 if ( callbackPulled && !callbackStopped ) {

4871 // Execute user callback method

4872 callbackResult = callback( stream_.userBuffer[OUTPUT],

4873 stream_.userBuffer[INPUT],

4874 stream_.bufferSize,

// Report an input overflow to the user when WASAPI flags a discontinuity.
4876 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4877 stream_.callbackInfo.userData );

4879 // Handle return value from callback

// 1 == drain and stop; a helper thread is used because stopStream() joins
// this thread and must not be called from it.
4880 if ( callbackResult == 1 ) {

4881 // instantiate a thread to stop this thread

4882 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4883 if ( !threadHandle ) {

4884 errorType = RtAudioError::THREAD_ERROR;

4885 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4888 else if ( !CloseHandle( threadHandle ) ) {

4889 errorType = RtAudioError::THREAD_ERROR;

4890 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4894 callbackStopped = true;

// 2 == abort immediately, same helper-thread pattern.
4896 else if ( callbackResult == 2 ) {

4897 // instantiate a thread to stop this thread

4898 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4899 if ( !threadHandle ) {

4900 errorType = RtAudioError::THREAD_ERROR;

4901 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4904 else if ( !CloseHandle( threadHandle ) ) {

4905 errorType = RtAudioError::THREAD_ERROR;

4906 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4910 callbackStopped = true;

4915 // Callback Output

4916 // ===============

4917 // 1. Convert callback buffer to stream format

4918 // 2. Convert callback buffer to stream sample rate and channel count

4919 // 3. Push callback buffer into outputBuffer

4921 if ( renderAudioClient && callbackPulled ) {

4922 if ( stream_.doConvertBuffer[OUTPUT] ) {

4923 // Convert callback buffer to stream format

4924 convertBuffer( stream_.deviceBuffer,

4925 stream_.userBuffer[OUTPUT],

4926 stream_.convertInfo[OUTPUT] );

4930 // Convert callback buffer to stream sample rate

4931 convertBufferWasapi( convBuffer,

4932 stream_.deviceBuffer,

4933 stream_.nDeviceChannels[OUTPUT],

4934 stream_.sampleRate,

4935 renderFormat->nSamplesPerSec,

4936 stream_.bufferSize,

4938 stream_.deviceFormat[OUTPUT] );

4940 // Push callback buffer into outputBuffer

4941 callbackPushed = renderBuffer.pushBuffer( convBuffer,

4942 convBufferSize * stream_.nDeviceChannels[OUTPUT],

4943 stream_.deviceFormat[OUTPUT] );

4946 // if there is no render stream, set callbackPushed flag

4947 callbackPushed = true;

4952 // 1. Get capture buffer from stream

4953 // 2. Push capture buffer into inputBuffer

4954 // 3. If 2. was successful: Release capture buffer

4956 if ( captureAudioClient ) {

4957 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

4958 if ( !callbackPulled ) {

4959 WaitForSingleObject( captureEvent, INFINITE );

4962 // Get capture buffer from stream

4963 hr = captureClient->GetBuffer( &streamBuffer,

4964 &bufferFrameCount,

4965 &captureFlags, NULL, NULL );

4966 if ( FAILED( hr ) ) {

4967 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

4971 if ( bufferFrameCount != 0 ) {

4972 // Push capture buffer into inputBuffer

4973 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

4974 bufferFrameCount * stream_.nDeviceChannels[INPUT],

4975 stream_.deviceFormat[INPUT] ) )

4977 // Release capture buffer

4978 hr = captureClient->ReleaseBuffer( bufferFrameCount );

4979 if ( FAILED( hr ) ) {

4980 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

4986 // Inform WASAPI that capture was unsuccessful

4987 hr = captureClient->ReleaseBuffer( 0 );

4988 if ( FAILED( hr ) ) {

4989 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

4996 // Inform WASAPI that capture was unsuccessful

4997 hr = captureClient->ReleaseBuffer( 0 );

4998 if ( FAILED( hr ) ) {

4999 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5007 // 1. Get render buffer from stream

5008 // 2. Pull next buffer from outputBuffer

5009 // 3. If 2. was successful: Fill render buffer with next buffer

5010 // Release render buffer

5012 if ( renderAudioClient ) {

5013 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5014 if ( callbackPulled && !callbackPushed ) {

5015 WaitForSingleObject( renderEvent, INFINITE );

5018 // Get render buffer from stream

5019 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5020 if ( FAILED( hr ) ) {

5021 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5025 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5026 if ( FAILED( hr ) ) {

5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Writable space = total buffer minus frames still queued for playback.
5031 bufferFrameCount -= numFramesPadding;

5033 if ( bufferFrameCount != 0 ) {

5034 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5035 if ( FAILED( hr ) ) {

5036 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5040 // Pull next buffer from outputBuffer

5041 // Fill render buffer with next buffer

5042 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5043 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5044 stream_.deviceFormat[OUTPUT] ) )

5046 // Release render buffer

5047 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5048 if ( FAILED( hr ) ) {

5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5055 // Inform WASAPI that render was unsuccessful

5056 hr = renderClient->ReleaseBuffer( 0, 0 );

5057 if ( FAILED( hr ) ) {

5058 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5065 // Inform WASAPI that render was unsuccessful

5066 hr = renderClient->ReleaseBuffer( 0, 0 );

5067 if ( FAILED( hr ) ) {

5068 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5074 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5075 if ( callbackPushed ) {

5076 callbackPulled = false;

5079 // tick stream time

5080 RtApi::tickStreamTime();

// Cleanup (reached via the missing error-exit label as well as normal exit).
5085 CoTaskMemFree( captureFormat );

5086 CoTaskMemFree( renderFormat );

5088 //delete convBuffer;

5089 free ( convBuffer );

5093 // update stream state

5094 stream_.state = STREAM_STOPPED;

5096 if ( errorText_.empty() )

5099 error( errorType );
\r
5102 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5106 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5108 // Modified by Robin Davies, October 2005
\r
5109 // - Improvements to DirectX pointer chasing.
\r
5110 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5111 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5112 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5113 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5115 #include <dsound.h>
\r
5116 #include <assert.h>
\r
5117 #include <algorithm>
\r
#if defined(__MINGW32__)
  // missing from latest mingw winapi
#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
#endif

// Lower bound (in bytes) enforced on the DirectSound device buffer size.
#define MINIMUM_DEVICE_BUFFER_SIZE 32768

#ifdef _MSC_VER // if Microsoft Visual C++
  #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
#endif
\r
5133 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5135 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5136 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5137 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5138 return pointer >= earlierPointer && pointer < laterPointer;
\r
5141 // A structure to hold various information related to the DirectSound
\r
5142 // API implementation.
\r
5144 unsigned int drainCounter; // Tracks callback counts when draining
\r
5145 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5149 UINT bufferPointer[2];
\r
5150 DWORD dsBufferSize[2];
\r
5151 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5155 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5158 // Declarations for utility functions, callbacks, and structures
\r
5159 // specific to the DirectSound implementation.
\r
5160 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5161 LPCTSTR description,
\r
5163 LPVOID lpContext );
\r
5165 static const char* getErrorString( int code );
\r
5167 static unsigned __stdcall callbackHandler( void *ptr );
\r
5176 : found(false) { validId[0] = false; validId[1] = false; }
\r
5179 struct DsProbeData {
\r
5181 std::vector<struct DsDevice>* dsDevices;
\r
5184 RtApiDs :: RtApiDs()
\r
5186 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5187 // accept whatever the mainline chose for a threading model.
\r
5188 coInitialized_ = false;
\r
5189 HRESULT hr = CoInitialize( NULL );
\r
5190 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5193 RtApiDs :: ~RtApiDs()
\r
5195 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5196 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5199 // The DirectSound default output is always the first device.
\r
5200 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5205 // The DirectSound default input is always the first input device,
\r
5206 // which is the first capture device enumerated.
\r
5207 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5212 unsigned int RtApiDs :: getDeviceCount( void )
\r
5214 // Set query flag for previously found devices to false, so that we
\r
5215 // can check for any devices that have disappeared.
\r
5216 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5217 dsDevices[i].found = false;
\r
5219 // Query DirectSound devices.
\r
5220 struct DsProbeData probeInfo;
\r
5221 probeInfo.isInput = false;
\r
5222 probeInfo.dsDevices = &dsDevices;
\r
5223 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5224 if ( FAILED( result ) ) {
\r
5225 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5226 errorText_ = errorStream_.str();
\r
5227 error( RtAudioError::WARNING );
\r
5230 // Query DirectSoundCapture devices.
\r
5231 probeInfo.isInput = true;
\r
5232 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5233 if ( FAILED( result ) ) {
\r
5234 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5235 errorText_ = errorStream_.str();
\r
5236 error( RtAudioError::WARNING );
\r
5239 // Clean out any devices that may have disappeared.
\r
5240 std::vector< int > indices;
\r
5241 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5242 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5243 //unsigned int nErased = 0;
\r
5244 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5245 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5246 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5248 return static_cast<unsigned int>(dsDevices.size());
\r
// NOTE(review): mangled extraction -- the original file's line numbers are
// fused onto each statement, and the DSCAPS/DSCCAPS declarations, the
// "probeInput:" goto label, several braces, and the return statements are
// missing from this chunk. Code left byte-identical; comments only added.
// Restore from a canonical RtAudio.cpp before building.
//
// getDeviceInfo(): probes DirectSound device `device` -- first its output
// side, then (via the missing probeInput label) its capture side -- filling
// an RtAudio::DeviceInfo with channel counts, supported sample rates, and
// native 8/16-bit formats. Probe failures are reported as warnings and
// leave info.probed == false.
5251 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

5253 RtAudio::DeviceInfo info;

5254 info.probed = false;

5256 if ( dsDevices.size() == 0 ) {

5257 // Force a query of all devices

5259 if ( dsDevices.size() == 0 ) {

5260 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

5261 error( RtAudioError::INVALID_USE );

5266 if ( device >= dsDevices.size() ) {

5267 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

5268 error( RtAudioError::INVALID_USE );

// Skip the output probe entirely if this device has no output GUID.
5273 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

5275 LPDIRECTSOUND output;

5277 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5278 if ( FAILED( result ) ) {

5279 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5280 errorText_ = errorStream_.str();

5281 error( RtAudioError::WARNING );

5285 outCaps.dwSize = sizeof( outCaps );

5286 result = output->GetCaps( &outCaps );

5287 if ( FAILED( result ) ) {

5288 output->Release();

5289 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

5290 errorText_ = errorStream_.str();

5291 error( RtAudioError::WARNING );

5295 // Get output channel information.

5296 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

5298 // Get sample rate information.

5299 info.sampleRates.clear();

5300 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

5301 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

5302 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

5303 info.sampleRates.push_back( SAMPLE_RATES[k] );

5306 // Get format information.

5307 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

5308 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

5310 output->Release();

5312 if ( getDefaultOutputDevice() == device )

5313 info.isDefaultOutput = true;

// If the device has no capture GUID, finish with the output-only result.
5315 if ( dsDevices[ device ].validId[1] == false ) {

5316 info.name = dsDevices[ device ].name;

5317 info.probed = true;

// (missing here: "probeInput:" label and DSCCAPS declaration)
5323 LPDIRECTSOUNDCAPTURE input;

5324 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5325 if ( FAILED( result ) ) {

5326 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5327 errorText_ = errorStream_.str();

5328 error( RtAudioError::WARNING );

5333 inCaps.dwSize = sizeof( inCaps );

5334 result = input->GetCaps( &inCaps );

5335 if ( FAILED( result ) ) {

5337 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

5338 errorText_ = errorStream_.str();

5339 error( RtAudioError::WARNING );

5343 // Get input channel information.

5344 info.inputChannels = inCaps.dwChannels;

5346 // Get sample rate and format information.

5347 std::vector<unsigned int> rates;

// Stereo (or more) devices: derive supported formats and rates from the
// WAVE_FORMAT_* stereo capability bits.
5348 if ( inCaps.dwChannels >= 2 ) {

5349 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5350 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5351 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5352 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5353 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5354 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5355 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5356 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5358 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5359 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

5360 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

5361 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

5362 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

5364 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5365 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

5366 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

5367 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

5368 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

// Mono devices: same derivation from the mono capability bits.
5371 else if ( inCaps.dwChannels == 1 ) {

5372 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5373 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5374 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5375 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5376 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5377 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5378 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5379 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5381 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5382 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

5383 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

5384 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

5385 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

5387 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5388 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

5389 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

5390 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

5391 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

5394 else info.inputChannels = 0; // technically, this would be an error

5398 if ( info.inputChannels == 0 ) return info;

5400 // Copy the supported rates to the info structure but avoid duplication.

5402 for ( unsigned int i=0; i<rates.size(); i++ ) {

5404 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

5405 if ( rates[i] == info.sampleRates[j] ) {

5410 if ( found == false ) info.sampleRates.push_back( rates[i] );

5412 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

5414 // If device opens for both playback and capture, we determine the channels.

5415 if ( info.outputChannels > 0 && info.inputChannels > 0 )

5416 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

5418 if ( device == 0 ) info.isDefaultInput = true;

5420 // Copy name and return.

5421 info.name = dsDevices[ device ].name;

5422 info.probed = true;
\r
5426 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5427 unsigned int firstChannel, unsigned int sampleRate,
\r
5428 RtAudioFormat format, unsigned int *bufferSize,
\r
5429 RtAudio::StreamOptions *options )
\r
5431 if ( channels + firstChannel > 2 ) {
\r
5432 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5436 size_t nDevices = dsDevices.size();
\r
5437 if ( nDevices == 0 ) {
\r
5438 // This should not happen because a check is made before this function is called.
\r
5439 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5443 if ( device >= nDevices ) {
\r
5444 // This should not happen because a check is made before this function is called.
\r
5445 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5449 if ( mode == OUTPUT ) {
\r
5450 if ( dsDevices[ device ].validId[0] == false ) {
\r
5451 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5452 errorText_ = errorStream_.str();
\r
5456 else { // mode == INPUT
\r
5457 if ( dsDevices[ device ].validId[1] == false ) {
\r
5458 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5459 errorText_ = errorStream_.str();
\r
5464 // According to a note in PortAudio, using GetDesktopWindow()
\r
5465 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5466 // that occur when the application's window is not the foreground
\r
5467 // window. Also, if the application window closes before the
\r
5468 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5469 // problems when using GetDesktopWindow() but it seems fine now
\r
5470 // (January 2010). I'll leave it commented here.
\r
5471 // HWND hWnd = GetForegroundWindow();
\r
5472 HWND hWnd = GetDesktopWindow();
\r
5474 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5475 // two. This is a judgement call and a value of two is probably too
\r
5476 // low for capture, but it should work for playback.
\r
5478 if ( options ) nBuffers = options->numberOfBuffers;
\r
5479 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5480 if ( nBuffers < 2 ) nBuffers = 3;
\r
5482 // Check the lower range of the user-specified buffer size and set
\r
5483 // (arbitrarily) to a lower bound of 32.
\r
5484 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5486 // Create the wave format structure. The data format setting will
\r
5487 // be determined later.
\r
5488 WAVEFORMATEX waveFormat;
\r
5489 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5490 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5491 waveFormat.nChannels = channels + firstChannel;
\r
5492 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5494 // Determine the device buffer size. By default, we'll use the value
\r
5495 // defined above (32K), but we will grow it to make allowances for
\r
5496 // very large software buffer sizes.
\r
5497 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5498 DWORD dsPointerLeadTime = 0;
\r
5500 void *ohandle = 0, *bhandle = 0;
\r
5502 if ( mode == OUTPUT ) {
\r
5504 LPDIRECTSOUND output;
\r
5505 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5506 if ( FAILED( result ) ) {
\r
5507 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5508 errorText_ = errorStream_.str();
\r
5513 outCaps.dwSize = sizeof( outCaps );
\r
5514 result = output->GetCaps( &outCaps );
\r
5515 if ( FAILED( result ) ) {
\r
5516 output->Release();
\r
5517 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5518 errorText_ = errorStream_.str();
\r
5522 // Check channel information.
\r
5523 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5524 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5525 errorText_ = errorStream_.str();
\r
5529 // Check format information. Use 16-bit format unless not
\r
5530 // supported or user requests 8-bit.
\r
5531 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5532 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5533 waveFormat.wBitsPerSample = 16;
\r
5534 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5537 waveFormat.wBitsPerSample = 8;
\r
5538 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5540 stream_.userFormat = format;
\r
5542 // Update wave format structure and buffer information.
\r
5543 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5544 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5545 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5547 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5548 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5549 dsBufferSize *= 2;
\r
5551 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5552 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5553 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5554 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5555 if ( FAILED( result ) ) {
\r
5556 output->Release();
\r
5557 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5558 errorText_ = errorStream_.str();
\r
5562 // Even though we will write to the secondary buffer, we need to
\r
5563 // access the primary buffer to set the correct output format
\r
5564 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5565 // buffer description.
\r
5566 DSBUFFERDESC bufferDescription;
\r
5567 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5568 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5569 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5571 // Obtain the primary buffer
\r
5572 LPDIRECTSOUNDBUFFER buffer;
\r
5573 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5574 if ( FAILED( result ) ) {
\r
5575 output->Release();
\r
5576 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5577 errorText_ = errorStream_.str();
\r
5581 // Set the primary DS buffer sound format.
\r
5582 result = buffer->SetFormat( &waveFormat );
\r
5583 if ( FAILED( result ) ) {
\r
5584 output->Release();
\r
5585 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5586 errorText_ = errorStream_.str();
\r
5590 // Setup the secondary DS buffer description.
\r
5591 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5592 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5593 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5594 DSBCAPS_GLOBALFOCUS |
\r
5595 DSBCAPS_GETCURRENTPOSITION2 |
\r
5596 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5597 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5598 bufferDescription.lpwfxFormat = &waveFormat;
\r
5600 // Try to create the secondary DS buffer. If that doesn't work,
\r
5601 // try to use software mixing. Otherwise, there's a problem.
\r
5602 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5603 if ( FAILED( result ) ) {
\r
5604 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5605 DSBCAPS_GLOBALFOCUS |
\r
5606 DSBCAPS_GETCURRENTPOSITION2 |
\r
5607 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5608 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5609 if ( FAILED( result ) ) {
\r
5610 output->Release();
\r
5611 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5612 errorText_ = errorStream_.str();
\r
5617 // Get the buffer size ... might be different from what we specified.
\r
5619 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5620 result = buffer->GetCaps( &dsbcaps );
\r
5621 if ( FAILED( result ) ) {
\r
5622 output->Release();
\r
5623 buffer->Release();
\r
5624 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5625 errorText_ = errorStream_.str();
\r
5629 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5631 // Lock the DS buffer
\r
5634 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5635 if ( FAILED( result ) ) {
\r
5636 output->Release();
\r
5637 buffer->Release();
\r
5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5639 errorText_ = errorStream_.str();
\r
5643 // Zero the DS buffer
\r
5644 ZeroMemory( audioPtr, dataLen );
\r
5646 // Unlock the DS buffer
\r
5647 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5648 if ( FAILED( result ) ) {
\r
5649 output->Release();
\r
5650 buffer->Release();
\r
5651 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5652 errorText_ = errorStream_.str();
\r
5656 ohandle = (void *) output;
\r
5657 bhandle = (void *) buffer;
\r
5660 if ( mode == INPUT ) {
\r
5662 LPDIRECTSOUNDCAPTURE input;
\r
5663 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5664 if ( FAILED( result ) ) {
\r
5665 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5666 errorText_ = errorStream_.str();
\r
5671 inCaps.dwSize = sizeof( inCaps );
\r
5672 result = input->GetCaps( &inCaps );
\r
5673 if ( FAILED( result ) ) {
\r
5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5676 errorText_ = errorStream_.str();
\r
5680 // Check channel information.
\r
5681 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5682 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5686 // Check format information. Use 16-bit format unless user
\r
5687 // requests 8-bit.
\r
5688 DWORD deviceFormats;
\r
5689 if ( channels + firstChannel == 2 ) {
\r
5690 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5691 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5692 waveFormat.wBitsPerSample = 8;
\r
5693 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5695 else { // assume 16-bit is supported
\r
5696 waveFormat.wBitsPerSample = 16;
\r
5697 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5700 else { // channel == 1
\r
5701 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5702 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5703 waveFormat.wBitsPerSample = 8;
\r
5704 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5706 else { // assume 16-bit is supported
\r
5707 waveFormat.wBitsPerSample = 16;
\r
5708 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5711 stream_.userFormat = format;
\r
5713 // Update wave format structure and buffer information.
\r
5714 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5715 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5716 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5718 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5719 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5720 dsBufferSize *= 2;
\r
5722 // Setup the secondary DS buffer description.
\r
5723 DSCBUFFERDESC bufferDescription;
\r
5724 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5725 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5726 bufferDescription.dwFlags = 0;
\r
5727 bufferDescription.dwReserved = 0;
\r
5728 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5729 bufferDescription.lpwfxFormat = &waveFormat;
\r
5731 // Create the capture buffer.
\r
5732 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5733 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5734 if ( FAILED( result ) ) {
\r
5736 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5737 errorText_ = errorStream_.str();
\r
5741 // Get the buffer size ... might be different from what we specified.
\r
5742 DSCBCAPS dscbcaps;
\r
5743 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5744 result = buffer->GetCaps( &dscbcaps );
\r
5745 if ( FAILED( result ) ) {
\r
5747 buffer->Release();
\r
5748 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5749 errorText_ = errorStream_.str();
\r
5753 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5755 // NOTE: We could have a problem here if this is a duplex stream
\r
5756 // and the play and capture hardware buffer sizes are different
\r
5757 // (I'm actually not sure if that is a problem or not).
\r
5758 // Currently, we are not verifying that.
\r
5760 // Lock the capture buffer
\r
5763 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5764 if ( FAILED( result ) ) {
\r
5766 buffer->Release();
\r
5767 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5768 errorText_ = errorStream_.str();
\r
5772 // Zero the buffer
\r
5773 ZeroMemory( audioPtr, dataLen );
\r
5775 // Unlock the buffer
\r
5776 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5777 if ( FAILED( result ) ) {
\r
5779 buffer->Release();
\r
5780 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5781 errorText_ = errorStream_.str();
\r
5785 ohandle = (void *) input;
\r
5786 bhandle = (void *) buffer;
\r
5789 // Set various stream parameters
\r
5790 DsHandle *handle = 0;
\r
5791 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5792 stream_.nUserChannels[mode] = channels;
\r
5793 stream_.bufferSize = *bufferSize;
\r
5794 stream_.channelOffset[mode] = firstChannel;
\r
5795 stream_.deviceInterleaved[mode] = true;
\r
5796 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5797 else stream_.userInterleaved = true;
\r
5799 // Set flag for buffer conversion
\r
5800 stream_.doConvertBuffer[mode] = false;
\r
5801 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5802 stream_.doConvertBuffer[mode] = true;
\r
5803 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5804 stream_.doConvertBuffer[mode] = true;
\r
5805 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5806 stream_.nUserChannels[mode] > 1 )
\r
5807 stream_.doConvertBuffer[mode] = true;
\r
5809 // Allocate necessary internal buffers
\r
5810 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5811 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5812 if ( stream_.userBuffer[mode] == NULL ) {
\r
5813 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5817 if ( stream_.doConvertBuffer[mode] ) {
\r
5819 bool makeBuffer = true;
\r
5820 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5821 if ( mode == INPUT ) {
\r
5822 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5823 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5824 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5828 if ( makeBuffer ) {
\r
5829 bufferBytes *= *bufferSize;
\r
5830 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5831 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5832 if ( stream_.deviceBuffer == NULL ) {
\r
5833 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5839 // Allocate our DsHandle structures for the stream.
\r
5840 if ( stream_.apiHandle == 0 ) {
\r
5842 handle = new DsHandle;
\r
5844 catch ( std::bad_alloc& ) {
\r
5845 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5849 // Create a manual-reset event.
\r
5850 handle->condition = CreateEvent( NULL, // no security
\r
5851 TRUE, // manual-reset
\r
5852 FALSE, // non-signaled initially
\r
5853 NULL ); // unnamed
\r
5854 stream_.apiHandle = (void *) handle;
\r
5857 handle = (DsHandle *) stream_.apiHandle;
\r
5858 handle->id[mode] = ohandle;
\r
5859 handle->buffer[mode] = bhandle;
\r
5860 handle->dsBufferSize[mode] = dsBufferSize;
\r
5861 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5863 stream_.device[mode] = device;
\r
5864 stream_.state = STREAM_STOPPED;
\r
5865 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5866 // We had already set up an output stream.
\r
5867 stream_.mode = DUPLEX;
\r
5869 stream_.mode = mode;
\r
5870 stream_.nBuffers = nBuffers;
\r
5871 stream_.sampleRate = sampleRate;
\r
5873 // Setup the buffer conversion information structure.
\r
5874 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5876 // Setup the callback thread.
\r
5877 if ( stream_.callbackInfo.isRunning == false ) {
\r
5878 unsigned threadId;
\r
5879 stream_.callbackInfo.isRunning = true;
\r
5880 stream_.callbackInfo.object = (void *) this;
\r
5881 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5882 &stream_.callbackInfo, 0, &threadId );
\r
5883 if ( stream_.callbackInfo.thread == 0 ) {
\r
5884 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5888 // Boost DS thread priority
\r
5889 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5895 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5896 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5897 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5898 if ( buffer ) buffer->Release();
\r
5899 object->Release();
\r
5901 if ( handle->buffer[1] ) {
\r
5902 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5903 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5904 if ( buffer ) buffer->Release();
\r
5905 object->Release();
\r
5907 CloseHandle( handle->condition );
\r
5909 stream_.apiHandle = 0;
\r
5912 for ( int i=0; i<2; i++ ) {
\r
5913 if ( stream_.userBuffer[i] ) {
\r
5914 free( stream_.userBuffer[i] );
\r
5915 stream_.userBuffer[i] = 0;
\r
5919 if ( stream_.deviceBuffer ) {
\r
5920 free( stream_.deviceBuffer );
\r
5921 stream_.deviceBuffer = 0;
\r
5924 stream_.state = STREAM_CLOSED;
\r
5928 void RtApiDs :: closeStream()
\r
5930 if ( stream_.state == STREAM_CLOSED ) {
\r
5931 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5932 error( RtAudioError::WARNING );
\r
5936 // Stop the callback thread.
\r
5937 stream_.callbackInfo.isRunning = false;
\r
5938 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5939 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5941 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5943 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5944 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5945 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5948 buffer->Release();
\r
5950 object->Release();
\r
5952 if ( handle->buffer[1] ) {
\r
5953 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5954 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5957 buffer->Release();
\r
5959 object->Release();
\r
5961 CloseHandle( handle->condition );
\r
5963 stream_.apiHandle = 0;
\r
5966 for ( int i=0; i<2; i++ ) {
\r
5967 if ( stream_.userBuffer[i] ) {
\r
5968 free( stream_.userBuffer[i] );
\r
5969 stream_.userBuffer[i] = 0;
\r
5973 if ( stream_.deviceBuffer ) {
\r
5974 free( stream_.deviceBuffer );
\r
5975 stream_.deviceBuffer = 0;
\r
5978 stream_.mode = UNINITIALIZED;
\r
5979 stream_.state = STREAM_CLOSED;
\r
5982 void RtApiDs :: startStream()
\r
5985 if ( stream_.state == STREAM_RUNNING ) {
\r
5986 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5987 error( RtAudioError::WARNING );
\r
5991 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5993 // Increase scheduler frequency on lesser windows (a side-effect of
\r
5994 // increasing timer accuracy). On greater windows (Win2K or later),
\r
5995 // this is already in effect.
\r
5996 timeBeginPeriod( 1 );
\r
5998 buffersRolling = false;
\r
5999 duplexPrerollBytes = 0;
\r
6001 if ( stream_.mode == DUPLEX ) {
\r
6002 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6003 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6006 HRESULT result = 0;
\r
6007 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6009 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6010 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6011 if ( FAILED( result ) ) {
\r
6012 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6013 errorText_ = errorStream_.str();
\r
6018 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6020 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6021 result = buffer->Start( DSCBSTART_LOOPING );
\r
6022 if ( FAILED( result ) ) {
\r
6023 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6024 errorText_ = errorStream_.str();
\r
6029 handle->drainCounter = 0;
\r
6030 handle->internalDrain = false;
\r
6031 ResetEvent( handle->condition );
\r
6032 stream_.state = STREAM_RUNNING;
\r
6035 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6038 void RtApiDs :: stopStream()
\r
6041 if ( stream_.state == STREAM_STOPPED ) {
\r
6042 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6043 error( RtAudioError::WARNING );
\r
6047 HRESULT result = 0;
\r
6050 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6051 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6052 if ( handle->drainCounter == 0 ) {
\r
6053 handle->drainCounter = 2;
\r
6054 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6057 stream_.state = STREAM_STOPPED;
\r
6059 MUTEX_LOCK( &stream_.mutex );
\r
6061 // Stop the buffer and clear memory
\r
6062 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6063 result = buffer->Stop();
\r
6064 if ( FAILED( result ) ) {
\r
6065 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6066 errorText_ = errorStream_.str();
\r
6070 // Lock the buffer and clear it so that if we start to play again,
\r
6071 // we won't have old data playing.
\r
6072 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6073 if ( FAILED( result ) ) {
\r
6074 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6075 errorText_ = errorStream_.str();
\r
6079 // Zero the DS buffer
\r
6080 ZeroMemory( audioPtr, dataLen );
\r
6082 // Unlock the DS buffer
\r
6083 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6084 if ( FAILED( result ) ) {
\r
6085 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6086 errorText_ = errorStream_.str();
\r
6090 // If we start playing again, we must begin at beginning of buffer.
\r
6091 handle->bufferPointer[0] = 0;
\r
6094 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6095 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6099 stream_.state = STREAM_STOPPED;
\r
6101 if ( stream_.mode != DUPLEX )
\r
6102 MUTEX_LOCK( &stream_.mutex );
\r
6104 result = buffer->Stop();
\r
6105 if ( FAILED( result ) ) {
\r
6106 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6107 errorText_ = errorStream_.str();
\r
6111 // Lock the buffer and clear it so that if we start to play again,
\r
6112 // we won't have old data playing.
\r
6113 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6114 if ( FAILED( result ) ) {
\r
6115 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6116 errorText_ = errorStream_.str();
\r
6120 // Zero the DS buffer
\r
6121 ZeroMemory( audioPtr, dataLen );
\r
6123 // Unlock the DS buffer
\r
6124 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6125 if ( FAILED( result ) ) {
\r
6126 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6127 errorText_ = errorStream_.str();
\r
6131 // If we start recording again, we must begin at beginning of buffer.
\r
6132 handle->bufferPointer[1] = 0;
\r
6136 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6137 MUTEX_UNLOCK( &stream_.mutex );
\r
6139 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6142 void RtApiDs :: abortStream()
\r
6145 if ( stream_.state == STREAM_STOPPED ) {
\r
6146 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6147 error( RtAudioError::WARNING );
\r
6151 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6152 handle->drainCounter = 2;
\r
6157 void RtApiDs :: callbackEvent()
\r
6159 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6160 Sleep( 50 ); // sleep 50 milliseconds
\r
6164 if ( stream_.state == STREAM_CLOSED ) {
\r
6165 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6166 error( RtAudioError::WARNING );
\r
6170 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6171 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6173 // Check if we were draining the stream and signal is finished.
\r
6174 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6176 stream_.state = STREAM_STOPPING;
\r
6177 if ( handle->internalDrain == false )
\r
6178 SetEvent( handle->condition );
\r
6184 // Invoke user callback to get fresh output data UNLESS we are
\r
6185 // draining stream.
\r
6186 if ( handle->drainCounter == 0 ) {
\r
6187 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6188 double streamTime = getStreamTime();
\r
6189 RtAudioStreamStatus status = 0;
\r
6190 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6191 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6192 handle->xrun[0] = false;
\r
6194 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6195 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6196 handle->xrun[1] = false;
\r
6198 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6199 stream_.bufferSize, streamTime, status, info->userData );
\r
6200 if ( cbReturnValue == 2 ) {
\r
6201 stream_.state = STREAM_STOPPING;
\r
6202 handle->drainCounter = 2;
\r
6206 else if ( cbReturnValue == 1 ) {
\r
6207 handle->drainCounter = 1;
\r
6208 handle->internalDrain = true;
\r
6213 DWORD currentWritePointer, safeWritePointer;
\r
6214 DWORD currentReadPointer, safeReadPointer;
\r
6215 UINT nextWritePointer;
\r
6217 LPVOID buffer1 = NULL;
\r
6218 LPVOID buffer2 = NULL;
\r
6219 DWORD bufferSize1 = 0;
\r
6220 DWORD bufferSize2 = 0;
\r
6225 MUTEX_LOCK( &stream_.mutex );
\r
6226 if ( stream_.state == STREAM_STOPPED ) {
\r
6227 MUTEX_UNLOCK( &stream_.mutex );
\r
6231 if ( buffersRolling == false ) {
\r
6232 if ( stream_.mode == DUPLEX ) {
\r
6233 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6235 // It takes a while for the devices to get rolling. As a result,
\r
6236 // there's no guarantee that the capture and write device pointers
\r
6237 // will move in lockstep. Wait here for both devices to start
\r
6238 // rolling, and then set our buffer pointers accordingly.
\r
6239 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6240 // bytes later than the write buffer.
\r
6242 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6243 // take place between the two GetCurrentPosition calls... but I'm
\r
6244 // really not sure how to solve the problem. Temporarily boost to
\r
6245 // Realtime priority, maybe; but I'm not sure what priority the
\r
6246 // DirectSound service threads run at. We *should* be roughly
\r
6247 // within a ms or so of correct.
\r
6249 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6250 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6252 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6254 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6255 if ( FAILED( result ) ) {
\r
6256 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6257 errorText_ = errorStream_.str();
\r
6258 error( RtAudioError::SYSTEM_ERROR );
\r
6261 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6262 if ( FAILED( result ) ) {
\r
6263 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6264 errorText_ = errorStream_.str();
\r
6265 error( RtAudioError::SYSTEM_ERROR );
\r
6269 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6270 if ( FAILED( result ) ) {
\r
6271 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6272 errorText_ = errorStream_.str();
\r
6273 error( RtAudioError::SYSTEM_ERROR );
\r
6276 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6277 if ( FAILED( result ) ) {
\r
6278 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6279 errorText_ = errorStream_.str();
\r
6280 error( RtAudioError::SYSTEM_ERROR );
\r
6283 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6287 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6289 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6290 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6291 handle->bufferPointer[1] = safeReadPointer;
\r
6293 else if ( stream_.mode == OUTPUT ) {
\r
6295 // Set the proper nextWritePosition after initial startup.
\r
6296 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6297 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6298 if ( FAILED( result ) ) {
\r
6299 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6300 errorText_ = errorStream_.str();
\r
6301 error( RtAudioError::SYSTEM_ERROR );
\r
6304 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6305 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6308 buffersRolling = true;
\r
6311 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6313 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6315 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6316 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6317 bufferBytes *= formatBytes( stream_.userFormat );
\r
6318 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6321 // Setup parameters and do buffer conversion if necessary.
\r
6322 if ( stream_.doConvertBuffer[0] ) {
\r
6323 buffer = stream_.deviceBuffer;
\r
6324 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6325 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6326 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6329 buffer = stream_.userBuffer[0];
\r
6330 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6331 bufferBytes *= formatBytes( stream_.userFormat );
\r
6334 // No byte swapping necessary in DirectSound implementation.
\r
6336 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6337 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6339 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6340 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6342 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6343 nextWritePointer = handle->bufferPointer[0];
\r
6345 DWORD endWrite, leadPointer;
\r
6347 // Find out where the read and "safe write" pointers are.
\r
6348 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6349 if ( FAILED( result ) ) {
\r
6350 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6351 errorText_ = errorStream_.str();
\r
6352 error( RtAudioError::SYSTEM_ERROR );
\r
6356 // We will copy our output buffer into the region between
\r
6357 // safeWritePointer and leadPointer. If leadPointer is not
\r
6358 // beyond the next endWrite position, wait until it is.
\r
6359 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6360 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6361 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6362 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6363 endWrite = nextWritePointer + bufferBytes;
\r
6365 // Check whether the entire write region is behind the play pointer.
\r
6366 if ( leadPointer >= endWrite ) break;
\r
6368 // If we are here, then we must wait until the leadPointer advances
\r
6369 // beyond the end of our next write region. We use the
\r
6370 // Sleep() function to suspend operation until that happens.
\r
6371 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6372 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6373 if ( millis < 1.0 ) millis = 1.0;
\r
6374 Sleep( (DWORD) millis );
\r
6377 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6378 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6379 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6380 handle->xrun[0] = true;
\r
6381 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6382 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6383 handle->bufferPointer[0] = nextWritePointer;
\r
6384 endWrite = nextWritePointer + bufferBytes;
\r
6387 // Lock free space in the buffer
\r
6388 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6389 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6390 if ( FAILED( result ) ) {
\r
6391 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6392 errorText_ = errorStream_.str();
\r
6393 error( RtAudioError::SYSTEM_ERROR );
\r
6397 // Copy our buffer into the DS buffer
\r
6398 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6399 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6401 // Update our buffer offset and unlock sound buffer
\r
6402 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6403 if ( FAILED( result ) ) {
\r
6404 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6405 errorText_ = errorStream_.str();
\r
6406 error( RtAudioError::SYSTEM_ERROR );
\r
6409 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6410 handle->bufferPointer[0] = nextWritePointer;
\r
6413 // Don't bother draining input
\r
6414 if ( handle->drainCounter ) {
\r
6415 handle->drainCounter++;
\r
6419 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6421 // Setup parameters.
\r
6422 if ( stream_.doConvertBuffer[1] ) {
\r
6423 buffer = stream_.deviceBuffer;
\r
6424 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6425 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6428 buffer = stream_.userBuffer[1];
\r
6429 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6430 bufferBytes *= formatBytes( stream_.userFormat );
\r
6433 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6434 long nextReadPointer = handle->bufferPointer[1];
\r
6435 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6437 // Find out where the write and "safe read" pointers are.
\r
6438 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6439 if ( FAILED( result ) ) {
\r
6440 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6441 errorText_ = errorStream_.str();
\r
6442 error( RtAudioError::SYSTEM_ERROR );
\r
6446 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6447 DWORD endRead = nextReadPointer + bufferBytes;
\r
6449 // Handling depends on whether we are INPUT or DUPLEX.
\r
6450 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6451 // then a wait here will drag the write pointers into the forbidden zone.
\r
6453 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6454 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6455 // practical way to sync up the read and write pointers reliably, given the
\r
6456 // the very complex relationship between phase and increment of the read and write
\r
6459 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6460 // provide a pre-roll period of 0.5 seconds in which we return
\r
6461 // zeros from the read buffer while the pointers sync up.
\r
6463 if ( stream_.mode == DUPLEX ) {
\r
6464 if ( safeReadPointer < endRead ) {
\r
6465 if ( duplexPrerollBytes <= 0 ) {
\r
6466 // Pre-roll time over. Be more agressive.
\r
6467 int adjustment = endRead-safeReadPointer;
\r
6469 handle->xrun[1] = true;
\r
6471 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6472 // and perform fine adjustments later.
\r
6473 // - small adjustments: back off by twice as much.
\r
6474 if ( adjustment >= 2*bufferBytes )
\r
6475 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6477 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6479 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6483 // In pre=roll time. Just do it.
\r
6484 nextReadPointer = safeReadPointer - bufferBytes;
\r
6485 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6487 endRead = nextReadPointer + bufferBytes;
\r
6490 else { // mode == INPUT
\r
6491 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6492 // See comments for playback.
\r
6493 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6494 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6495 if ( millis < 1.0 ) millis = 1.0;
\r
6496 Sleep( (DWORD) millis );
\r
6498 // Wake up and find out where we are now.
\r
6499 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6500 if ( FAILED( result ) ) {
\r
6501 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6502 errorText_ = errorStream_.str();
\r
6503 error( RtAudioError::SYSTEM_ERROR );
\r
6507 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6511 // Lock free space in the buffer
\r
6512 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6513 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6514 if ( FAILED( result ) ) {
\r
6515 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6516 errorText_ = errorStream_.str();
\r
6517 error( RtAudioError::SYSTEM_ERROR );
\r
6521 if ( duplexPrerollBytes <= 0 ) {
\r
6522 // Copy our buffer into the DS buffer
\r
6523 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6524 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6527 memset( buffer, 0, bufferSize1 );
\r
6528 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6529 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6532 // Update our buffer offset and unlock sound buffer
\r
6533 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6534 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6535 if ( FAILED( result ) ) {
\r
6536 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6537 errorText_ = errorStream_.str();
\r
6538 error( RtAudioError::SYSTEM_ERROR );
\r
6541 handle->bufferPointer[1] = nextReadPointer;
\r
6543 // No byte swapping necessary in DirectSound implementation.
\r
6545 // If necessary, convert 8-bit data from unsigned to signed.
\r
6546 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6547 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6549 // Do buffer conversion if necessary.
\r
6550 if ( stream_.doConvertBuffer[1] )
\r
6551 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6555 MUTEX_UNLOCK( &stream_.mutex );
\r
6556 RtApi::tickStreamTime();
\r
6559 // Definitions for utility functions and callbacks
\r
6560 // specific to the DirectSound implementation.
\r
6562 static unsigned __stdcall callbackHandler( void *ptr )
\r
6564 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6565 RtApiDs *object = (RtApiDs *) info->object;
\r
6566 bool* isRunning = &info->isRunning;
\r
6568 while ( *isRunning == true ) {
\r
6569 object->callbackEvent();
\r
6572 _endthreadex( 0 );
\r
6576 #include "tchar.h"
\r
6578 static std::string convertTChar( LPCTSTR name )
\r
6580 #if defined( UNICODE ) || defined( _UNICODE )
\r
6581 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6582 std::string s( length-1, '\0' );
\r
6583 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6585 std::string s( name );
\r
6591 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6592 LPCTSTR description,
\r
6593 LPCTSTR /*module*/,
\r
6594 LPVOID lpContext )
\r
6596 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6597 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6600 bool validDevice = false;
\r
6601 if ( probeInfo.isInput == true ) {
\r
6603 LPDIRECTSOUNDCAPTURE object;
\r
6605 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6606 if ( hr != DS_OK ) return TRUE;
\r
6608 caps.dwSize = sizeof(caps);
\r
6609 hr = object->GetCaps( &caps );
\r
6610 if ( hr == DS_OK ) {
\r
6611 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6612 validDevice = true;
\r
6614 object->Release();
\r
6618 LPDIRECTSOUND object;
\r
6619 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6620 if ( hr != DS_OK ) return TRUE;
\r
6622 caps.dwSize = sizeof(caps);
\r
6623 hr = object->GetCaps( &caps );
\r
6624 if ( hr == DS_OK ) {
\r
6625 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6626 validDevice = true;
\r
6628 object->Release();
\r
6631 // If good device, then save its name and guid.
\r
6632 std::string name = convertTChar( description );
\r
6633 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6634 if ( lpguid == NULL )
\r
6635 name = "Default Device";
\r
6636 if ( validDevice ) {
\r
6637 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6638 if ( dsDevices[i].name == name ) {
\r
6639 dsDevices[i].found = true;
\r
6640 if ( probeInfo.isInput ) {
\r
6641 dsDevices[i].id[1] = lpguid;
\r
6642 dsDevices[i].validId[1] = true;
\r
6645 dsDevices[i].id[0] = lpguid;
\r
6646 dsDevices[i].validId[0] = true;
\r
6653 device.name = name;
\r
6654 device.found = true;
\r
6655 if ( probeInfo.isInput ) {
\r
6656 device.id[1] = lpguid;
\r
6657 device.validId[1] = true;
\r
6660 device.id[0] = lpguid;
\r
6661 device.validId[0] = true;
\r
6663 dsDevices.push_back( device );
\r
6669 static const char* getErrorString( int code )
\r
6673 case DSERR_ALLOCATED:
\r
6674 return "Already allocated";
\r
6676 case DSERR_CONTROLUNAVAIL:
\r
6677 return "Control unavailable";
\r
6679 case DSERR_INVALIDPARAM:
\r
6680 return "Invalid parameter";
\r
6682 case DSERR_INVALIDCALL:
\r
6683 return "Invalid call";
\r
6685 case DSERR_GENERIC:
\r
6686 return "Generic error";
\r
6688 case DSERR_PRIOLEVELNEEDED:
\r
6689 return "Priority level needed";
\r
6691 case DSERR_OUTOFMEMORY:
\r
6692 return "Out of memory";
\r
6694 case DSERR_BADFORMAT:
\r
6695 return "The sample rate or the channel format is not supported";
\r
6697 case DSERR_UNSUPPORTED:
\r
6698 return "Not supported";
\r
6700 case DSERR_NODRIVER:
\r
6701 return "No driver";
\r
6703 case DSERR_ALREADYINITIALIZED:
\r
6704 return "Already initialized";
\r
6706 case DSERR_NOAGGREGATION:
\r
6707 return "No aggregation";
\r
6709 case DSERR_BUFFERLOST:
\r
6710 return "Buffer lost";
\r
6712 case DSERR_OTHERAPPHASPRIO:
\r
6713 return "Another application already has priority";
\r
6715 case DSERR_UNINITIALIZED:
\r
6716 return "Uninitialized";
\r
6719 return "DirectSound unknown error";
\r
6722 //******************** End of __WINDOWS_DS__ *********************//
\r
6726 #if defined(__LINUX_ALSA__)
\r
6728 #include <alsa/asoundlib.h>
\r
6729 #include <unistd.h>
\r
6731 // A structure to hold various information related to the ALSA API
\r
6732 // implementation.
\r
6733 struct AlsaHandle {
\r
6734 snd_pcm_t *handles[2];
\r
6735 bool synchronized;
\r
6737 pthread_cond_t runnable_cv;
\r
6741 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6744 static void *alsaCallbackHandler( void * ptr );
\r
6746 RtApiAlsa :: RtApiAlsa()
\r
6748 // Nothing to do here.
\r
6751 RtApiAlsa :: ~RtApiAlsa()
\r
6753 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6756 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6758 unsigned nDevices = 0;
\r
6759 int result, subdevice, card;
\r
6761 snd_ctl_t *handle;
\r
6763 // Count cards and devices
\r
6765 snd_card_next( &card );
\r
6766 while ( card >= 0 ) {
\r
6767 sprintf( name, "hw:%d", card );
\r
6768 result = snd_ctl_open( &handle, name, 0 );
\r
6769 if ( result < 0 ) {
\r
6770 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6771 errorText_ = errorStream_.str();
\r
6772 error( RtAudioError::WARNING );
\r
6777 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6778 if ( result < 0 ) {
\r
6779 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6780 errorText_ = errorStream_.str();
\r
6781 error( RtAudioError::WARNING );
\r
6784 if ( subdevice < 0 )
\r
6789 snd_ctl_close( handle );
\r
6790 snd_card_next( &card );
\r
6793 result = snd_ctl_open( &handle, "default", 0 );
\r
6794 if (result == 0) {
\r
6796 snd_ctl_close( handle );
\r
6802 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6804 RtAudio::DeviceInfo info;
\r
6805 info.probed = false;
\r
6807 unsigned nDevices = 0;
\r
6808 int result, subdevice, card;
\r
6810 snd_ctl_t *chandle;
\r
6812 // Count cards and devices
\r
6814 snd_card_next( &card );
\r
6815 while ( card >= 0 ) {
\r
6816 sprintf( name, "hw:%d", card );
\r
6817 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6818 if ( result < 0 ) {
\r
6819 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6820 errorText_ = errorStream_.str();
\r
6821 error( RtAudioError::WARNING );
\r
6826 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6827 if ( result < 0 ) {
\r
6828 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6829 errorText_ = errorStream_.str();
\r
6830 error( RtAudioError::WARNING );
\r
6833 if ( subdevice < 0 ) break;
\r
6834 if ( nDevices == device ) {
\r
6835 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6841 snd_ctl_close( chandle );
\r
6842 snd_card_next( &card );
\r
6845 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6846 if ( result == 0 ) {
\r
6847 if ( nDevices == device ) {
\r
6848 strcpy( name, "default" );
\r
6854 if ( nDevices == 0 ) {
\r
6855 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6856 error( RtAudioError::INVALID_USE );
\r
6860 if ( device >= nDevices ) {
\r
6861 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6862 error( RtAudioError::INVALID_USE );
\r
6868 // If a stream is already open, we cannot probe the stream devices.
\r
6869 // Thus, use the saved results.
\r
6870 if ( stream_.state != STREAM_CLOSED &&
\r
6871 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6872 snd_ctl_close( chandle );
\r
6873 if ( device >= devices_.size() ) {
\r
6874 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6875 error( RtAudioError::WARNING );
\r
6878 return devices_[ device ];
\r
6881 int openMode = SND_PCM_ASYNC;
\r
6882 snd_pcm_stream_t stream;
\r
6883 snd_pcm_info_t *pcminfo;
\r
6884 snd_pcm_info_alloca( &pcminfo );
\r
6885 snd_pcm_t *phandle;
\r
6886 snd_pcm_hw_params_t *params;
\r
6887 snd_pcm_hw_params_alloca( ¶ms );
\r
6889 // First try for playback unless default device (which has subdev -1)
\r
6890 stream = SND_PCM_STREAM_PLAYBACK;
\r
6891 snd_pcm_info_set_stream( pcminfo, stream );
\r
6892 if ( subdevice != -1 ) {
\r
6893 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6894 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6896 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6897 if ( result < 0 ) {
\r
6898 // Device probably doesn't support playback.
\r
6899 goto captureProbe;
\r
6903 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6904 if ( result < 0 ) {
\r
6905 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6906 errorText_ = errorStream_.str();
\r
6907 error( RtAudioError::WARNING );
\r
6908 goto captureProbe;
\r
6911 // The device is open ... fill the parameter structure.
\r
6912 result = snd_pcm_hw_params_any( phandle, params );
\r
6913 if ( result < 0 ) {
\r
6914 snd_pcm_close( phandle );
\r
6915 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6916 errorText_ = errorStream_.str();
\r
6917 error( RtAudioError::WARNING );
\r
6918 goto captureProbe;
\r
6921 // Get output channel information.
\r
6922 unsigned int value;
\r
6923 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6924 if ( result < 0 ) {
\r
6925 snd_pcm_close( phandle );
\r
6926 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6927 errorText_ = errorStream_.str();
\r
6928 error( RtAudioError::WARNING );
\r
6929 goto captureProbe;
\r
6931 info.outputChannels = value;
\r
6932 snd_pcm_close( phandle );
\r
6935 stream = SND_PCM_STREAM_CAPTURE;
\r
6936 snd_pcm_info_set_stream( pcminfo, stream );
\r
6938 // Now try for capture unless default device (with subdev = -1)
\r
6939 if ( subdevice != -1 ) {
\r
6940 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6941 snd_ctl_close( chandle );
\r
6942 if ( result < 0 ) {
\r
6943 // Device probably doesn't support capture.
\r
6944 if ( info.outputChannels == 0 ) return info;
\r
6945 goto probeParameters;
\r
6949 snd_ctl_close( chandle );
\r
6951 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6952 if ( result < 0 ) {
\r
6953 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6954 errorText_ = errorStream_.str();
\r
6955 error( RtAudioError::WARNING );
\r
6956 if ( info.outputChannels == 0 ) return info;
\r
6957 goto probeParameters;
\r
6960 // The device is open ... fill the parameter structure.
\r
6961 result = snd_pcm_hw_params_any( phandle, params );
\r
6962 if ( result < 0 ) {
\r
6963 snd_pcm_close( phandle );
\r
6964 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6965 errorText_ = errorStream_.str();
\r
6966 error( RtAudioError::WARNING );
\r
6967 if ( info.outputChannels == 0 ) return info;
\r
6968 goto probeParameters;
\r
6971 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6972 if ( result < 0 ) {
\r
6973 snd_pcm_close( phandle );
\r
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6975 errorText_ = errorStream_.str();
\r
6976 error( RtAudioError::WARNING );
\r
6977 if ( info.outputChannels == 0 ) return info;
\r
6978 goto probeParameters;
\r
6980 info.inputChannels = value;
\r
6981 snd_pcm_close( phandle );
\r
6983 // If device opens for both playback and capture, we determine the channels.
\r
6984 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6985 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6987 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6988 if ( device == 0 && info.outputChannels > 0 )
\r
6989 info.isDefaultOutput = true;
\r
6990 if ( device == 0 && info.inputChannels > 0 )
\r
6991 info.isDefaultInput = true;
\r
6994 // At this point, we just need to figure out the supported data
\r
6995 // formats and sample rates. We'll proceed by opening the device in
\r
6996 // the direction with the maximum number of channels, or playback if
\r
6997 // they are equal. This might limit our sample rate options, but so
\r
7000 if ( info.outputChannels >= info.inputChannels )
\r
7001 stream = SND_PCM_STREAM_PLAYBACK;
\r
7003 stream = SND_PCM_STREAM_CAPTURE;
\r
7004 snd_pcm_info_set_stream( pcminfo, stream );
\r
7006 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7007 if ( result < 0 ) {
\r
7008 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7009 errorText_ = errorStream_.str();
\r
7010 error( RtAudioError::WARNING );
\r
7014 // The device is open ... fill the parameter structure.
\r
7015 result = snd_pcm_hw_params_any( phandle, params );
\r
7016 if ( result < 0 ) {
\r
7017 snd_pcm_close( phandle );
\r
7018 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7019 errorText_ = errorStream_.str();
\r
7020 error( RtAudioError::WARNING );
\r
7024 // Test our discrete set of sample rate values.
\r
7025 info.sampleRates.clear();
\r
7026 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7027 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7028 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7030 if ( info.sampleRates.size() == 0 ) {
\r
7031 snd_pcm_close( phandle );
\r
7032 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7033 errorText_ = errorStream_.str();
\r
7034 error( RtAudioError::WARNING );
\r
7038 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7039 snd_pcm_format_t format;
\r
7040 info.nativeFormats = 0;
\r
7041 format = SND_PCM_FORMAT_S8;
\r
7042 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7043 info.nativeFormats |= RTAUDIO_SINT8;
\r
7044 format = SND_PCM_FORMAT_S16;
\r
7045 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7046 info.nativeFormats |= RTAUDIO_SINT16;
\r
7047 format = SND_PCM_FORMAT_S24;
\r
7048 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7049 info.nativeFormats |= RTAUDIO_SINT24;
\r
7050 format = SND_PCM_FORMAT_S32;
\r
7051 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7052 info.nativeFormats |= RTAUDIO_SINT32;
\r
7053 format = SND_PCM_FORMAT_FLOAT;
\r
7054 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7055 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7056 format = SND_PCM_FORMAT_FLOAT64;
\r
7057 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7058 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7060 // Check that we have at least one supported format
\r
7061 if ( info.nativeFormats == 0 ) {
\r
7062 snd_pcm_close( phandle );
\r
7063 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7064 errorText_ = errorStream_.str();
\r
7065 error( RtAudioError::WARNING );
\r
7069 // Get the device name
\r
7071 result = snd_card_get_name( card, &cardname );
\r
7072 if ( result >= 0 ) {
\r
7073 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7078 // That's all ... close the device and return
\r
7079 snd_pcm_close( phandle );
\r
7080 info.probed = true;
\r
7084 void RtApiAlsa :: saveDeviceInfo( void )
\r
7088 unsigned int nDevices = getDeviceCount();
\r
7089 devices_.resize( nDevices );
\r
7090 for ( unsigned int i=0; i<nDevices; i++ )
\r
7091 devices_[i] = getDeviceInfo( i );
\r
7094 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7095 unsigned int firstChannel, unsigned int sampleRate,
\r
7096 RtAudioFormat format, unsigned int *bufferSize,
\r
7097 RtAudio::StreamOptions *options )
\r
7100 #if defined(__RTAUDIO_DEBUG__)
\r
7101 snd_output_t *out;
\r
7102 snd_output_stdio_attach(&out, stderr, 0);
\r
7105 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7107 unsigned nDevices = 0;
\r
7108 int result, subdevice, card;
\r
7110 snd_ctl_t *chandle;
\r
7112 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7113 snprintf(name, sizeof(name), "%s", "default");
\r
7115 // Count cards and devices
\r
7117 snd_card_next( &card );
\r
7118 while ( card >= 0 ) {
\r
7119 sprintf( name, "hw:%d", card );
\r
7120 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7121 if ( result < 0 ) {
\r
7122 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7123 errorText_ = errorStream_.str();
\r
7128 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7129 if ( result < 0 ) break;
\r
7130 if ( subdevice < 0 ) break;
\r
7131 if ( nDevices == device ) {
\r
7132 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7133 snd_ctl_close( chandle );
\r
7138 snd_ctl_close( chandle );
\r
7139 snd_card_next( &card );
\r
7142 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7143 if ( result == 0 ) {
\r
7144 if ( nDevices == device ) {
\r
7145 strcpy( name, "default" );
\r
7151 if ( nDevices == 0 ) {
\r
7152 // This should not happen because a check is made before this function is called.
\r
7153 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7157 if ( device >= nDevices ) {
\r
7158 // This should not happen because a check is made before this function is called.
\r
7159 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7166 // The getDeviceInfo() function will not work for a device that is
\r
7167 // already open. Thus, we'll probe the system before opening a
\r
7168 // stream and save the results for use by getDeviceInfo().
\r
7169 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7170 this->saveDeviceInfo();
\r
7172 snd_pcm_stream_t stream;
\r
7173 if ( mode == OUTPUT )
\r
7174 stream = SND_PCM_STREAM_PLAYBACK;
\r
7176 stream = SND_PCM_STREAM_CAPTURE;
\r
7178 snd_pcm_t *phandle;
\r
7179 int openMode = SND_PCM_ASYNC;
\r
7180 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7181 if ( result < 0 ) {
\r
7182 if ( mode == OUTPUT )
\r
7183 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7185 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7186 errorText_ = errorStream_.str();
\r
7190 // Fill the parameter structure.
\r
7191 snd_pcm_hw_params_t *hw_params;
\r
7192 snd_pcm_hw_params_alloca( &hw_params );
\r
7193 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7194 if ( result < 0 ) {
\r
7195 snd_pcm_close( phandle );
\r
7196 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7197 errorText_ = errorStream_.str();
\r
7201 #if defined(__RTAUDIO_DEBUG__)
\r
7202 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7203 snd_pcm_hw_params_dump( hw_params, out );
\r
7206 // Set access ... check user preference.
\r
7207 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7208 stream_.userInterleaved = false;
\r
7209 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7210 if ( result < 0 ) {
\r
7211 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7212 stream_.deviceInterleaved[mode] = true;
\r
7215 stream_.deviceInterleaved[mode] = false;
\r
7218 stream_.userInterleaved = true;
\r
7219 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7220 if ( result < 0 ) {
\r
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7222 stream_.deviceInterleaved[mode] = false;
\r
7225 stream_.deviceInterleaved[mode] = true;
\r
7228 if ( result < 0 ) {
\r
7229 snd_pcm_close( phandle );
\r
7230 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7231 errorText_ = errorStream_.str();
\r
7235 // Determine how to set the device format.
\r
7236 stream_.userFormat = format;
\r
7237 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7239 if ( format == RTAUDIO_SINT8 )
\r
7240 deviceFormat = SND_PCM_FORMAT_S8;
\r
7241 else if ( format == RTAUDIO_SINT16 )
\r
7242 deviceFormat = SND_PCM_FORMAT_S16;
\r
7243 else if ( format == RTAUDIO_SINT24 )
\r
7244 deviceFormat = SND_PCM_FORMAT_S24;
\r
7245 else if ( format == RTAUDIO_SINT32 )
\r
7246 deviceFormat = SND_PCM_FORMAT_S32;
\r
7247 else if ( format == RTAUDIO_FLOAT32 )
\r
7248 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7249 else if ( format == RTAUDIO_FLOAT64 )
\r
7250 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7252 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7253 stream_.deviceFormat[mode] = format;
\r
7257 // The user requested format is not natively supported by the device.
\r
7258 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7259 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7260 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7264 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7265 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7266 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7270 deviceFormat = SND_PCM_FORMAT_S32;
\r
7271 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7272 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7276 deviceFormat = SND_PCM_FORMAT_S24;
\r
7277 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7278 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7282 deviceFormat = SND_PCM_FORMAT_S16;
\r
7283 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7284 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7288 deviceFormat = SND_PCM_FORMAT_S8;
\r
7289 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7290 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7294 // If we get here, no supported format was found.
\r
7295 snd_pcm_close( phandle );
\r
7296 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7297 errorText_ = errorStream_.str();
\r
7301 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7302 if ( result < 0 ) {
\r
7303 snd_pcm_close( phandle );
\r
7304 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7305 errorText_ = errorStream_.str();
\r
7309 // Determine whether byte-swaping is necessary.
\r
7310 stream_.doByteSwap[mode] = false;
\r
7311 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7312 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7313 if ( result == 0 )
\r
7314 stream_.doByteSwap[mode] = true;
\r
7315 else if (result < 0) {
\r
7316 snd_pcm_close( phandle );
\r
7317 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7318 errorText_ = errorStream_.str();
\r
7323 // Set the sample rate.
\r
7324 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7325 if ( result < 0 ) {
\r
7326 snd_pcm_close( phandle );
\r
7327 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7328 errorText_ = errorStream_.str();
\r
7332 // Determine the number of channels for this device. We support a possible
\r
7333 // minimum device channel number > than the value requested by the user.
\r
7334 stream_.nUserChannels[mode] = channels;
\r
7335 unsigned int value;
\r
7336 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7337 unsigned int deviceChannels = value;
\r
7338 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7339 snd_pcm_close( phandle );
\r
7340 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7341 errorText_ = errorStream_.str();
\r
7345 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7346 if ( result < 0 ) {
\r
7347 snd_pcm_close( phandle );
\r
7348 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7349 errorText_ = errorStream_.str();
\r
7352 deviceChannels = value;
\r
7353 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7354 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7356 // Set the device channels.
\r
7357 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7358 if ( result < 0 ) {
\r
7359 snd_pcm_close( phandle );
\r
7360 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7361 errorText_ = errorStream_.str();
\r
7365 // Set the buffer (or period) size.
\r
7367 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7368 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7369 if ( result < 0 ) {
\r
7370 snd_pcm_close( phandle );
\r
7371 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7372 errorText_ = errorStream_.str();
\r
7375 *bufferSize = periodSize;
\r
7377 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7378 unsigned int periods = 0;
\r
7379 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7380 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7381 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7382 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7383 if ( result < 0 ) {
\r
7384 snd_pcm_close( phandle );
\r
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7386 errorText_ = errorStream_.str();
\r
7390 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7391 // MUST be the same in both directions!
\r
7392 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7393 snd_pcm_close( phandle );
\r
7394 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7395 errorText_ = errorStream_.str();
\r
7399 stream_.bufferSize = *bufferSize;
\r
7401 // Install the hardware configuration
\r
7402 result = snd_pcm_hw_params( phandle, hw_params );
\r
7403 if ( result < 0 ) {
\r
7404 snd_pcm_close( phandle );
\r
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7406 errorText_ = errorStream_.str();
\r
7410 #if defined(__RTAUDIO_DEBUG__)
\r
7411 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7412 snd_pcm_hw_params_dump( hw_params, out );
\r
7415 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7416 snd_pcm_sw_params_t *sw_params = NULL;
\r
7417 snd_pcm_sw_params_alloca( &sw_params );
\r
7418 snd_pcm_sw_params_current( phandle, sw_params );
\r
7419 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7420 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7421 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7423 // The following two settings were suggested by Theo Veenker
\r
7424 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7425 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7427 // here are two options for a fix
\r
7428 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7429 snd_pcm_uframes_t val;
\r
7430 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7431 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7433 result = snd_pcm_sw_params( phandle, sw_params );
\r
7434 if ( result < 0 ) {
\r
7435 snd_pcm_close( phandle );
\r
7436 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7437 errorText_ = errorStream_.str();
\r
7441 #if defined(__RTAUDIO_DEBUG__)
\r
7442 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7443 snd_pcm_sw_params_dump( sw_params, out );
\r
7446 // Set flags for buffer conversion
\r
7447 stream_.doConvertBuffer[mode] = false;
\r
7448 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7449 stream_.doConvertBuffer[mode] = true;
\r
7450 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7451 stream_.doConvertBuffer[mode] = true;
\r
7452 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7453 stream_.nUserChannels[mode] > 1 )
\r
7454 stream_.doConvertBuffer[mode] = true;
\r
7456 // Allocate the ApiHandle if necessary and then save.
\r
7457 AlsaHandle *apiInfo = 0;
\r
7458 if ( stream_.apiHandle == 0 ) {
\r
7460 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7462 catch ( std::bad_alloc& ) {
\r
7463 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7467 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7468 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7472 stream_.apiHandle = (void *) apiInfo;
\r
7473 apiInfo->handles[0] = 0;
\r
7474 apiInfo->handles[1] = 0;
\r
7477 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7479 apiInfo->handles[mode] = phandle;
\r
7482 // Allocate necessary internal buffers.
\r
7483 unsigned long bufferBytes;
\r
7484 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7485 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7486 if ( stream_.userBuffer[mode] == NULL ) {
\r
7487 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7491 if ( stream_.doConvertBuffer[mode] ) {
\r
7493 bool makeBuffer = true;
\r
7494 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7495 if ( mode == INPUT ) {
\r
7496 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7497 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7498 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7502 if ( makeBuffer ) {
\r
7503 bufferBytes *= *bufferSize;
\r
7504 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7505 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7506 if ( stream_.deviceBuffer == NULL ) {
\r
7507 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7513 stream_.sampleRate = sampleRate;
\r
7514 stream_.nBuffers = periods;
\r
7515 stream_.device[mode] = device;
\r
7516 stream_.state = STREAM_STOPPED;
\r
7518 // Setup the buffer conversion information structure.
\r
7519 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7521 // Setup thread if necessary.
\r
7522 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7523 // We had already set up an output stream.
\r
7524 stream_.mode = DUPLEX;
\r
7525 // Link the streams if possible.
\r
7526 apiInfo->synchronized = false;
\r
7527 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7528 apiInfo->synchronized = true;
\r
7530 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7531 error( RtAudioError::WARNING );
\r
7535 stream_.mode = mode;
\r
7537 // Setup callback thread.
\r
7538 stream_.callbackInfo.object = (void *) this;
\r
7540 // Set the thread attributes for joinable and realtime scheduling
\r
7541 // priority (optional). The higher priority will only take affect
\r
7542 // if the program is run as root or suid. Note, under Linux
\r
7543 // processes with CAP_SYS_NICE privilege, a user can change
\r
7544 // scheduling policy and priority (thus need not be root). See
\r
7545 // POSIX "capabilities".
\r
7546 pthread_attr_t attr;
\r
7547 pthread_attr_init( &attr );
\r
7548 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7550 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7551 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7552 // We previously attempted to increase the audio callback priority
\r
7553 // to SCHED_RR here via the attributes. However, while no errors
\r
7554 // were reported in doing so, it did not work. So, now this is
\r
7555 // done in the alsaCallbackHandler function.
\r
7556 stream_.callbackInfo.doRealtime = true;
\r
7557 int priority = options->priority;
\r
7558 int min = sched_get_priority_min( SCHED_RR );
\r
7559 int max = sched_get_priority_max( SCHED_RR );
\r
7560 if ( priority < min ) priority = min;
\r
7561 else if ( priority > max ) priority = max;
\r
7562 stream_.callbackInfo.priority = priority;
\r
7566 stream_.callbackInfo.isRunning = true;
\r
7567 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7568 pthread_attr_destroy( &attr );
\r
7570 stream_.callbackInfo.isRunning = false;
\r
7571 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7580 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7581 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7582 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7584 stream_.apiHandle = 0;
\r
7587 if ( phandle) snd_pcm_close( phandle );
\r
7589 for ( int i=0; i<2; i++ ) {
\r
7590 if ( stream_.userBuffer[i] ) {
\r
7591 free( stream_.userBuffer[i] );
\r
7592 stream_.userBuffer[i] = 0;
\r
7596 if ( stream_.deviceBuffer ) {
\r
7597 free( stream_.deviceBuffer );
\r
7598 stream_.deviceBuffer = 0;
\r
7601 stream_.state = STREAM_CLOSED;
\r
7605 void RtApiAlsa :: closeStream()
\r
7607 if ( stream_.state == STREAM_CLOSED ) {
\r
7608 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7609 error( RtAudioError::WARNING );
\r
7613 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7614 stream_.callbackInfo.isRunning = false;
\r
7615 MUTEX_LOCK( &stream_.mutex );
\r
7616 if ( stream_.state == STREAM_STOPPED ) {
\r
7617 apiInfo->runnable = true;
\r
7618 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7620 MUTEX_UNLOCK( &stream_.mutex );
\r
7621 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7623 if ( stream_.state == STREAM_RUNNING ) {
\r
7624 stream_.state = STREAM_STOPPED;
\r
7625 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7626 snd_pcm_drop( apiInfo->handles[0] );
\r
7627 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7628 snd_pcm_drop( apiInfo->handles[1] );
\r
7632 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7633 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7634 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7636 stream_.apiHandle = 0;
\r
7639 for ( int i=0; i<2; i++ ) {
\r
7640 if ( stream_.userBuffer[i] ) {
\r
7641 free( stream_.userBuffer[i] );
\r
7642 stream_.userBuffer[i] = 0;
\r
7646 if ( stream_.deviceBuffer ) {
\r
7647 free( stream_.deviceBuffer );
\r
7648 stream_.deviceBuffer = 0;
\r
7651 stream_.mode = UNINITIALIZED;
\r
7652 stream_.state = STREAM_CLOSED;
\r
7655 void RtApiAlsa :: startStream()
\r
7657 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7660 if ( stream_.state == STREAM_RUNNING ) {
\r
7661 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7662 error( RtAudioError::WARNING );
\r
7666 MUTEX_LOCK( &stream_.mutex );
\r
7669 snd_pcm_state_t state;
\r
7670 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7671 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7672 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7673 state = snd_pcm_state( handle[0] );
\r
7674 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7675 result = snd_pcm_prepare( handle[0] );
\r
7676 if ( result < 0 ) {
\r
7677 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7678 errorText_ = errorStream_.str();
\r
7684 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7685 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7686 state = snd_pcm_state( handle[1] );
\r
7687 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7688 result = snd_pcm_prepare( handle[1] );
\r
7689 if ( result < 0 ) {
\r
7690 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7691 errorText_ = errorStream_.str();
\r
7697 stream_.state = STREAM_RUNNING;
\r
7700 apiInfo->runnable = true;
\r
7701 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7702 MUTEX_UNLOCK( &stream_.mutex );
\r
7704 if ( result >= 0 ) return;
\r
7705 error( RtAudioError::SYSTEM_ERROR );
\r
7708 void RtApiAlsa :: stopStream()
\r
7711 if ( stream_.state == STREAM_STOPPED ) {
\r
7712 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7713 error( RtAudioError::WARNING );
\r
7717 stream_.state = STREAM_STOPPED;
\r
7718 MUTEX_LOCK( &stream_.mutex );
\r
7721 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7722 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7723 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7724 if ( apiInfo->synchronized )
\r
7725 result = snd_pcm_drop( handle[0] );
\r
7727 result = snd_pcm_drain( handle[0] );
\r
7728 if ( result < 0 ) {
\r
7729 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7730 errorText_ = errorStream_.str();
\r
7735 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7736 result = snd_pcm_drop( handle[1] );
\r
7737 if ( result < 0 ) {
\r
7738 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7739 errorText_ = errorStream_.str();
\r
7745 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7746 MUTEX_UNLOCK( &stream_.mutex );
\r
7748 if ( result >= 0 ) return;
\r
7749 error( RtAudioError::SYSTEM_ERROR );
\r
7752 void RtApiAlsa :: abortStream()
\r
7755 if ( stream_.state == STREAM_STOPPED ) {
\r
7756 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7757 error( RtAudioError::WARNING );
\r
7761 stream_.state = STREAM_STOPPED;
\r
7762 MUTEX_LOCK( &stream_.mutex );
\r
7765 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7766 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7767 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7768 result = snd_pcm_drop( handle[0] );
\r
7769 if ( result < 0 ) {
\r
7770 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7771 errorText_ = errorStream_.str();
\r
7776 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7777 result = snd_pcm_drop( handle[1] );
\r
7778 if ( result < 0 ) {
\r
7779 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7780 errorText_ = errorStream_.str();
\r
7786 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7787 MUTEX_UNLOCK( &stream_.mutex );
\r
7789 if ( result >= 0 ) return;
\r
7790 error( RtAudioError::SYSTEM_ERROR );
\r
7793 void RtApiAlsa :: callbackEvent()
\r
7795 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7796 if ( stream_.state == STREAM_STOPPED ) {
\r
7797 MUTEX_LOCK( &stream_.mutex );
\r
7798 while ( !apiInfo->runnable )
\r
7799 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7801 if ( stream_.state != STREAM_RUNNING ) {
\r
7802 MUTEX_UNLOCK( &stream_.mutex );
\r
7805 MUTEX_UNLOCK( &stream_.mutex );
\r
7808 if ( stream_.state == STREAM_CLOSED ) {
\r
7809 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7810 error( RtAudioError::WARNING );
\r
7814 int doStopStream = 0;
\r
7815 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7816 double streamTime = getStreamTime();
\r
7817 RtAudioStreamStatus status = 0;
\r
7818 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7819 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7820 apiInfo->xrun[0] = false;
\r
7822 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7823 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7824 apiInfo->xrun[1] = false;
\r
7826 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7827 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7829 if ( doStopStream == 2 ) {
\r
7834 MUTEX_LOCK( &stream_.mutex );
\r
7836 // The state might change while waiting on a mutex.
\r
7837 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7842 snd_pcm_t **handle;
\r
7843 snd_pcm_sframes_t frames;
\r
7844 RtAudioFormat format;
\r
7845 handle = (snd_pcm_t **) apiInfo->handles;
\r
7847 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7849 // Setup parameters.
\r
7850 if ( stream_.doConvertBuffer[1] ) {
\r
7851 buffer = stream_.deviceBuffer;
\r
7852 channels = stream_.nDeviceChannels[1];
\r
7853 format = stream_.deviceFormat[1];
\r
7856 buffer = stream_.userBuffer[1];
\r
7857 channels = stream_.nUserChannels[1];
\r
7858 format = stream_.userFormat;
\r
7861 // Read samples from device in interleaved/non-interleaved format.
\r
7862 if ( stream_.deviceInterleaved[1] )
\r
7863 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7865 void *bufs[channels];
\r
7866 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7867 for ( int i=0; i<channels; i++ )
\r
7868 bufs[i] = (void *) (buffer + (i * offset));
\r
7869 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7872 if ( result < (int) stream_.bufferSize ) {
\r
7873 // Either an error or overrun occured.
\r
7874 if ( result == -EPIPE ) {
\r
7875 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7876 if ( state == SND_PCM_STATE_XRUN ) {
\r
7877 apiInfo->xrun[1] = true;
\r
7878 result = snd_pcm_prepare( handle[1] );
\r
7879 if ( result < 0 ) {
\r
7880 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7881 errorText_ = errorStream_.str();
\r
7885 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7886 errorText_ = errorStream_.str();
\r
7890 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7891 errorText_ = errorStream_.str();
\r
7893 error( RtAudioError::WARNING );
\r
7897 // Do byte swapping if necessary.
\r
7898 if ( stream_.doByteSwap[1] )
\r
7899 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7901 // Do buffer conversion if necessary.
\r
7902 if ( stream_.doConvertBuffer[1] )
\r
7903 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7905 // Check stream latency
\r
7906 result = snd_pcm_delay( handle[1], &frames );
\r
7907 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7912 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7914 // Setup parameters and do buffer conversion if necessary.
\r
7915 if ( stream_.doConvertBuffer[0] ) {
\r
7916 buffer = stream_.deviceBuffer;
\r
7917 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7918 channels = stream_.nDeviceChannels[0];
\r
7919 format = stream_.deviceFormat[0];
\r
7922 buffer = stream_.userBuffer[0];
\r
7923 channels = stream_.nUserChannels[0];
\r
7924 format = stream_.userFormat;
\r
7927 // Do byte swapping if necessary.
\r
7928 if ( stream_.doByteSwap[0] )
\r
7929 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7931 // Write samples to device in interleaved/non-interleaved format.
\r
7932 if ( stream_.deviceInterleaved[0] )
\r
7933 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7935 void *bufs[channels];
\r
7936 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7937 for ( int i=0; i<channels; i++ )
\r
7938 bufs[i] = (void *) (buffer + (i * offset));
\r
7939 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7942 if ( result < (int) stream_.bufferSize ) {
\r
7943 // Either an error or underrun occured.
\r
7944 if ( result == -EPIPE ) {
\r
7945 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7946 if ( state == SND_PCM_STATE_XRUN ) {
\r
7947 apiInfo->xrun[0] = true;
\r
7948 result = snd_pcm_prepare( handle[0] );
\r
7949 if ( result < 0 ) {
\r
7950 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7951 errorText_ = errorStream_.str();
\r
7955 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7956 errorText_ = errorStream_.str();
\r
7960 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7961 errorText_ = errorStream_.str();
\r
7963 error( RtAudioError::WARNING );
\r
7967 // Check stream latency
\r
7968 result = snd_pcm_delay( handle[0], &frames );
\r
7969 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7973 MUTEX_UNLOCK( &stream_.mutex );
\r
7975 RtApi::tickStreamTime();
\r
7976 if ( doStopStream == 1 ) this->stopStream();
\r
7979 static void *alsaCallbackHandler( void *ptr )
\r
7981 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7982 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7983 bool *isRunning = &info->isRunning;
\r
7985 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7986 if ( &info->doRealtime ) {
\r
7987 pthread_t tID = pthread_self(); // ID of this thread
\r
7988 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7989 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7993 while ( *isRunning == true ) {
\r
7994 pthread_testcancel();
\r
7995 object->callbackEvent();
\r
7998 pthread_exit( NULL );
\r
8001 //******************** End of __LINUX_ALSA__ *********************//
\r
8004 #if defined(__LINUX_PULSE__)
\r
8006 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8007 // and Tristan Matthews.
\r
8009 #include <pulse/error.h>
\r
8010 #include <pulse/simple.h>
\r
8013 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8014 44100, 48000, 96000, 0};
\r
8016 struct rtaudio_pa_format_mapping_t {
\r
8017 RtAudioFormat rtaudio_format;
\r
8018 pa_sample_format_t pa_format;
\r
8021 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8022 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8023 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8024 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8025 {0, PA_SAMPLE_INVALID}};
\r
8027 struct PulseAudioHandle {
\r
8028 pa_simple *s_play;
\r
8031 pthread_cond_t runnable_cv;
\r
8033 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8036 RtApiPulse::~RtApiPulse()
\r
8038 if ( stream_.state != STREAM_CLOSED )
\r
8042 unsigned int RtApiPulse::getDeviceCount( void )
\r
8047 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8049 RtAudio::DeviceInfo info;
\r
8050 info.probed = true;
\r
8051 info.name = "PulseAudio";
\r
8052 info.outputChannels = 2;
\r
8053 info.inputChannels = 2;
\r
8054 info.duplexChannels = 2;
\r
8055 info.isDefaultOutput = true;
\r
8056 info.isDefaultInput = true;
\r
8058 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8059 info.sampleRates.push_back( *sr );
\r
8061 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8066 static void *pulseaudio_callback( void * user )
\r
8068 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8069 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8070 volatile bool *isRunning = &cbi->isRunning;
\r
8072 while ( *isRunning ) {
\r
8073 pthread_testcancel();
\r
8074 context->callbackEvent();
\r
8077 pthread_exit( NULL );
\r
8080 void RtApiPulse::closeStream( void )
\r
8082 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8084 stream_.callbackInfo.isRunning = false;
\r
8086 MUTEX_LOCK( &stream_.mutex );
\r
8087 if ( stream_.state == STREAM_STOPPED ) {
\r
8088 pah->runnable = true;
\r
8089 pthread_cond_signal( &pah->runnable_cv );
\r
8091 MUTEX_UNLOCK( &stream_.mutex );
\r
8093 pthread_join( pah->thread, 0 );
\r
8094 if ( pah->s_play ) {
\r
8095 pa_simple_flush( pah->s_play, NULL );
\r
8096 pa_simple_free( pah->s_play );
\r
8099 pa_simple_free( pah->s_rec );
\r
8101 pthread_cond_destroy( &pah->runnable_cv );
\r
8103 stream_.apiHandle = 0;
\r
8106 if ( stream_.userBuffer[0] ) {
\r
8107 free( stream_.userBuffer[0] );
\r
8108 stream_.userBuffer[0] = 0;
\r
8110 if ( stream_.userBuffer[1] ) {
\r
8111 free( stream_.userBuffer[1] );
\r
8112 stream_.userBuffer[1] = 0;
\r
8115 stream_.state = STREAM_CLOSED;
\r
8116 stream_.mode = UNINITIALIZED;
\r
8119 void RtApiPulse::callbackEvent( void )
\r
8121 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8123 if ( stream_.state == STREAM_STOPPED ) {
\r
8124 MUTEX_LOCK( &stream_.mutex );
\r
8125 while ( !pah->runnable )
\r
8126 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8128 if ( stream_.state != STREAM_RUNNING ) {
\r
8129 MUTEX_UNLOCK( &stream_.mutex );
\r
8132 MUTEX_UNLOCK( &stream_.mutex );
\r
8135 if ( stream_.state == STREAM_CLOSED ) {
\r
8136 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8137 "this shouldn't happen!";
\r
8138 error( RtAudioError::WARNING );
\r
8142 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8143 double streamTime = getStreamTime();
\r
8144 RtAudioStreamStatus status = 0;
\r
8145 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8146 stream_.bufferSize, streamTime, status,
\r
8147 stream_.callbackInfo.userData );
\r
8149 if ( doStopStream == 2 ) {
\r
8154 MUTEX_LOCK( &stream_.mutex );
\r
8155 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8156 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8158 if ( stream_.state != STREAM_RUNNING )
\r
8163 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8164 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8165 convertBuffer( stream_.deviceBuffer,
\r
8166 stream_.userBuffer[OUTPUT],
\r
8167 stream_.convertInfo[OUTPUT] );
\r
8168 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8169 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8171 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8172 formatBytes( stream_.userFormat );
\r
8174 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8175 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8176 pa_strerror( pa_error ) << ".";
\r
8177 errorText_ = errorStream_.str();
\r
8178 error( RtAudioError::WARNING );
\r
8182 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8183 if ( stream_.doConvertBuffer[INPUT] )
\r
8184 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8185 formatBytes( stream_.deviceFormat[INPUT] );
\r
8187 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8188 formatBytes( stream_.userFormat );
\r
8190 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8191 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8192 pa_strerror( pa_error ) << ".";
\r
8193 errorText_ = errorStream_.str();
\r
8194 error( RtAudioError::WARNING );
\r
8196 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8197 convertBuffer( stream_.userBuffer[INPUT],
\r
8198 stream_.deviceBuffer,
\r
8199 stream_.convertInfo[INPUT] );
\r
8204 MUTEX_UNLOCK( &stream_.mutex );
\r
8205 RtApi::tickStreamTime();
\r
8207 if ( doStopStream == 1 )
\r
8211 void RtApiPulse::startStream( void )
\r
8213 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8215 if ( stream_.state == STREAM_CLOSED ) {
\r
8216 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8217 error( RtAudioError::INVALID_USE );
\r
8220 if ( stream_.state == STREAM_RUNNING ) {
\r
8221 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8222 error( RtAudioError::WARNING );
\r
8226 MUTEX_LOCK( &stream_.mutex );
\r
8228 stream_.state = STREAM_RUNNING;
\r
8230 pah->runnable = true;
\r
8231 pthread_cond_signal( &pah->runnable_cv );
\r
8232 MUTEX_UNLOCK( &stream_.mutex );
\r
8235 void RtApiPulse::stopStream( void )
\r
8237 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8239 if ( stream_.state == STREAM_CLOSED ) {
\r
8240 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8241 error( RtAudioError::INVALID_USE );
\r
8244 if ( stream_.state == STREAM_STOPPED ) {
\r
8245 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8246 error( RtAudioError::WARNING );
\r
8250 stream_.state = STREAM_STOPPED;
\r
8251 MUTEX_LOCK( &stream_.mutex );
\r
8253 if ( pah && pah->s_play ) {
\r
8255 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8256 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8257 pa_strerror( pa_error ) << ".";
\r
8258 errorText_ = errorStream_.str();
\r
8259 MUTEX_UNLOCK( &stream_.mutex );
\r
8260 error( RtAudioError::SYSTEM_ERROR );
\r
8265 stream_.state = STREAM_STOPPED;
\r
8266 MUTEX_UNLOCK( &stream_.mutex );
\r
8269 void RtApiPulse::abortStream( void )
\r
8271 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8273 if ( stream_.state == STREAM_CLOSED ) {
\r
8274 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8275 error( RtAudioError::INVALID_USE );
\r
8278 if ( stream_.state == STREAM_STOPPED ) {
\r
8279 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8280 error( RtAudioError::WARNING );
\r
8284 stream_.state = STREAM_STOPPED;
\r
8285 MUTEX_LOCK( &stream_.mutex );
\r
8287 if ( pah && pah->s_play ) {
\r
8289 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8290 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8291 pa_strerror( pa_error ) << ".";
\r
8292 errorText_ = errorStream_.str();
\r
8293 MUTEX_UNLOCK( &stream_.mutex );
\r
8294 error( RtAudioError::SYSTEM_ERROR );
\r
8299 stream_.state = STREAM_STOPPED;
\r
8300 MUTEX_UNLOCK( &stream_.mutex );
\r
8303 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8304 unsigned int channels, unsigned int firstChannel,
\r
8305 unsigned int sampleRate, RtAudioFormat format,
\r
8306 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8308 PulseAudioHandle *pah = 0;
\r
8309 unsigned long bufferBytes = 0;
\r
8310 pa_sample_spec ss;
\r
8312 if ( device != 0 ) return false;
\r
8313 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8314 if ( channels != 1 && channels != 2 ) {
\r
8315 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8318 ss.channels = channels;
\r
8320 if ( firstChannel != 0 ) return false;
\r
8322 bool sr_found = false;
\r
8323 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8324 if ( sampleRate == *sr ) {
\r
8326 stream_.sampleRate = sampleRate;
\r
8327 ss.rate = sampleRate;
\r
8331 if ( !sr_found ) {
\r
8332 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8336 bool sf_found = 0;
\r
8337 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8338 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8339 if ( format == sf->rtaudio_format ) {
\r
8341 stream_.userFormat = sf->rtaudio_format;
\r
8342 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8343 ss.format = sf->pa_format;
\r
8347 if ( !sf_found ) { // Use internal data format conversion.
\r
8348 stream_.userFormat = format;
\r
8349 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8350 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8353 // Set other stream parameters.
\r
8354 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8355 else stream_.userInterleaved = true;
\r
8356 stream_.deviceInterleaved[mode] = true;
\r
8357 stream_.nBuffers = 1;
\r
8358 stream_.doByteSwap[mode] = false;
\r
8359 stream_.nUserChannels[mode] = channels;
\r
8360 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8361 stream_.channelOffset[mode] = 0;
\r
8362 std::string streamName = "RtAudio";
\r
8364 // Set flags for buffer conversion.
\r
8365 stream_.doConvertBuffer[mode] = false;
\r
8366 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8367 stream_.doConvertBuffer[mode] = true;
\r
8368 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8369 stream_.doConvertBuffer[mode] = true;
\r
8371 // Allocate necessary internal buffers.
\r
8372 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8373 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8374 if ( stream_.userBuffer[mode] == NULL ) {
\r
8375 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8378 stream_.bufferSize = *bufferSize;
\r
8380 if ( stream_.doConvertBuffer[mode] ) {
\r
8382 bool makeBuffer = true;
\r
8383 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8384 if ( mode == INPUT ) {
\r
8385 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8386 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8387 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8391 if ( makeBuffer ) {
\r
8392 bufferBytes *= *bufferSize;
\r
8393 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8394 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8395 if ( stream_.deviceBuffer == NULL ) {
\r
8396 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8402 stream_.device[mode] = device;
\r
8404 // Setup the buffer conversion information structure.
\r
8405 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8407 if ( !stream_.apiHandle ) {
\r
8408 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8410 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8414 stream_.apiHandle = pah;
\r
8415 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8416 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8420 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8423 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8426 pa_buffer_attr buffer_attr;
\r
8427 buffer_attr.fragsize = bufferBytes;
\r
8428 buffer_attr.maxlength = -1;
\r
8430 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8431 if ( !pah->s_rec ) {
\r
8432 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8437 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8438 if ( !pah->s_play ) {
\r
8439 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8447 if ( stream_.mode == UNINITIALIZED )
\r
8448 stream_.mode = mode;
\r
8449 else if ( stream_.mode == mode )
\r
8452 stream_.mode = DUPLEX;
\r
8454 if ( !stream_.callbackInfo.isRunning ) {
\r
8455 stream_.callbackInfo.object = this;
\r
8456 stream_.callbackInfo.isRunning = true;
\r
8457 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8458 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8463 stream_.state = STREAM_STOPPED;
\r
8467 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8468 pthread_cond_destroy( &pah->runnable_cv );
\r
8470 stream_.apiHandle = 0;
\r
8473 for ( int i=0; i<2; i++ ) {
\r
8474 if ( stream_.userBuffer[i] ) {
\r
8475 free( stream_.userBuffer[i] );
\r
8476 stream_.userBuffer[i] = 0;
\r
8480 if ( stream_.deviceBuffer ) {
\r
8481 free( stream_.deviceBuffer );
\r
8482 stream_.deviceBuffer = 0;
\r
8488 //******************** End of __LINUX_PULSE__ *********************//
\r
8491 #if defined(__LINUX_OSS__)
\r
8493 #include <unistd.h>
\r
8494 #include <sys/ioctl.h>
\r
8495 #include <unistd.h>
\r
8496 #include <fcntl.h>
\r
8497 #include <sys/soundcard.h>
\r
8498 #include <errno.h>
\r
8501 static void *ossCallbackHandler(void * ptr);
\r
8503 // A structure to hold various information related to the OSS API
\r
8504 // implementation.
\r
// Per-stream bookkeeping for the OSS backend: one device descriptor per
// direction, over/underrun flags, a trigger latch, and the condition
// variable the callback thread blocks on while the stream is stopped.
struct OssHandle {
  int id[2];            // device ids
  bool xrun[2];
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8515 RtApiOss :: RtApiOss()
\r
8517 // Nothing to do here.
\r
8520 RtApiOss :: ~RtApiOss()
\r
8522 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8525 unsigned int RtApiOss :: getDeviceCount( void )
\r
8527 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8528 if ( mixerfd == -1 ) {
\r
8529 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8530 error( RtAudioError::WARNING );
\r
8534 oss_sysinfo sysinfo;
\r
8535 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8537 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8538 error( RtAudioError::WARNING );
\r
8543 return sysinfo.numaudios;
\r
8546 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8548 RtAudio::DeviceInfo info;
\r
8549 info.probed = false;
\r
8551 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8552 if ( mixerfd == -1 ) {
\r
8553 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8554 error( RtAudioError::WARNING );
\r
8558 oss_sysinfo sysinfo;
\r
8559 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8560 if ( result == -1 ) {
\r
8562 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8563 error( RtAudioError::WARNING );
\r
8567 unsigned nDevices = sysinfo.numaudios;
\r
8568 if ( nDevices == 0 ) {
\r
8570 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8571 error( RtAudioError::INVALID_USE );
\r
8575 if ( device >= nDevices ) {
\r
8577 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8578 error( RtAudioError::INVALID_USE );
\r
8582 oss_audioinfo ainfo;
\r
8583 ainfo.dev = device;
\r
8584 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8586 if ( result == -1 ) {
\r
8587 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8588 errorText_ = errorStream_.str();
\r
8589 error( RtAudioError::WARNING );
\r
8594 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8595 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8596 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8597 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8598 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8601 // Probe data formats ... do for input
\r
8602 unsigned long mask = ainfo.iformats;
\r
8603 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8604 info.nativeFormats |= RTAUDIO_SINT16;
\r
8605 if ( mask & AFMT_S8 )
\r
8606 info.nativeFormats |= RTAUDIO_SINT8;
\r
8607 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8608 info.nativeFormats |= RTAUDIO_SINT32;
\r
8609 if ( mask & AFMT_FLOAT )
\r
8610 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8611 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8612 info.nativeFormats |= RTAUDIO_SINT24;
\r
8614 // Check that we have at least one supported format
\r
8615 if ( info.nativeFormats == 0 ) {
\r
8616 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8617 errorText_ = errorStream_.str();
\r
8618 error( RtAudioError::WARNING );
\r
8622 // Probe the supported sample rates.
\r
8623 info.sampleRates.clear();
\r
8624 if ( ainfo.nrates ) {
\r
8625 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8626 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8627 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8628 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8635 // Check min and max rate values;
\r
8636 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8637 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8638 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8642 if ( info.sampleRates.size() == 0 ) {
\r
8643 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8644 errorText_ = errorStream_.str();
\r
8645 error( RtAudioError::WARNING );
\r
8648 info.probed = true;
\r
8649 info.name = ainfo.name;
\r
8656 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8657 unsigned int firstChannel, unsigned int sampleRate,
\r
8658 RtAudioFormat format, unsigned int *bufferSize,
\r
8659 RtAudio::StreamOptions *options )
\r
8661 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8662 if ( mixerfd == -1 ) {
\r
8663 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8667 oss_sysinfo sysinfo;
\r
8668 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8669 if ( result == -1 ) {
\r
8671 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8675 unsigned nDevices = sysinfo.numaudios;
\r
8676 if ( nDevices == 0 ) {
\r
8677 // This should not happen because a check is made before this function is called.
\r
8679 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8683 if ( device >= nDevices ) {
\r
8684 // This should not happen because a check is made before this function is called.
\r
8686 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8690 oss_audioinfo ainfo;
\r
8691 ainfo.dev = device;
\r
8692 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8694 if ( result == -1 ) {
\r
8695 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8696 errorText_ = errorStream_.str();
\r
8700 // Check if device supports input or output
\r
8701 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8702 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8703 if ( mode == OUTPUT )
\r
8704 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8706 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8707 errorText_ = errorStream_.str();
\r
8712 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8713 if ( mode == OUTPUT )
\r
8714 flags |= O_WRONLY;
\r
8715 else { // mode == INPUT
\r
8716 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8717 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8718 close( handle->id[0] );
\r
8719 handle->id[0] = 0;
\r
8720 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8721 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8722 errorText_ = errorStream_.str();
\r
8725 // Check that the number previously set channels is the same.
\r
8726 if ( stream_.nUserChannels[0] != channels ) {
\r
8727 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8728 errorText_ = errorStream_.str();
\r
8734 flags |= O_RDONLY;
\r
8737 // Set exclusive access if specified.
\r
8738 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8740 // Try to open the device.
\r
8742 fd = open( ainfo.devnode, flags, 0 );
\r
8744 if ( errno == EBUSY )
\r
8745 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8747 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8748 errorText_ = errorStream_.str();
\r
8752 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8754 if ( flags | O_RDWR ) {
\r
8755 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8756 if ( result == -1) {
\r
8757 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8758 errorText_ = errorStream_.str();
\r
8764 // Check the device channel support.
\r
8765 stream_.nUserChannels[mode] = channels;
\r
8766 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8768 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8769 errorText_ = errorStream_.str();
\r
8773 // Set the number of channels.
\r
8774 int deviceChannels = channels + firstChannel;
\r
8775 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8776 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8778 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8779 errorText_ = errorStream_.str();
\r
8782 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8784 // Get the data format mask
\r
8786 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8787 if ( result == -1 ) {
\r
8789 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8790 errorText_ = errorStream_.str();
\r
8794 // Determine how to set the device format.
\r
8795 stream_.userFormat = format;
\r
8796 int deviceFormat = -1;
\r
8797 stream_.doByteSwap[mode] = false;
\r
8798 if ( format == RTAUDIO_SINT8 ) {
\r
8799 if ( mask & AFMT_S8 ) {
\r
8800 deviceFormat = AFMT_S8;
\r
8801 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8804 else if ( format == RTAUDIO_SINT16 ) {
\r
8805 if ( mask & AFMT_S16_NE ) {
\r
8806 deviceFormat = AFMT_S16_NE;
\r
8807 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8809 else if ( mask & AFMT_S16_OE ) {
\r
8810 deviceFormat = AFMT_S16_OE;
\r
8811 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8812 stream_.doByteSwap[mode] = true;
\r
8815 else if ( format == RTAUDIO_SINT24 ) {
\r
8816 if ( mask & AFMT_S24_NE ) {
\r
8817 deviceFormat = AFMT_S24_NE;
\r
8818 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8820 else if ( mask & AFMT_S24_OE ) {
\r
8821 deviceFormat = AFMT_S24_OE;
\r
8822 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8823 stream_.doByteSwap[mode] = true;
\r
8826 else if ( format == RTAUDIO_SINT32 ) {
\r
8827 if ( mask & AFMT_S32_NE ) {
\r
8828 deviceFormat = AFMT_S32_NE;
\r
8829 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8831 else if ( mask & AFMT_S32_OE ) {
\r
8832 deviceFormat = AFMT_S32_OE;
\r
8833 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8834 stream_.doByteSwap[mode] = true;
\r
8838 if ( deviceFormat == -1 ) {
\r
8839 // The user requested format is not natively supported by the device.
\r
8840 if ( mask & AFMT_S16_NE ) {
\r
8841 deviceFormat = AFMT_S16_NE;
\r
8842 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8844 else if ( mask & AFMT_S32_NE ) {
\r
8845 deviceFormat = AFMT_S32_NE;
\r
8846 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8848 else if ( mask & AFMT_S24_NE ) {
\r
8849 deviceFormat = AFMT_S24_NE;
\r
8850 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8852 else if ( mask & AFMT_S16_OE ) {
\r
8853 deviceFormat = AFMT_S16_OE;
\r
8854 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8855 stream_.doByteSwap[mode] = true;
\r
8857 else if ( mask & AFMT_S32_OE ) {
\r
8858 deviceFormat = AFMT_S32_OE;
\r
8859 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8860 stream_.doByteSwap[mode] = true;
\r
8862 else if ( mask & AFMT_S24_OE ) {
\r
8863 deviceFormat = AFMT_S24_OE;
\r
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8865 stream_.doByteSwap[mode] = true;
\r
8867 else if ( mask & AFMT_S8) {
\r
8868 deviceFormat = AFMT_S8;
\r
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8873 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8874 // This really shouldn't happen ...
\r
8876 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8877 errorText_ = errorStream_.str();
\r
8881 // Set the data format.
\r
8882 int temp = deviceFormat;
\r
8883 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8884 if ( result == -1 || deviceFormat != temp ) {
\r
8886 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8887 errorText_ = errorStream_.str();
\r
8891 // Attempt to set the buffer size. According to OSS, the minimum
\r
8892 // number of buffers is two. The supposed minimum buffer size is 16
\r
8893 // bytes, so that will be our lower bound. The argument to this
\r
8894 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8895 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8896 // We'll check the actual value used near the end of the setup
\r
8898 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8899 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8901 if ( options ) buffers = options->numberOfBuffers;
\r
8902 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8903 if ( buffers < 2 ) buffers = 3;
\r
8904 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8905 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8906 if ( result == -1 ) {
\r
8908 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8909 errorText_ = errorStream_.str();
\r
8912 stream_.nBuffers = buffers;
\r
8914 // Save buffer size (in sample frames).
\r
8915 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8916 stream_.bufferSize = *bufferSize;
\r
8918 // Set the sample rate.
\r
8919 int srate = sampleRate;
\r
8920 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8921 if ( result == -1 ) {
\r
8923 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8924 errorText_ = errorStream_.str();
\r
8928 // Verify the sample rate setup worked.
\r
8929 if ( abs( srate - sampleRate ) > 100 ) {
\r
8931 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8932 errorText_ = errorStream_.str();
\r
8935 stream_.sampleRate = sampleRate;
\r
8937 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8938 // We're doing duplex setup here.
\r
8939 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8940 stream_.nDeviceChannels[0] = deviceChannels;
\r
8943 // Set interleaving parameters.
\r
8944 stream_.userInterleaved = true;
\r
8945 stream_.deviceInterleaved[mode] = true;
\r
8946 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8947 stream_.userInterleaved = false;
\r
8949 // Set flags for buffer conversion
\r
8950 stream_.doConvertBuffer[mode] = false;
\r
8951 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8952 stream_.doConvertBuffer[mode] = true;
\r
8953 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8954 stream_.doConvertBuffer[mode] = true;
\r
8955 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8956 stream_.nUserChannels[mode] > 1 )
\r
8957 stream_.doConvertBuffer[mode] = true;
\r
8959 // Allocate the stream handles if necessary and then save.
\r
8960 if ( stream_.apiHandle == 0 ) {
\r
8962 handle = new OssHandle;
\r
8964 catch ( std::bad_alloc& ) {
\r
8965 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8969 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8970 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8974 stream_.apiHandle = (void *) handle;
\r
8977 handle = (OssHandle *) stream_.apiHandle;
\r
8979 handle->id[mode] = fd;
\r
8981 // Allocate necessary internal buffers.
\r
8982 unsigned long bufferBytes;
\r
8983 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8984 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8985 if ( stream_.userBuffer[mode] == NULL ) {
\r
8986 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8990 if ( stream_.doConvertBuffer[mode] ) {
\r
8992 bool makeBuffer = true;
\r
8993 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8994 if ( mode == INPUT ) {
\r
8995 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8996 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8997 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9001 if ( makeBuffer ) {
\r
9002 bufferBytes *= *bufferSize;
\r
9003 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9004 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9005 if ( stream_.deviceBuffer == NULL ) {
\r
9006 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9012 stream_.device[mode] = device;
\r
9013 stream_.state = STREAM_STOPPED;
\r
9015 // Setup the buffer conversion information structure.
\r
9016 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9018 // Setup thread if necessary.
\r
9019 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9020 // We had already set up an output stream.
\r
9021 stream_.mode = DUPLEX;
\r
9022 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9025 stream_.mode = mode;
\r
9027 // Setup callback thread.
\r
9028 stream_.callbackInfo.object = (void *) this;
\r
9030 // Set the thread attributes for joinable and realtime scheduling
\r
9031 // priority. The higher priority will only take affect if the
\r
9032 // program is run as root or suid.
\r
9033 pthread_attr_t attr;
\r
9034 pthread_attr_init( &attr );
\r
9035 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9036 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9037 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9038 struct sched_param param;
\r
9039 int priority = options->priority;
\r
9040 int min = sched_get_priority_min( SCHED_RR );
\r
9041 int max = sched_get_priority_max( SCHED_RR );
\r
9042 if ( priority < min ) priority = min;
\r
9043 else if ( priority > max ) priority = max;
\r
9044 param.sched_priority = priority;
\r
9045 pthread_attr_setschedparam( &attr, ¶m );
\r
9046 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9049 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9051 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9054 stream_.callbackInfo.isRunning = true;
\r
9055 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9056 pthread_attr_destroy( &attr );
\r
9058 stream_.callbackInfo.isRunning = false;
\r
9059 errorText_ = "RtApiOss::error creating callback thread!";
\r
9068 pthread_cond_destroy( &handle->runnable );
\r
9069 if ( handle->id[0] ) close( handle->id[0] );
\r
9070 if ( handle->id[1] ) close( handle->id[1] );
\r
9072 stream_.apiHandle = 0;
\r
9075 for ( int i=0; i<2; i++ ) {
\r
9076 if ( stream_.userBuffer[i] ) {
\r
9077 free( stream_.userBuffer[i] );
\r
9078 stream_.userBuffer[i] = 0;
\r
9082 if ( stream_.deviceBuffer ) {
\r
9083 free( stream_.deviceBuffer );
\r
9084 stream_.deviceBuffer = 0;
\r
9090 void RtApiOss :: closeStream()
\r
9092 if ( stream_.state == STREAM_CLOSED ) {
\r
9093 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9094 error( RtAudioError::WARNING );
\r
9098 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9099 stream_.callbackInfo.isRunning = false;
\r
9100 MUTEX_LOCK( &stream_.mutex );
\r
9101 if ( stream_.state == STREAM_STOPPED )
\r
9102 pthread_cond_signal( &handle->runnable );
\r
9103 MUTEX_UNLOCK( &stream_.mutex );
\r
9104 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9106 if ( stream_.state == STREAM_RUNNING ) {
\r
9107 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9108 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9110 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9111 stream_.state = STREAM_STOPPED;
\r
9115 pthread_cond_destroy( &handle->runnable );
\r
9116 if ( handle->id[0] ) close( handle->id[0] );
\r
9117 if ( handle->id[1] ) close( handle->id[1] );
\r
9119 stream_.apiHandle = 0;
\r
9122 for ( int i=0; i<2; i++ ) {
\r
9123 if ( stream_.userBuffer[i] ) {
\r
9124 free( stream_.userBuffer[i] );
\r
9125 stream_.userBuffer[i] = 0;
\r
9129 if ( stream_.deviceBuffer ) {
\r
9130 free( stream_.deviceBuffer );
\r
9131 stream_.deviceBuffer = 0;
\r
9134 stream_.mode = UNINITIALIZED;
\r
9135 stream_.state = STREAM_CLOSED;
\r
9138 void RtApiOss :: startStream()
\r
9141 if ( stream_.state == STREAM_RUNNING ) {
\r
9142 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9143 error( RtAudioError::WARNING );
\r
9147 MUTEX_LOCK( &stream_.mutex );
\r
9149 stream_.state = STREAM_RUNNING;
\r
9151 // No need to do anything else here ... OSS automatically starts
\r
9152 // when fed samples.
\r
9154 MUTEX_UNLOCK( &stream_.mutex );
\r
9156 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9157 pthread_cond_signal( &handle->runnable );
\r
9160 void RtApiOss :: stopStream()
\r
9163 if ( stream_.state == STREAM_STOPPED ) {
\r
9164 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9165 error( RtAudioError::WARNING );
\r
9169 MUTEX_LOCK( &stream_.mutex );
\r
9171 // The state might change while waiting on a mutex.
\r
9172 if ( stream_.state == STREAM_STOPPED ) {
\r
9173 MUTEX_UNLOCK( &stream_.mutex );
\r
9178 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9179 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9181 // Flush the output with zeros a few times.
\r
9184 RtAudioFormat format;
\r
9186 if ( stream_.doConvertBuffer[0] ) {
\r
9187 buffer = stream_.deviceBuffer;
\r
9188 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9189 format = stream_.deviceFormat[0];
\r
9192 buffer = stream_.userBuffer[0];
\r
9193 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9194 format = stream_.userFormat;
\r
9197 memset( buffer, 0, samples * formatBytes(format) );
\r
9198 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9199 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9200 if ( result == -1 ) {
\r
9201 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9202 error( RtAudioError::WARNING );
\r
9206 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9207 if ( result == -1 ) {
\r
9208 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9209 errorText_ = errorStream_.str();
\r
9212 handle->triggered = false;
\r
9215 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9216 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9217 if ( result == -1 ) {
\r
9218 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9219 errorText_ = errorStream_.str();
\r
9225 stream_.state = STREAM_STOPPED;
\r
9226 MUTEX_UNLOCK( &stream_.mutex );
\r
9228 if ( result != -1 ) return;
\r
9229 error( RtAudioError::SYSTEM_ERROR );
\r
9232 void RtApiOss :: abortStream()
\r
9235 if ( stream_.state == STREAM_STOPPED ) {
\r
9236 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9237 error( RtAudioError::WARNING );
\r
9241 MUTEX_LOCK( &stream_.mutex );
\r
9243 // The state might change while waiting on a mutex.
\r
9244 if ( stream_.state == STREAM_STOPPED ) {
\r
9245 MUTEX_UNLOCK( &stream_.mutex );
\r
9250 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9251 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9252 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9253 if ( result == -1 ) {
\r
9254 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9255 errorText_ = errorStream_.str();
\r
9258 handle->triggered = false;
\r
9261 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9262 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9263 if ( result == -1 ) {
\r
9264 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9265 errorText_ = errorStream_.str();
\r
9271 stream_.state = STREAM_STOPPED;
\r
9272 MUTEX_UNLOCK( &stream_.mutex );
\r
9274 if ( result != -1 ) return;
\r
9275 error( RtAudioError::SYSTEM_ERROR );
\r
9278 void RtApiOss :: callbackEvent()
\r
9280 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9281 if ( stream_.state == STREAM_STOPPED ) {
\r
9282 MUTEX_LOCK( &stream_.mutex );
\r
9283 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9284 if ( stream_.state != STREAM_RUNNING ) {
\r
9285 MUTEX_UNLOCK( &stream_.mutex );
\r
9288 MUTEX_UNLOCK( &stream_.mutex );
\r
9291 if ( stream_.state == STREAM_CLOSED ) {
\r
9292 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9293 error( RtAudioError::WARNING );
\r
9297 // Invoke user callback to get fresh output data.
\r
9298 int doStopStream = 0;
\r
9299 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9300 double streamTime = getStreamTime();
\r
9301 RtAudioStreamStatus status = 0;
\r
9302 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9303 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9304 handle->xrun[0] = false;
\r
9306 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9307 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9308 handle->xrun[1] = false;
\r
9310 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9311 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9312 if ( doStopStream == 2 ) {
\r
9313 this->abortStream();
\r
9317 MUTEX_LOCK( &stream_.mutex );
\r
9319 // The state might change while waiting on a mutex.
\r
9320 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9325 RtAudioFormat format;
\r
9327 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9329 // Setup parameters and do buffer conversion if necessary.
\r
9330 if ( stream_.doConvertBuffer[0] ) {
\r
9331 buffer = stream_.deviceBuffer;
\r
9332 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9333 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9334 format = stream_.deviceFormat[0];
\r
9337 buffer = stream_.userBuffer[0];
\r
9338 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9339 format = stream_.userFormat;
\r
9342 // Do byte swapping if necessary.
\r
9343 if ( stream_.doByteSwap[0] )
\r
9344 byteSwapBuffer( buffer, samples, format );
\r
9346 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9348 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9349 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9350 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9351 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9352 handle->triggered = true;
\r
9355 // Write samples to device.
\r
9356 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9358 if ( result == -1 ) {
\r
9359 // We'll assume this is an underrun, though there isn't a
\r
9360 // specific means for determining that.
\r
9361 handle->xrun[0] = true;
\r
9362 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9363 error( RtAudioError::WARNING );
\r
9364 // Continue on to input section.
\r
9368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9370 // Setup parameters.
\r
9371 if ( stream_.doConvertBuffer[1] ) {
\r
9372 buffer = stream_.deviceBuffer;
\r
9373 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9374 format = stream_.deviceFormat[1];
\r
9377 buffer = stream_.userBuffer[1];
\r
9378 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9379 format = stream_.userFormat;
\r
9382 // Read samples from device.
\r
9383 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9385 if ( result == -1 ) {
\r
9386 // We'll assume this is an overrun, though there isn't a
\r
9387 // specific means for determining that.
\r
9388 handle->xrun[1] = true;
\r
9389 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9390 error( RtAudioError::WARNING );
\r
9394 // Do byte swapping if necessary.
\r
9395 if ( stream_.doByteSwap[1] )
\r
9396 byteSwapBuffer( buffer, samples, format );
\r
9398 // Do buffer conversion if necessary.
\r
9399 if ( stream_.doConvertBuffer[1] )
\r
9400 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9404 MUTEX_UNLOCK( &stream_.mutex );
\r
9406 RtApi::tickStreamTime();
\r
9407 if ( doStopStream == 1 ) this->stopStream();
\r
9410 static void *ossCallbackHandler( void *ptr )
\r
9412 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9413 RtApiOss *object = (RtApiOss *) info->object;
\r
9414 bool *isRunning = &info->isRunning;
\r
9416 while ( *isRunning == true ) {
\r
9417 pthread_testcancel();
\r
9418 object->callbackEvent();
\r
9421 pthread_exit( NULL );
\r
9424 //******************** End of __LINUX_OSS__ *********************//
\r
9428 // *************************************************** //
\r
9430 // Protected common (OS-independent) RtAudio methods.
\r
9432 // *************************************************** //
\r
9434 // This method can be modified to control the behavior of error
\r
9435 // message printing.
\r
9436 void RtApi :: error( RtAudioError::Type type )
\r
9438 errorStream_.str(""); // clear the ostringstream
\r
9440 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9441 if ( errorCallback ) {
\r
9442 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9444 if ( firstErrorOccurred_ )
\r
9447 firstErrorOccurred_ = true;
\r
9448 const std::string errorMessage = errorText_;
\r
9450 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9451 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9455 errorCallback( type, errorMessage );
\r
9456 firstErrorOccurred_ = false;
\r
9460 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9461 std::cerr << '\n' << errorText_ << "\n\n";
\r
9462 else if ( type != RtAudioError::WARNING )
\r
9463 throw( RtAudioError( errorText_, type ) );
\r
9466 void RtApi :: verifyStream()
\r
9468 if ( stream_.state == STREAM_CLOSED ) {
\r
9469 errorText_ = "RtApi:: a stream is not open!";
\r
9470 error( RtAudioError::INVALID_USE );
\r
9474 void RtApi :: clearStreamInfo()
\r
9476 stream_.mode = UNINITIALIZED;
\r
9477 stream_.state = STREAM_CLOSED;
\r
9478 stream_.sampleRate = 0;
\r
9479 stream_.bufferSize = 0;
\r
9480 stream_.nBuffers = 0;
\r
9481 stream_.userFormat = 0;
\r
9482 stream_.userInterleaved = true;
\r
9483 stream_.streamTime = 0.0;
\r
9484 stream_.apiHandle = 0;
\r
9485 stream_.deviceBuffer = 0;
\r
9486 stream_.callbackInfo.callback = 0;
\r
9487 stream_.callbackInfo.userData = 0;
\r
9488 stream_.callbackInfo.isRunning = false;
\r
9489 stream_.callbackInfo.errorCallback = 0;
\r
9490 for ( int i=0; i<2; i++ ) {
\r
9491 stream_.device[i] = 11111;
\r
9492 stream_.doConvertBuffer[i] = false;
\r
9493 stream_.deviceInterleaved[i] = true;
\r
9494 stream_.doByteSwap[i] = false;
\r
9495 stream_.nUserChannels[i] = 0;
\r
9496 stream_.nDeviceChannels[i] = 0;
\r
9497 stream_.channelOffset[i] = 0;
\r
9498 stream_.deviceFormat[i] = 0;
\r
9499 stream_.latency[i] = 0;
\r
9500 stream_.userBuffer[i] = 0;
\r
9501 stream_.convertInfo[i].channels = 0;
\r
9502 stream_.convertInfo[i].inJump = 0;
\r
9503 stream_.convertInfo[i].outJump = 0;
\r
9504 stream_.convertInfo[i].inFormat = 0;
\r
9505 stream_.convertInfo[i].outFormat = 0;
\r
9506 stream_.convertInfo[i].inOffset.clear();
\r
9507 stream_.convertInfo[i].outOffset.clear();
\r
9511 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9513 if ( format == RTAUDIO_SINT16 )
\r
9515 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9517 else if ( format == RTAUDIO_FLOAT64 )
\r
9519 else if ( format == RTAUDIO_SINT24 )
\r
9521 else if ( format == RTAUDIO_SINT8 )
\r
9524 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9525 error( RtAudioError::WARNING );
\r
9530 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9532 if ( mode == INPUT ) { // convert device to user buffer
\r
9533 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9534 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9535 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9536 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9538 else { // convert user to device buffer
\r
9539 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9540 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9541 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9542 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9545 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9546 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9548 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9550 // Set up the interleave/deinterleave offsets.
\r
9551 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9552 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9553 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9554 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9555 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9556 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9557 stream_.convertInfo[mode].inJump = 1;
\r
9561 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9562 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9563 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9564 stream_.convertInfo[mode].outJump = 1;
\r
9568 else { // no (de)interleaving
\r
9569 if ( stream_.userInterleaved ) {
\r
9570 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9571 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9572 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9576 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9577 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9578 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9579 stream_.convertInfo[mode].inJump = 1;
\r
9580 stream_.convertInfo[mode].outJump = 1;
\r
9585 // Add channel offset.
\r
9586 if ( firstChannel > 0 ) {
\r
9587 if ( stream_.deviceInterleaved[mode] ) {
\r
9588 if ( mode == OUTPUT ) {
\r
9589 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9590 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9593 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9594 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9598 if ( mode == OUTPUT ) {
\r
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9600 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9603 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9604 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9610 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9612 // This function does format conversion, input/output channel compensation, and
\r
9613 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9614 // the lower three bytes of a 32-bit integer.
\r
9616 // Clear our device buffer when in/out duplex device channels are different
\r
9617 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9618 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9619 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9622 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9624 Float64 *out = (Float64 *)outBuffer;
\r
9626 if (info.inFormat == RTAUDIO_SINT8) {
\r
9627 signed char *in = (signed char *)inBuffer;
\r
9628 scale = 1.0 / 127.5;
\r
9629 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9630 for (j=0; j<info.channels; j++) {
\r
9631 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9632 out[info.outOffset[j]] += 0.5;
\r
9633 out[info.outOffset[j]] *= scale;
\r
9635 in += info.inJump;
\r
9636 out += info.outJump;
\r
9639 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9640 Int16 *in = (Int16 *)inBuffer;
\r
9641 scale = 1.0 / 32767.5;
\r
9642 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9643 for (j=0; j<info.channels; j++) {
\r
9644 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9645 out[info.outOffset[j]] += 0.5;
\r
9646 out[info.outOffset[j]] *= scale;
\r
9648 in += info.inJump;
\r
9649 out += info.outJump;
\r
9652 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9653 Int24 *in = (Int24 *)inBuffer;
\r
9654 scale = 1.0 / 8388607.5;
\r
9655 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9656 for (j=0; j<info.channels; j++) {
\r
9657 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9658 out[info.outOffset[j]] += 0.5;
\r
9659 out[info.outOffset[j]] *= scale;
\r
9661 in += info.inJump;
\r
9662 out += info.outJump;
\r
9665 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9666 Int32 *in = (Int32 *)inBuffer;
\r
9667 scale = 1.0 / 2147483647.5;
\r
9668 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9669 for (j=0; j<info.channels; j++) {
\r
9670 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9671 out[info.outOffset[j]] += 0.5;
\r
9672 out[info.outOffset[j]] *= scale;
\r
9674 in += info.inJump;
\r
9675 out += info.outJump;
\r
9678 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9679 Float32 *in = (Float32 *)inBuffer;
\r
9680 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9681 for (j=0; j<info.channels; j++) {
\r
9682 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9684 in += info.inJump;
\r
9685 out += info.outJump;
\r
9688 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9689 // Channel compensation and/or (de)interleaving only.
\r
9690 Float64 *in = (Float64 *)inBuffer;
\r
9691 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9692 for (j=0; j<info.channels; j++) {
\r
9693 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9695 in += info.inJump;
\r
9696 out += info.outJump;
\r
9700 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9702 Float32 *out = (Float32 *)outBuffer;
\r
9704 if (info.inFormat == RTAUDIO_SINT8) {
\r
9705 signed char *in = (signed char *)inBuffer;
\r
9706 scale = (Float32) ( 1.0 / 127.5 );
\r
9707 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9708 for (j=0; j<info.channels; j++) {
\r
9709 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9710 out[info.outOffset[j]] += 0.5;
\r
9711 out[info.outOffset[j]] *= scale;
\r
9713 in += info.inJump;
\r
9714 out += info.outJump;
\r
9717 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9718 Int16 *in = (Int16 *)inBuffer;
\r
9719 scale = (Float32) ( 1.0 / 32767.5 );
\r
9720 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9721 for (j=0; j<info.channels; j++) {
\r
9722 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9723 out[info.outOffset[j]] += 0.5;
\r
9724 out[info.outOffset[j]] *= scale;
\r
9726 in += info.inJump;
\r
9727 out += info.outJump;
\r
9730 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9731 Int24 *in = (Int24 *)inBuffer;
\r
9732 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9733 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9734 for (j=0; j<info.channels; j++) {
\r
9735 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9736 out[info.outOffset[j]] += 0.5;
\r
9737 out[info.outOffset[j]] *= scale;
\r
9739 in += info.inJump;
\r
9740 out += info.outJump;
\r
9743 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9744 Int32 *in = (Int32 *)inBuffer;
\r
9745 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9746 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9747 for (j=0; j<info.channels; j++) {
\r
9748 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9749 out[info.outOffset[j]] += 0.5;
\r
9750 out[info.outOffset[j]] *= scale;
\r
9752 in += info.inJump;
\r
9753 out += info.outJump;
\r
9756 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9757 // Channel compensation and/or (de)interleaving only.
\r
9758 Float32 *in = (Float32 *)inBuffer;
\r
9759 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9760 for (j=0; j<info.channels; j++) {
\r
9761 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9763 in += info.inJump;
\r
9764 out += info.outJump;
\r
9767 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9768 Float64 *in = (Float64 *)inBuffer;
\r
9769 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9770 for (j=0; j<info.channels; j++) {
\r
9771 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9773 in += info.inJump;
\r
9774 out += info.outJump;
\r
9778 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9779 Int32 *out = (Int32 *)outBuffer;
\r
9780 if (info.inFormat == RTAUDIO_SINT8) {
\r
9781 signed char *in = (signed char *)inBuffer;
\r
9782 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9783 for (j=0; j<info.channels; j++) {
\r
9784 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9785 out[info.outOffset[j]] <<= 24;
\r
9787 in += info.inJump;
\r
9788 out += info.outJump;
\r
9791 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9792 Int16 *in = (Int16 *)inBuffer;
\r
9793 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9794 for (j=0; j<info.channels; j++) {
\r
9795 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9796 out[info.outOffset[j]] <<= 16;
\r
9798 in += info.inJump;
\r
9799 out += info.outJump;
\r
9802 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9803 Int24 *in = (Int24 *)inBuffer;
\r
9804 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9805 for (j=0; j<info.channels; j++) {
\r
9806 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9807 out[info.outOffset[j]] <<= 8;
\r
9809 in += info.inJump;
\r
9810 out += info.outJump;
\r
9813 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9814 // Channel compensation and/or (de)interleaving only.
\r
9815 Int32 *in = (Int32 *)inBuffer;
\r
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9817 for (j=0; j<info.channels; j++) {
\r
9818 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9820 in += info.inJump;
\r
9821 out += info.outJump;
\r
9824 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9825 Float32 *in = (Float32 *)inBuffer;
\r
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9827 for (j=0; j<info.channels; j++) {
\r
9828 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9830 in += info.inJump;
\r
9831 out += info.outJump;
\r
9834 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9835 Float64 *in = (Float64 *)inBuffer;
\r
9836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9837 for (j=0; j<info.channels; j++) {
\r
9838 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9840 in += info.inJump;
\r
9841 out += info.outJump;
\r
9845 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9846 Int24 *out = (Int24 *)outBuffer;
\r
9847 if (info.inFormat == RTAUDIO_SINT8) {
\r
9848 signed char *in = (signed char *)inBuffer;
\r
9849 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9850 for (j=0; j<info.channels; j++) {
\r
9851 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9852 //out[info.outOffset[j]] <<= 16;
\r
9854 in += info.inJump;
\r
9855 out += info.outJump;
\r
9858 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9859 Int16 *in = (Int16 *)inBuffer;
\r
9860 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9861 for (j=0; j<info.channels; j++) {
\r
9862 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9863 //out[info.outOffset[j]] <<= 8;
\r
9865 in += info.inJump;
\r
9866 out += info.outJump;
\r
9869 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9870 // Channel compensation and/or (de)interleaving only.
\r
9871 Int24 *in = (Int24 *)inBuffer;
\r
9872 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9873 for (j=0; j<info.channels; j++) {
\r
9874 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9876 in += info.inJump;
\r
9877 out += info.outJump;
\r
9880 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9881 Int32 *in = (Int32 *)inBuffer;
\r
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9883 for (j=0; j<info.channels; j++) {
\r
9884 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9885 //out[info.outOffset[j]] >>= 8;
\r
9887 in += info.inJump;
\r
9888 out += info.outJump;
\r
9891 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9892 Float32 *in = (Float32 *)inBuffer;
\r
9893 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9894 for (j=0; j<info.channels; j++) {
\r
9895 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9897 in += info.inJump;
\r
9898 out += info.outJump;
\r
9901 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9902 Float64 *in = (Float64 *)inBuffer;
\r
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9904 for (j=0; j<info.channels; j++) {
\r
9905 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9907 in += info.inJump;
\r
9908 out += info.outJump;
\r
9912 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9913 Int16 *out = (Int16 *)outBuffer;
\r
9914 if (info.inFormat == RTAUDIO_SINT8) {
\r
9915 signed char *in = (signed char *)inBuffer;
\r
9916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9917 for (j=0; j<info.channels; j++) {
\r
9918 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9919 out[info.outOffset[j]] <<= 8;
\r
9921 in += info.inJump;
\r
9922 out += info.outJump;
\r
9925 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9926 // Channel compensation and/or (de)interleaving only.
\r
9927 Int16 *in = (Int16 *)inBuffer;
\r
9928 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9929 for (j=0; j<info.channels; j++) {
\r
9930 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9932 in += info.inJump;
\r
9933 out += info.outJump;
\r
9936 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9937 Int24 *in = (Int24 *)inBuffer;
\r
9938 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9939 for (j=0; j<info.channels; j++) {
\r
9940 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9942 in += info.inJump;
\r
9943 out += info.outJump;
\r
9946 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9947 Int32 *in = (Int32 *)inBuffer;
\r
9948 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9949 for (j=0; j<info.channels; j++) {
\r
9950 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9952 in += info.inJump;
\r
9953 out += info.outJump;
\r
9956 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9957 Float32 *in = (Float32 *)inBuffer;
\r
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9959 for (j=0; j<info.channels; j++) {
\r
9960 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9962 in += info.inJump;
\r
9963 out += info.outJump;
\r
9966 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9967 Float64 *in = (Float64 *)inBuffer;
\r
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9969 for (j=0; j<info.channels; j++) {
\r
9970 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9972 in += info.inJump;
\r
9973 out += info.outJump;
\r
9977 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9978 signed char *out = (signed char *)outBuffer;
\r
9979 if (info.inFormat == RTAUDIO_SINT8) {
\r
9980 // Channel compensation and/or (de)interleaving only.
\r
9981 signed char *in = (signed char *)inBuffer;
\r
9982 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9983 for (j=0; j<info.channels; j++) {
\r
9984 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9986 in += info.inJump;
\r
9987 out += info.outJump;
\r
9990 if (info.inFormat == RTAUDIO_SINT16) {
\r
9991 Int16 *in = (Int16 *)inBuffer;
\r
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9993 for (j=0; j<info.channels; j++) {
\r
9994 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9996 in += info.inJump;
\r
9997 out += info.outJump;
\r
10000 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10001 Int24 *in = (Int24 *)inBuffer;
\r
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10003 for (j=0; j<info.channels; j++) {
\r
10004 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10006 in += info.inJump;
\r
10007 out += info.outJump;
\r
10010 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10011 Int32 *in = (Int32 *)inBuffer;
\r
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10013 for (j=0; j<info.channels; j++) {
\r
10014 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10016 in += info.inJump;
\r
10017 out += info.outJump;
\r
10020 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10021 Float32 *in = (Float32 *)inBuffer;
\r
10022 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10023 for (j=0; j<info.channels; j++) {
\r
10024 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10026 in += info.inJump;
\r
10027 out += info.outJump;
\r
10030 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10031 Float64 *in = (Float64 *)inBuffer;
\r
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10033 for (j=0; j<info.channels; j++) {
\r
10034 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10036 in += info.inJump;
\r
10037 out += info.outJump;
\r
10043 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10044 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10045 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10047 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10049 register char val;
\r
10050 register char *ptr;
\r
10053 if ( format == RTAUDIO_SINT16 ) {
\r
10054 for ( unsigned int i=0; i<samples; i++ ) {
\r
10055 // Swap 1st and 2nd bytes.
\r
10057 *(ptr) = *(ptr+1);
\r
10060 // Increment 2 bytes.
\r
10064 else if ( format == RTAUDIO_SINT32 ||
\r
10065 format == RTAUDIO_FLOAT32 ) {
\r
10066 for ( unsigned int i=0; i<samples; i++ ) {
\r
10067 // Swap 1st and 4th bytes.
\r
10069 *(ptr) = *(ptr+3);
\r
10072 // Swap 2nd and 3rd bytes.
\r
10075 *(ptr) = *(ptr+1);
\r
10078 // Increment 3 more bytes.
\r
10082 else if ( format == RTAUDIO_SINT24 ) {
\r
10083 for ( unsigned int i=0; i<samples; i++ ) {
\r
10084 // Swap 1st and 3rd bytes.
\r
10086 *(ptr) = *(ptr+2);
\r
10089 // Increment 2 more bytes.
\r
10093 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10094 for ( unsigned int i=0; i<samples; i++ ) {
\r
10095 // Swap 1st and 8th bytes
\r
10097 *(ptr) = *(ptr+7);
\r
10100 // Swap 2nd and 7th bytes
\r
10103 *(ptr) = *(ptr+5);
\r
10106 // Swap 3rd and 6th bytes
\r
10109 *(ptr) = *(ptr+3);
\r
10112 // Swap 4th and 5th bytes
\r
10115 *(ptr) = *(ptr+1);
\r
10118 // Increment 5 more bytes.
\r
10124 // Indentation settings for Vim and Emacs
\r
10126 // Local Variables:
\r
10127 // c-basic-offset: 2
\r
10128 // indent-tabs-mode: nil
\r
10131 // vim: et sts=2 sw=2
\r