/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
\r
41 // RtAudio: Version 4.1.1pre
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // __RTAUDIO_DUMMY__
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //
421 #if defined(__MACOSX_CORE__)
\r
// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1687 // Don't bother draining input
\r
1688 if ( handle->drainCounter ) {
\r
1689 handle->drainCounter++;
\r
1693 AudioDeviceID inputDevice;
\r
1694 inputDevice = handle->id[1];
\r
1695 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1697 if ( handle->nStreams[1] == 1 ) {
\r
1698 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1699 convertBuffer( stream_.userBuffer[1],
\r
1700 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1701 stream_.convertInfo[1] );
\r
1703 else { // copy to user buffer
\r
1704 memcpy( stream_.userBuffer[1],
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1706 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1709 else { // read from multiple streams
\r
1710 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1711 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1713 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1714 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1715 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1716 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1717 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1720 else { // read from multiple multi-channel streams
\r
1721 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1722 Float32 *out, *in;
\r
1724 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1725 UInt32 outChannels = stream_.nUserChannels[1];
\r
1726 if ( stream_.doConvertBuffer[1] ) {
\r
1727 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1728 outChannels = stream_.nDeviceChannels[1];
\r
1731 if ( outInterleaved ) outOffset = 1;
\r
1732 else outOffset = stream_.bufferSize;
\r
1734 channelsLeft = outChannels;
\r
1735 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1737 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1738 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1741 // Account for possible channel offset in first stream
\r
1742 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1743 streamChannels -= stream_.channelOffset[1];
\r
1744 inJump = stream_.channelOffset[1];
\r
1748 // Account for possible unread channels at end of the last stream
\r
1749 if ( streamChannels > channelsLeft ) {
\r
1750 inJump = streamChannels - channelsLeft;
\r
1751 streamChannels = channelsLeft;
\r
1754 // Determine output buffer offsets and skips
\r
1755 if ( outInterleaved ) {
\r
1756 outJump = outChannels;
\r
1757 out += outChannels - channelsLeft;
\r
1761 out += (outChannels - channelsLeft) * outOffset;
\r
1764 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1765 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1766 out[j*outOffset] = *in++;
\r
1771 channelsLeft -= streamChannels;
\r
1775 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1776 convertBuffer( stream_.userBuffer[1],
\r
1777 stream_.deviceBuffer,
\r
1778 stream_.convertInfo[1] );
\r
1784 //MUTEX_UNLOCK( &stream_.mutex );
\r
1786 RtApi::tickStreamTime();
\r
// Translate a CoreAudio OSStatus error code into the name of the matching
// kAudio* constant, for human-readable error reporting. Any code not listed
// falls through to the generic "CoreAudio unknown error" string.
// NOTE(review): this extraction is lossy -- the opening brace, the
// switch( code ) statement, the default: label and the closing braces of
// the original are on lines not visible in this chunk.
1790 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1794 case kAudioHardwareNotRunningError:
\r
1795 return "kAudioHardwareNotRunningError";
\r
1797 case kAudioHardwareUnspecifiedError:
\r
1798 return "kAudioHardwareUnspecifiedError";
\r
1800 case kAudioHardwareUnknownPropertyError:
\r
1801 return "kAudioHardwareUnknownPropertyError";
\r
1803 case kAudioHardwareBadPropertySizeError:
\r
1804 return "kAudioHardwareBadPropertySizeError";
\r
1806 case kAudioHardwareIllegalOperationError:
\r
1807 return "kAudioHardwareIllegalOperationError";
\r
1809 case kAudioHardwareBadObjectError:
\r
1810 return "kAudioHardwareBadObjectError";
\r
1812 case kAudioHardwareBadDeviceError:
\r
1813 return "kAudioHardwareBadDeviceError";
\r
1815 case kAudioHardwareBadStreamError:
\r
1816 return "kAudioHardwareBadStreamError";
\r
1818 case kAudioHardwareUnsupportedOperationError:
\r
1819 return "kAudioHardwareUnsupportedOperationError";
\r
1821 case kAudioDeviceUnsupportedFormatError:
\r
1822 return "kAudioDeviceUnsupportedFormatError";
\r
1824 case kAudioDevicePermissionsError:
\r
1825 return "kAudioDevicePermissionsError";
\r
// Fallback for unrecognized codes.
1828 return "CoreAudio unknown error";
\r
1832 //******************** End of __MACOSX_CORE__ *********************//
\r
1835 #if defined(__UNIX_JACK__)
\r
1837 // JACK is a low-latency audio server, originally written for the
\r
1838 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1839 // connect a number of different applications to an audio device, as
\r
1840 // well as allowing them to share audio between themselves.
\r
1842 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1843 // have ports connected to the server. The JACK server is typically
\r
1844 // started in a terminal as follows:
\r
1846 //    jackd -d alsa -d hw:0
\r
1848 // or through an interface program such as qjackctl. Many of the
\r
1849 // parameters normally set for a stream are fixed by the JACK server
\r
1850 // and can be specified when the JACK server is started. In
\r
1853 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1855 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1856 // frames, and number of buffers = 4. Once the server is running, it
\r
1857 // is not possible to override these values. If the values are not
\r
1858 // specified in the command-line, the JACK server uses default values.
\r
1860 // The JACK server does not have to be running when an instance of
\r
1861 // RtApiJack is created, though the function getDeviceCount() will
\r
1862 // report 0 devices found until JACK has been started. When no
\r
1863 // devices are available (i.e., the JACK server is not running), a
\r
1864 // stream cannot be opened.
\r
1866 #include <jack/jack.h>
\r
1867 #include <unistd.h>
\r
1870 // A structure to hold various information related to the Jack API
\r
1871 // implementation.
\r
// Per-stream bookkeeping for the JACK backend: the client connection, the
// registered ports and device names for playback (index 0) and capture
// (index 1), plus drain/stop coordination state shared with the process
// callback.
// NOTE(review): the constructor below initializes xrun[0]/xrun[1], so the
// original struct declares a `bool xrun[2]` member on a line missing from
// this extraction; the closing `};` is likewise not visible here.
1872 struct JackHandle {
\r
1873 jack_client_t *client;
\r
1874 jack_port_t **ports[2];
\r
1875 std::string deviceName[2];
\r
1877 pthread_cond_t condition;
\r
1878 int drainCounter; // Tracks callback counts when draining
\r
1879 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1882 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler. Installed via jack_set_error_function() in the
// RtApiJack constructor (non-debug builds) to suppress JACK's internal
// error printing. The parameter (the error message) is intentionally
// unnamed and ignored.
// Fix: dropped the stray empty statement (`;` after the function body),
// which is not idiomatic and triggers extra-semicolon pedantic warnings.
static void jackSilentError( const char * ) {}
\r
// Constructor: no stream state to initialize. In non-debug builds, install
// the no-op jackSilentError handler so JACK's internal error messages are
// suppressed.
// NOTE(review): the closing #endif and brace are on lines missing from this
// extraction.
1887 RtApiJack :: RtApiJack()
\r
1889 // Nothing to do here.
\r
1890 #if !defined(__RTAUDIO_DEBUG__)
\r
1891 // Turn off Jack's internal error reporting.
\r
1892 jack_set_error_function( &jackSilentError );
\r
// Destructor: ensure any open stream is torn down (ports freed, client
// closed) before this API object is destroyed.
1896 RtApiJack :: ~RtApiJack()
\r
1898 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count JACK "devices" by opening a throwaway client and scanning all port
// names: each distinct client-name prefix (text up to and including the
// first ':') counts as one device. Returns 0 when no JACK server is
// reachable (JackNoStartServer prevents auto-spawning one).
// NOTE(review): lossy extraction -- the lines that guard a NULL `ports`
// result, open the do { } loop, increment nDevices, free the port list and
// return nDevices are not visible here; confirm against the full source.
1901 unsigned int RtApiJack :: getDeviceCount( void )
\r
1903 // See if we can become a jack client.
\r
1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1905 jack_status_t *status = NULL;
\r
1906 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1907 if ( client == 0 ) return 0;
\r
1909 const char **ports;
\r
1910 std::string port, previousPort;
\r
1911 unsigned int nChannels = 0, nDevices = 0;
\r
1912 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1914 // Parse the port names up to the first colon (:).
\r
1915 size_t iColon = 0;
\r
1917 port = (char *) ports[ nChannels ];
\r
1918 iColon = port.find(":");
\r
1919 if ( iColon != std::string::npos ) {
\r
1920 port = port.substr( 0, iColon + 1 );
\r
1921 if ( port != previousPort ) {
\r
1923 previousPort = port;
\r
1926 } while ( ports[++nChannels] );
\r
1930 jack_client_close( client );
\r
// Probe one JACK "device" (a distinct port-name prefix): resolve its name
// by index, then fill in the server sample rate, output channels (JACK
// input ports), input channels (JACK output ports), duplex channel count,
// native format (always float32) and default-device flags. Opens and
// closes its own throwaway client.
// NOTE(review): lossy extraction -- lines that return early after warnings,
// reset nChannels before the input-port count, guard NULL `ports` results,
// free the port lists and return `info` are not visible here; confirm
// against the full source.
1934 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1936 RtAudio::DeviceInfo info;
\r
1937 info.probed = false;
\r
1939 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1940 jack_status_t *status = NULL;
\r
1941 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1942 if ( client == 0 ) {
\r
1943 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1944 error( RtAudioError::WARNING );
\r
1948 const char **ports;
\r
1949 std::string port, previousPort;
\r
1950 unsigned int nPorts = 0, nDevices = 0;
\r
1951 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1953 // Parse the port names up to the first colon (:).
\r
1954 size_t iColon = 0;
\r
1956 port = (char *) ports[ nPorts ];
\r
1957 iColon = port.find(":");
\r
1958 if ( iColon != std::string::npos ) {
\r
1959 port = port.substr( 0, iColon );
\r
1960 if ( port != previousPort ) {
\r
1961 if ( nDevices == device ) info.name = port;
\r
1963 previousPort = port;
\r
1966 } while ( ports[++nPorts] );
\r
1970 if ( device >= nDevices ) {
\r
1971 jack_client_close( client );
\r
1972 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1973 error( RtAudioError::INVALID_USE );
\r
1977 // Get the current jack server sample rate.
\r
1978 info.sampleRates.clear();
\r
1979 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1981 // Count the available ports containing the client name as device
\r
1982 // channels. Jack "input ports" equal RtAudio output channels.
\r
1983 unsigned int nChannels = 0;
\r
1984 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1986 while ( ports[ nChannels ] ) nChannels++;
\r
1988 info.outputChannels = nChannels;
\r
1991 // Jack "output ports" equal RtAudio input channels.
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.inputChannels = nChannels;
\r
2000 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2001 jack_client_close(client);
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 // If device opens for both playback and capture, we determine the channels.
\r
2008 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2009 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2011 // Jack always uses 32-bit floats.
\r
2012 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2014 // Jack doesn't provide default devices so we'll use the first available one.
\r
2015 if ( device == 0 && info.outputChannels > 0 )
\r
2016 info.isDefaultOutput = true;
\r
2017 if ( device == 0 && info.inputChannels > 0 )
\r
2018 info.isDefaultInput = true;
\r
2020 jack_client_close(client);
\r
2021 info.probed = true;
\r
// JACK process-callback trampoline: recovers the RtApiJack instance from
// the CallbackInfo pointer and forwards the frame count to callbackEvent().
// Returning nonzero tells JACK the client failed to process this cycle.
// NOTE(review): the `return 0;` success path and braces are on lines
// missing from this extraction.
2025 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2027 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2029 RtApiJack *object = (RtApiJack *) info->object;
\r
2030 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
// Thread entry point used to close the stream from outside the JACK
// callback context (see comment below): closes the stream, then exits the
// helper thread.
2035 // This function will be called by a spawned thread when the Jack
\r
2036 // server signals that it is shutting down. It is necessary to handle
\r
2037 // it this way because the jackShutdown() function must return before
\r
2038 // the jack_deactivate() function (in closeStream()) will return.
\r
2039 static void *jackCloseStream( void *ptr )
\r
2041 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2042 RtApiJack *object = (RtApiJack *) info->object;
\r
2044 object->closeStream();
\r
2046 pthread_exit( NULL );
\r
// JACK on-shutdown callback: if the stream is still running (i.e. this is
// a real server shutdown rather than our own deactivate), spawn a helper
// thread to close the stream, since closeStream() cannot be called from
// within this callback (see jackCloseStream above).
2048 static void jackShutdown( void *infoPointer )
\r
2050 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 // Check current stream state. If stopped, then we'll assume this
\r
2054 // was called as a result of a call to RtApiJack::stopStream (the
\r
2055 // deactivation of a client handle causes this function to be called).
\r
2056 // If not, we'll assume the Jack server is shutting down or some
\r
2057 // other problem occurred and we should close the stream.
\r
2058 if ( object->isStreamRunning() == false ) return;
\r
2060 ThreadHandle threadId;
\r
2061 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2062 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// JACK xrun callback: latch an over/underflow flag for whichever direction
// has registered ports, so callbackEvent() can report it to the user.
// NOTE(review): probeDeviceOpen registers this callback with
// `(void *) &handle` (a JackHandle**, see line marked 2271 below), but this
// function casts the pointer to JackHandle* -- a type mismatch that makes
// these writes land in the wrong object. Upstream RtAudio later fixed the
// registration to pass `(void *) handle`; confirm and fix at the
// registration site.
2065 static int jackXrun( void *infoPointer )
\r
2067 JackHandle *handle = (JackHandle *) infoPointer;
\r
2069 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2070 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect to the
// server (first pass only), resolve the device name by index, validate the
// requested channel count and sample rate against the server, set up
// format/interleave/conversion flags, allocate the JackHandle plus user
// and device buffers, install the JACK callbacks and register our ports.
// The tail of this block (from the pthread_cond_destroy line) is the
// shared error-cleanup path.
// NOTE(review): lossy extraction -- else branches, closing braces,
// `error:` label, `goto error` jumps, `char label[64];` (used by the
// snprintf calls) and `return SUCCESS/FAILURE` lines are not visible here;
// confirm against the full source.
2075 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2076 unsigned int firstChannel, unsigned int sampleRate,
\r
2077 RtAudioFormat format, unsigned int *bufferSize,
\r
2078 RtAudio::StreamOptions *options )
\r
2080 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2082 // Look for jack server and try to become a client (only do once per stream).
\r
2083 jack_client_t *client = 0;
\r
2084 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2085 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2086 jack_status_t *status = NULL;
\r
2087 if ( options && !options->streamName.empty() )
\r
2088 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2090 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2091 if ( client == 0 ) {
\r
2092 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2093 error( RtAudioError::WARNING );
\r
2098 // The handle must have been created on an earlier pass.
\r
2099 client = handle->client;
\r
// Resolve the device index to a client-name prefix, as in getDeviceInfo().
2102 const char **ports;
\r
2103 std::string port, previousPort, deviceName;
\r
2104 unsigned int nPorts = 0, nDevices = 0;
\r
2105 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2107 // Parse the port names up to the first colon (:).
\r
2108 size_t iColon = 0;
\r
2110 port = (char *) ports[ nPorts ];
\r
2111 iColon = port.find(":");
\r
2112 if ( iColon != std::string::npos ) {
\r
2113 port = port.substr( 0, iColon );
\r
2114 if ( port != previousPort ) {
\r
2115 if ( nDevices == device ) deviceName = port;
\r
2117 previousPort = port;
\r
2120 } while ( ports[++nPorts] );
\r
2124 if ( device >= nDevices ) {
\r
2125 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2129 // Count the available ports containing the client name as device
\r
2130 // channels. Jack "input ports" equal RtAudio output channels.
\r
2131 unsigned int nChannels = 0;
\r
2132 unsigned long flag = JackPortIsInput;
\r
2133 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2134 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2136 while ( ports[ nChannels ] ) nChannels++;
\r
2140 // Compare the jack ports for specified client to the requested number of channels.
\r
2141 if ( nChannels < (channels + firstChannel) ) {
\r
2142 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2143 errorText_ = errorStream_.str();
\r
2147 // Check the jack server sample rate.
\r
2148 unsigned int jackRate = jack_get_sample_rate( client );
\r
2149 if ( sampleRate != jackRate ) {
\r
2150 jack_client_close( client );
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2155 stream_.sampleRate = jackRate;
\r
2157 // Get the latency of the JACK port.
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2159 if ( ports[ firstChannel ] ) {
\r
2160 // Added by Ge Wang
\r
2161 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2162 // the range (usually the min and max are equal)
\r
2163 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2164 // get the latency range
\r
2165 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2166 // be optimistic, use the min!
\r
2167 stream_.latency[mode] = latrange.min;
\r
2168 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2172 // The jack server always uses 32-bit floating-point data.
\r
2173 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2174 stream_.userFormat = format;
\r
2176 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2177 else stream_.userInterleaved = true;
\r
2179 // Jack always uses non-interleaved buffers.
\r
2180 stream_.deviceInterleaved[mode] = false;
\r
2182 // Jack always provides host byte-ordered data.
\r
2183 stream_.doByteSwap[mode] = false;
\r
2185 // Get the buffer size. The buffer size and number of buffers
\r
2186 // (periods) is set when the jack server is started.
\r
2187 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2188 *bufferSize = stream_.bufferSize;
\r
2190 stream_.nDeviceChannels[mode] = channels;
\r
2191 stream_.nUserChannels[mode] = channels;
\r
2193 // Set flags for buffer conversion.
\r
2194 stream_.doConvertBuffer[mode] = false;
\r
2195 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2197 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2198 stream_.nUserChannels[mode] > 1 )
\r
2199 stream_.doConvertBuffer[mode] = true;
\r
2201 // Allocate our JackHandle structure for the stream.
\r
2202 if ( handle == 0 ) {
\r
2204 handle = new JackHandle;
\r
2206 catch ( std::bad_alloc& ) {
\r
2207 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2211 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2215 stream_.apiHandle = (void *) handle;
\r
2216 handle->client = client;
\r
2218 handle->deviceName[mode] = deviceName;
\r
2220 // Allocate necessary internal buffers.
\r
2221 unsigned long bufferBytes;
\r
2222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2224 if ( stream_.userBuffer[mode] == NULL ) {
\r
2225 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2229 if ( stream_.doConvertBuffer[mode] ) {
\r
// Reuse an existing (duplex) device buffer when it is already big enough.
2231 bool makeBuffer = true;
\r
2232 if ( mode == OUTPUT )
\r
2233 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2234 else { // mode == INPUT
\r
2235 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2236 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2237 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2238 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2242 if ( makeBuffer ) {
\r
2243 bufferBytes *= *bufferSize;
\r
2244 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2245 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2246 if ( stream_.deviceBuffer == NULL ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2253 // Allocate memory for the Jack ports (channels) identifiers.
\r
2254 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2255 if ( handle->ports[mode] == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2260 stream_.device[mode] = device;
\r
2261 stream_.channelOffset[mode] = firstChannel;
\r
2262 stream_.state = STREAM_STOPPED;
\r
2263 stream_.callbackInfo.object = (void *) this;
\r
2265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2266 // We had already set up the stream for output.
\r
2267 stream_.mode = DUPLEX;
\r
2269 stream_.mode = mode;
\r
2270 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
// NOTE(review): this passes the ADDRESS of the local `handle` pointer
// (JackHandle**), but jackXrun() casts its argument to JackHandle*.
// Upstream RtAudio later changed this to `(void *) handle` -- confirm and
// apply that fix here.
2271 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2272 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2275 // Register our ports.
\r
2277 if ( mode == OUTPUT ) {
\r
2278 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2279 snprintf( label, 64, "outport %d", i );
\r
2280 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2281 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2285 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2286 snprintf( label, 64, "inport %d", i );
\r
2287 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2288 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2292 // Setup the buffer conversion information structure. We don't use
\r
2293 // buffers to do channel offsets, so we override that parameter
\r
2295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// ---- Error-cleanup path: release everything allocated above. ----
2301 pthread_cond_destroy( &handle->condition );
\r
2302 jack_client_close( handle->client );
\r
2304 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2305 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2308 stream_.apiHandle = 0;
\r
2311 for ( int i=0; i<2; i++ ) {
\r
2312 if ( stream_.userBuffer[i] ) {
\r
2313 free( stream_.userBuffer[i] );
\r
2314 stream_.userBuffer[i] = 0;
\r
2318 if ( stream_.deviceBuffer ) {
\r
2319 free( stream_.deviceBuffer );
\r
2320 stream_.deviceBuffer = 0;
\r
// Tear down the stream: deactivate the JACK client if still running, close
// the client connection, then free the port arrays, condition variable,
// JackHandle, user buffers and device buffer, and reset mode/state.
// NOTE(review): lines that delete the JackHandle itself and the closing
// braces are not visible in this extraction.
2326 void RtApiJack :: closeStream( void )
\r
2328 if ( stream_.state == STREAM_CLOSED ) {
\r
2329 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2330 error( RtAudioError::WARNING );
\r
2334 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2337 if ( stream_.state == STREAM_RUNNING )
\r
2338 jack_deactivate( handle->client );
\r
2340 jack_client_close( handle->client );
\r
2344 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2345 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2346 pthread_cond_destroy( &handle->condition );
\r
2348 stream_.apiHandle = 0;
\r
2351 for ( int i=0; i<2; i++ ) {
\r
2352 if ( stream_.userBuffer[i] ) {
\r
2353 free( stream_.userBuffer[i] );
\r
2354 stream_.userBuffer[i] = 0;
\r
2358 if ( stream_.deviceBuffer ) {
\r
2359 free( stream_.deviceBuffer );
\r
2360 stream_.deviceBuffer = 0;
\r
2363 stream_.mode = UNINITIALIZED;
\r
2364 stream_.state = STREAM_CLOSED;
\r
// Activate the JACK client and wire our registered ports to the device's
// ports: our output ports connect to the device's input ports (and vice
// versa for capture), starting at the configured channel offset. On any
// failure, falls through to report a SYSTEM_ERROR.
// NOTE(review): lossy extraction -- `goto unlock`-style jumps, result
// checks inside the loops, `free( ports )` calls and closing braces are on
// lines not visible here; confirm against the full source.
2367 void RtApiJack :: startStream( void )
\r
2370 if ( stream_.state == STREAM_RUNNING ) {
\r
2371 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2372 error( RtAudioError::WARNING );
\r
2376 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2377 int result = jack_activate( handle->client );
\r
2379 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2383 const char **ports;
\r
2385 // Get the list of available ports.
\r
2386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2388 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2389 if ( ports == NULL) {
\r
2390 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2394 // Now make the port connections. Since RtAudio wasn't designed to
\r
2395 // allow the user to select particular channels of a device, we'll
\r
2396 // just open the first "nChannels" ports with offset.
\r
2397 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2399 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2400 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2403 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2410 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2418 // Now make the port connections. See note above.
\r
2419 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2421 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2422 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2425 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
// Reset drain state and mark the stream running before returning.
2432 handle->drainCounter = 0;
\r
2433 handle->internalDrain = false;
\r
2434 stream_.state = STREAM_RUNNING;
\r
2437 if ( result == 0 ) return;
\r
2438 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully: for output/duplex streams that are not
// already draining, request a two-cycle drain (drainCounter = 2) and block
// on the condition variable until the process callback signals that the
// drain completed, then deactivate the JACK client.
// NOTE(review): assumes stream_.mutex is held around the condition wait as
// pthread_cond_wait requires -- the locking lines are not visible in this
// extraction; confirm against the full source.
2441 void RtApiJack :: stopStream( void )
\r
2444 if ( stream_.state == STREAM_STOPPED ) {
\r
2445 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2446 error( RtAudioError::WARNING );
\r
2450 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2453 if ( handle->drainCounter == 0 ) {
\r
2454 handle->drainCounter = 2;
\r
2455 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2459 jack_deactivate( handle->client );
\r
2460 stream_.state = STREAM_STOPPED;
\r
// Abort the stream immediately: skip the graceful drain by setting
// drainCounter = 2 up front (so the callback zeros its output at once).
// NOTE(review): the original presumably then calls stopStream() on a line
// missing from this extraction -- confirm against the full source.
2463 void RtApiJack :: abortStream( void )
\r
2466 if ( stream_.state == STREAM_STOPPED ) {
\r
2467 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2468 error( RtAudioError::WARNING );
\r
2472 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2473 handle->drainCounter = 2;
\r
// Thread entry point used to stop the stream from outside the JACK
// callback context (see comment below): stops the stream, then exits the
// helper thread.
2478 // This function will be called by a spawned thread when the user
\r
2479 // callback function signals that the stream should be stopped or
\r
2480 // aborted. It is necessary to handle it this way because the
\r
2481 // callbackEvent() function must return before the jack_deactivate()
\r
2482 // function will return.
\r
2483 static void *jackStopStream( void *ptr )
\r
2485 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2486 RtApiJack *object = (RtApiJack *) info->object;
\r
2488 object->stopStream();
\r
2489 pthread_exit( NULL );
\r
// Per-cycle JACK processing: finish a pending drain (signal stopStream or
// spawn jackStopStream), invoke the user callback to get fresh data, then
// copy/convert the non-interleaved float32 JACK port buffers to and from
// the user/device buffers, and advance the stream time.
// NOTE(review): the warning strings below say "RtApiCore::callbackEvent()"
// but this is RtApiJack -- a copy-paste defect in the error messages worth
// fixing when the full source is available.
// NOTE(review): lossy extraction -- early returns after the warnings, the
// `ThreadHandle id;` declaration used by the pthread_create below, and
// closing braces are on lines not visible here.
2492 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2494 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2495 if ( stream_.state == STREAM_CLOSED ) {
\r
2496 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2497 error( RtAudioError::WARNING );
\r
2500 if ( stream_.bufferSize != nframes ) {
\r
2501 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2502 error( RtAudioError::WARNING );
\r
2506 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 // Check if we were draining the stream and signal is finished.
\r
2510 if ( handle->drainCounter > 3 ) {
\r
2511 ThreadHandle threadId;
\r
2513 stream_.state = STREAM_STOPPING;
\r
2514 if ( handle->internalDrain == true )
\r
2515 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2517 pthread_cond_signal( &handle->condition );
\r
2521 // Invoke user callback first, to get fresh output data.
\r
2522 if ( handle->drainCounter == 0 ) {
\r
2523 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2524 double streamTime = getStreamTime();
\r
2525 RtAudioStreamStatus status = 0;
\r
2526 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2527 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2528 handle->xrun[0] = false;
\r
2530 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2531 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2532 handle->xrun[1] = false;
\r
2534 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2535 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort (stop now); 1 = drain output then stop.
2536 if ( cbReturnValue == 2 ) {
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 handle->drainCounter = 2;
\r
2540 pthread_create( &id, NULL, jackStopStream, info );
\r
2543 else if ( cbReturnValue == 1 ) {
\r
2544 handle->drainCounter = 1;
\r
2545 handle->internalDrain = true;
\r
// bufferBytes is the byte size of one port buffer (nframes float32
// samples); it doubles as the per-channel byte stride into the packed
// user/device buffers below.
2549 jack_default_audio_sample_t *jackbuffer;
\r
2550 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2553 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memset( jackbuffer, 0, bufferBytes );
\r
2561 else if ( stream_.doConvertBuffer[0] ) {
\r
2563 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2565 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2566 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2567 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2570 else { // no buffer conversion
\r
2571 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2572 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2573 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2578 // Don't bother draining input
\r
2579 if ( handle->drainCounter ) {
\r
2580 handle->drainCounter++;
\r
2584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2586 if ( stream_.doConvertBuffer[1] ) {
\r
2587 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2588 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2589 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2593 else { // no buffer conversion
\r
2594 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2595 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2596 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2602 RtApi::tickStreamTime();
\r
2605 //******************** End of __UNIX_JACK__ *********************//
\r
2608 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2610 // The ASIO API is designed around a callback scheme, so this
\r
2611 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2612 // Jack. The primary constraint with ASIO is that it only allows
\r
2613 // access to a single driver at a time. Thus, it is not possible to
\r
2614 // have more than one simultaneous RtAudio stream.
\r
2616 // This implementation also requires a number of external ASIO files
\r
2617 // and a few global variables. The ASIO callback scheme does not
\r
2618 // allow for the passing of user data, so we must create a global
\r
2619 // pointer to our callbackInfo structure.
\r
2621 // On unix systems, we make use of a pthread condition variable.
\r
2622 // Since there is no equivalent in Windows, I hacked something based
\r
2623 // on information found in
\r
2624 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
// ASIO SDK headers and file-scope state. ASIO's callback scheme cannot
// carry user data, so the driver list, callbacks table, driver info,
// callback-info pointer and xrun flag live in file-scope statics (only one
// ASIO stream can exist at a time).
2626 #include "asiosys.h"
\r
2628 #include "iasiothiscallresolver.h"
\r
2629 #include "asiodrivers.h"
\r
2632 static AsioDrivers drivers;
\r
2633 static ASIOCallbacks asioCallbacks;
\r
2634 static ASIODriverInfo driverInfo;
\r
2635 static CallbackInfo *asioCallbackInfo;
\r
2636 static bool asioXRun;
\r
// Per-stream bookkeeping for the ASIO backend: drain/stop coordination
// state plus the ASIO buffer-info array.
// NOTE(review): the member-initializer line below is the constructor; its
// `AsioHandle()` header, any additional members (the original likely also
// has xrun/condition fields) and the closing `};` are on lines missing
// from this extraction.
2638 struct AsioHandle {
\r
2639 int drainCounter; // Tracks callback counts when draining
\r
2640 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2641 ASIOBufferInfo *bufferInfos;
\r
2645 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
// Forward declarations for the ASIO helper callbacks defined later in this
// section (error-string lookup, sample-rate-change and driver-message
// handlers).
2648 // Function declarations (definitions at end of section)
\r
2649 static const char* getAsioErrorString( ASIOError result );
\r
2650 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2651 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM for this (single-threaded-apartment) thread,
// warn if that fails, reset the driver list, and prime the ASIO driver
// info (version 2, foreground window as the system reference).
2653 RtApiAsio :: RtApiAsio()
\r
2655 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2656 // CoInitialize beforehand, but it must be for appartment threading
\r
2657 // (in which case, CoInitilialize will return S_FALSE here).
\r
2658 coInitialized_ = false;
\r
2659 HRESULT hr = CoInitialize( NULL );
\r
2660 if ( FAILED(hr) ) {
\r
2661 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2662 error( RtAudioError::WARNING );
\r
2664 coInitialized_ = true;
\r
2666 drivers.removeCurrentDriver();
\r
2667 driverInfo.asioVersion = 2;
\r
2669 // See note in DirectSound implementation about GetDesktopWindow().
\r
2670 driverInfo.sysRef = GetForegroundWindow();
\r
2673 RtApiAsio :: ~RtApiAsio()
\r
2675 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2676 if ( coInitialized_ ) CoUninitialize();
\r
2679 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2681 return (unsigned int) drivers.asioGetNumDev();
\r
2684 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2686 RtAudio::DeviceInfo info;
\r
2687 info.probed = false;
\r
2690 unsigned int nDevices = getDeviceCount();
\r
2691 if ( nDevices == 0 ) {
\r
2692 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2693 error( RtAudioError::INVALID_USE );
\r
2697 if ( device >= nDevices ) {
\r
2698 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2699 error( RtAudioError::INVALID_USE );
\r
2703 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2704 if ( stream_.state != STREAM_CLOSED ) {
\r
2705 if ( device >= devices_.size() ) {
\r
2706 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2707 error( RtAudioError::WARNING );
\r
2710 return devices_[ device ];
\r
2713 char driverName[32];
\r
2714 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2715 if ( result != ASE_OK ) {
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtAudioError::WARNING );
\r
2722 info.name = driverName;
\r
2724 if ( !drivers.loadDriver( driverName ) ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 result = ASIOInit( &driverInfo );
\r
2732 if ( result != ASE_OK ) {
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 // Determine the device channel information.
\r
2740 long inputChannels, outputChannels;
\r
2741 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2742 if ( result != ASE_OK ) {
\r
2743 drivers.removeCurrentDriver();
\r
2744 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2745 errorText_ = errorStream_.str();
\r
2746 error( RtAudioError::WARNING );
\r
2750 info.outputChannels = outputChannels;
\r
2751 info.inputChannels = inputChannels;
\r
2752 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2753 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2755 // Determine the supported sample rates.
\r
2756 info.sampleRates.clear();
\r
2757 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2758 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2759 if ( result == ASE_OK )
\r
2760 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2763 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2764 ASIOChannelInfo channelInfo;
\r
2765 channelInfo.channel = 0;
\r
2766 channelInfo.isInput = true;
\r
2767 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2768 result = ASIOGetChannelInfo( &channelInfo );
\r
2769 if ( result != ASE_OK ) {
\r
2770 drivers.removeCurrentDriver();
\r
2771 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2772 errorText_ = errorStream_.str();
\r
2773 error( RtAudioError::WARNING );
\r
2777 info.nativeFormats = 0;
\r
2778 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT16;
\r
2780 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_SINT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2784 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2785 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2786 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2787 info.nativeFormats |= RTAUDIO_SINT24;
\r
2789 if ( info.outputChannels > 0 )
\r
2790 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2791 if ( info.inputChannels > 0 )
\r
2792 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2794 info.probed = true;
\r
2795 drivers.removeCurrentDriver();
\r
2799 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2801 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2802 object->callbackEvent( index );
\r
2805 void RtApiAsio :: saveDeviceInfo( void )
\r
2809 unsigned int nDevices = getDeviceCount();
\r
2810 devices_.resize( nDevices );
\r
2811 for ( unsigned int i=0; i<nDevices; i++ )
\r
2812 devices_[i] = getDeviceInfo( i );
\r
2815 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2816 unsigned int firstChannel, unsigned int sampleRate,
\r
2817 RtAudioFormat format, unsigned int *bufferSize,
\r
2818 RtAudio::StreamOptions *options )
\r
2820 // For ASIO, a duplex stream MUST use the same driver.
\r
2821 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2822 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2826 char driverName[32];
\r
2827 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2834 // Only load the driver once for duplex stream.
\r
2835 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2836 // The getDeviceInfo() function will not work when a stream is open
\r
2837 // because ASIO does not allow multiple devices to run at the same
\r
2838 // time. Thus, we'll probe the system before opening a stream and
\r
2839 // save the results for use by getDeviceInfo().
\r
2840 this->saveDeviceInfo();
\r
2842 if ( !drivers.loadDriver( driverName ) ) {
\r
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2844 errorText_ = errorStream_.str();
\r
2848 result = ASIOInit( &driverInfo );
\r
2849 if ( result != ASE_OK ) {
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2856 // Check the device channel count.
\r
2857 long inputChannels, outputChannels;
\r
2858 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2859 if ( result != ASE_OK ) {
\r
2860 drivers.removeCurrentDriver();
\r
2861 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2862 errorText_ = errorStream_.str();
\r
2866 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2867 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2873 stream_.nDeviceChannels[mode] = channels;
\r
2874 stream_.nUserChannels[mode] = channels;
\r
2875 stream_.channelOffset[mode] = firstChannel;
\r
2877 // Verify the sample rate is supported.
\r
2878 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2879 if ( result != ASE_OK ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Get the current sample rate
\r
2887 ASIOSampleRate currentRate;
\r
2888 result = ASIOGetSampleRate( ¤tRate );
\r
2889 if ( result != ASE_OK ) {
\r
2890 drivers.removeCurrentDriver();
\r
2891 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2892 errorText_ = errorStream_.str();
\r
2896 // Set the sample rate only if necessary
\r
2897 if ( currentRate != sampleRate ) {
\r
2898 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2899 if ( result != ASE_OK ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2902 errorText_ = errorStream_.str();
\r
2907 // Determine the driver data type.
\r
2908 ASIOChannelInfo channelInfo;
\r
2909 channelInfo.channel = 0;
\r
2910 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2911 else channelInfo.isInput = true;
\r
2912 result = ASIOGetChannelInfo( &channelInfo );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Assuming WINDOWS host is always little-endian.
\r
2921 stream_.doByteSwap[mode] = false;
\r
2922 stream_.userFormat = format;
\r
2923 stream_.deviceFormat[mode] = 0;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2926 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2930 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2934 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2938 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2942 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2946 drivers.removeCurrentDriver();
\r
2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2948 errorText_ = errorStream_.str();
\r
2952 // Set the buffer size. For a duplex stream, this will end up
\r
2953 // setting the buffer size based on the input constraints, which
\r
2955 long minSize, maxSize, preferSize, granularity;
\r
2956 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2957 if ( result != ASE_OK ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2960 errorText_ = errorStream_.str();
\r
2964 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2965 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2966 else if ( granularity == -1 ) {
\r
2967 // Make sure bufferSize is a power of two.
\r
2968 int log2_of_min_size = 0;
\r
2969 int log2_of_max_size = 0;
\r
2971 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2972 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2973 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2976 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2977 int min_delta_num = log2_of_min_size;
\r
2979 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2980 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2981 if (current_delta < min_delta) {
\r
2982 min_delta = current_delta;
\r
2983 min_delta_num = i;
\r
2987 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2991 else if ( granularity != 0 ) {
\r
2992 // Set to an even multiple of granularity, rounding up.
\r
2993 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2997 drivers.removeCurrentDriver();
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3002 stream_.bufferSize = *bufferSize;
\r
3003 stream_.nBuffers = 2;
\r
3005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3006 else stream_.userInterleaved = true;
\r
3008 // ASIO always uses non-interleaved buffers.
\r
3009 stream_.deviceInterleaved[mode] = false;
\r
3011 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3013 if ( handle == 0 ) {
\r
3015 handle = new AsioHandle;
\r
3017 catch ( std::bad_alloc& ) {
\r
3018 //if ( handle == NULL ) {
\r
3019 drivers.removeCurrentDriver();
\r
3020 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3023 handle->bufferInfos = 0;
\r
3025 // Create a manual-reset event.
\r
3026 handle->condition = CreateEvent( NULL, // no security
\r
3027 TRUE, // manual-reset
\r
3028 FALSE, // non-signaled initially
\r
3029 NULL ); // unnamed
\r
3030 stream_.apiHandle = (void *) handle;
\r
3033 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3034 // and output separately, we'll have to dispose of previously
\r
3035 // created output buffers for a duplex stream.
\r
3036 long inputLatency, outputLatency;
\r
3037 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3038 ASIODisposeBuffers();
\r
3039 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3042 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3043 bool buffersAllocated = false;
\r
3044 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3045 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3046 if ( handle->bufferInfos == NULL ) {
\r
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3048 errorText_ = errorStream_.str();
\r
3052 ASIOBufferInfo *infos;
\r
3053 infos = handle->bufferInfos;
\r
3054 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3055 infos->isInput = ASIOFalse;
\r
3056 infos->channelNum = i + stream_.channelOffset[0];
\r
3057 infos->buffers[0] = infos->buffers[1] = 0;
\r
3059 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3060 infos->isInput = ASIOTrue;
\r
3061 infos->channelNum = i + stream_.channelOffset[1];
\r
3062 infos->buffers[0] = infos->buffers[1] = 0;
\r
3065 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3066 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3067 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3068 asioCallbacks.asioMessage = &asioMessages;
\r
3069 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3070 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3071 if ( result != ASE_OK ) {
\r
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3073 errorText_ = errorStream_.str();
\r
3076 buffersAllocated = true;
\r
3078 // Set flags for buffer conversion.
\r
3079 stream_.doConvertBuffer[mode] = false;
\r
3080 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3081 stream_.doConvertBuffer[mode] = true;
\r
3082 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3083 stream_.nUserChannels[mode] > 1 )
\r
3084 stream_.doConvertBuffer[mode] = true;
\r
3086 // Allocate necessary internal buffers
\r
3087 unsigned long bufferBytes;
\r
3088 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3089 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3090 if ( stream_.userBuffer[mode] == NULL ) {
\r
3091 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3095 if ( stream_.doConvertBuffer[mode] ) {
\r
3097 bool makeBuffer = true;
\r
3098 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3099 if ( mode == INPUT ) {
\r
3100 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3101 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3102 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3106 if ( makeBuffer ) {
\r
3107 bufferBytes *= *bufferSize;
\r
3108 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3109 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3110 if ( stream_.deviceBuffer == NULL ) {
\r
3111 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.state = STREAM_STOPPED;
\r
3120 asioCallbackInfo = &stream_.callbackInfo;
\r
3121 stream_.callbackInfo.object = (void *) this;
\r
3122 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3123 // We had already set up an output stream.
\r
3124 stream_.mode = DUPLEX;
\r
3126 stream_.mode = mode;
\r
3128 // Determine device latencies
\r
3129 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3130 if ( result != ASE_OK ) {
\r
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3132 errorText_ = errorStream_.str();
\r
3133 error( RtAudioError::WARNING); // warn but don't fail
\r
3136 stream_.latency[0] = outputLatency;
\r
3137 stream_.latency[1] = inputLatency;
\r
3140 // Setup the buffer conversion information structure. We don't use
\r
3141 // buffers to do channel offsets, so we override that parameter
\r
3143 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3148 if ( buffersAllocated )
\r
3149 ASIODisposeBuffers();
\r
3150 drivers.removeCurrentDriver();
\r
3153 CloseHandle( handle->condition );
\r
3154 if ( handle->bufferInfos )
\r
3155 free( handle->bufferInfos );
\r
3157 stream_.apiHandle = 0;
\r
3160 for ( int i=0; i<2; i++ ) {
\r
3161 if ( stream_.userBuffer[i] ) {
\r
3162 free( stream_.userBuffer[i] );
\r
3163 stream_.userBuffer[i] = 0;
\r
3167 if ( stream_.deviceBuffer ) {
\r
3168 free( stream_.deviceBuffer );
\r
3169 stream_.deviceBuffer = 0;
\r
3175 void RtApiAsio :: closeStream()
\r
3177 if ( stream_.state == STREAM_CLOSED ) {
\r
3178 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3179 error( RtAudioError::WARNING );
\r
3183 if ( stream_.state == STREAM_RUNNING ) {
\r
3184 stream_.state = STREAM_STOPPED;
\r
3187 ASIODisposeBuffers();
\r
3188 drivers.removeCurrentDriver();
\r
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3192 CloseHandle( handle->condition );
\r
3193 if ( handle->bufferInfos )
\r
3194 free( handle->bufferInfos );
\r
3196 stream_.apiHandle = 0;
\r
3199 for ( int i=0; i<2; i++ ) {
\r
3200 if ( stream_.userBuffer[i] ) {
\r
3201 free( stream_.userBuffer[i] );
\r
3202 stream_.userBuffer[i] = 0;
\r
3206 if ( stream_.deviceBuffer ) {
\r
3207 free( stream_.deviceBuffer );
\r
3208 stream_.deviceBuffer = 0;
\r
3211 stream_.mode = UNINITIALIZED;
\r
3212 stream_.state = STREAM_CLOSED;
\r
3215 bool stopThreadCalled = false;
\r
3217 void RtApiAsio :: startStream()
\r
3220 if ( stream_.state == STREAM_RUNNING ) {
\r
3221 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3222 error( RtAudioError::WARNING );
\r
3226 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3227 ASIOError result = ASIOStart();
\r
3228 if ( result != ASE_OK ) {
\r
3229 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3230 errorText_ = errorStream_.str();
\r
3234 handle->drainCounter = 0;
\r
3235 handle->internalDrain = false;
\r
3236 ResetEvent( handle->condition );
\r
3237 stream_.state = STREAM_RUNNING;
\r
3241 stopThreadCalled = false;
\r
3243 if ( result == ASE_OK ) return;
\r
3244 error( RtAudioError::SYSTEM_ERROR );
\r
3247 void RtApiAsio :: stopStream()
\r
3250 if ( stream_.state == STREAM_STOPPED ) {
\r
3251 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3252 error( RtAudioError::WARNING );
\r
3256 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3258 if ( handle->drainCounter == 0 ) {
\r
3259 handle->drainCounter = 2;
\r
3260 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3264 stream_.state = STREAM_STOPPED;
\r
3266 ASIOError result = ASIOStop();
\r
3267 if ( result != ASE_OK ) {
\r
3268 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3269 errorText_ = errorStream_.str();
\r
3272 if ( result == ASE_OK ) return;
\r
3273 error( RtAudioError::SYSTEM_ERROR );
\r
3276 void RtApiAsio :: abortStream()
\r
3279 if ( stream_.state == STREAM_STOPPED ) {
\r
3280 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3281 error( RtAudioError::WARNING );
\r
3285 // The following lines were commented-out because some behavior was
\r
3286 // noted where the device buffers need to be zeroed to avoid
\r
3287 // continuing sound, even when the device buffers are completely
\r
3288 // disposed. So now, calling abort is the same as calling stop.
\r
3289 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3290 // handle->drainCounter = 2;
\r
3294 // This function will be called by a spawned thread when the user
\r
3295 // callback function signals that the stream should be stopped or
\r
3296 // aborted. It is necessary to handle it this way because the
\r
3297 // callbackEvent() function must return before the ASIOStop()
\r
3298 // function will return.
\r
3299 static unsigned __stdcall asioStopStream( void *ptr )
\r
3301 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3302 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3304 object->stopStream();
\r
3305 _endthreadex( 0 );
\r
3309 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3311 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3312 if ( stream_.state == STREAM_CLOSED ) {
\r
3313 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3314 error( RtAudioError::WARNING );
\r
3318 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 // Check if we were draining the stream and signal if finished.
\r
3322 if ( handle->drainCounter > 3 ) {
\r
3324 stream_.state = STREAM_STOPPING;
\r
3325 if ( handle->internalDrain == false )
\r
3326 SetEvent( handle->condition );
\r
3327 else { // spawn a thread to stop the stream
\r
3328 unsigned threadId;
\r
3329 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3330 &stream_.callbackInfo, 0, &threadId );
\r
3335 // Invoke user callback to get fresh output data UNLESS we are
\r
3336 // draining stream.
\r
3337 if ( handle->drainCounter == 0 ) {
\r
3338 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3339 double streamTime = getStreamTime();
\r
3340 RtAudioStreamStatus status = 0;
\r
3341 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3342 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3345 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3346 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3349 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3350 stream_.bufferSize, streamTime, status, info->userData );
\r
3351 if ( cbReturnValue == 2 ) {
\r
3352 stream_.state = STREAM_STOPPING;
\r
3353 handle->drainCounter = 2;
\r
3354 unsigned threadId;
\r
3355 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3356 &stream_.callbackInfo, 0, &threadId );
\r
3359 else if ( cbReturnValue == 1 ) {
\r
3360 handle->drainCounter = 1;
\r
3361 handle->internalDrain = true;
\r
3365 unsigned int nChannels, bufferBytes, i, j;
\r
3366 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3369 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3371 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3373 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3374 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3375 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3379 else if ( stream_.doConvertBuffer[0] ) {
\r
3381 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3382 if ( stream_.doByteSwap[0] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3385 stream_.deviceFormat[0] );
\r
3387 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3388 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3389 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3390 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3396 if ( stream_.doByteSwap[0] )
\r
3397 byteSwapBuffer( stream_.userBuffer[0],
\r
3398 stream_.bufferSize * stream_.nUserChannels[0],
\r
3399 stream_.userFormat );
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3403 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3404 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3410 // Don't bother draining input
\r
3411 if ( handle->drainCounter ) {
\r
3412 handle->drainCounter++;
\r
3416 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3418 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3420 if (stream_.doConvertBuffer[1]) {
\r
3422 // Always interleave ASIO input data.
\r
3423 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3424 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3425 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3426 handle->bufferInfos[i].buffers[bufferIndex],
\r
3430 if ( stream_.doByteSwap[1] )
\r
3431 byteSwapBuffer( stream_.deviceBuffer,
\r
3432 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3433 stream_.deviceFormat[1] );
\r
3434 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3438 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3439 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3440 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3441 handle->bufferInfos[i].buffers[bufferIndex],
\r
3446 if ( stream_.doByteSwap[1] )
\r
3447 byteSwapBuffer( stream_.userBuffer[1],
\r
3448 stream_.bufferSize * stream_.nUserChannels[1],
\r
3449 stream_.userFormat );
\r
3454 // The following call was suggested by Malte Clasen. While the API
\r
3455 // documentation indicates it should not be required, some device
\r
3456 // drivers apparently do not function correctly without it.
\r
3457 ASIOOutputReady();
\r
3459 RtApi::tickStreamTime();
\r
3463 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3465 // The ASIO documentation says that this usually only happens during
\r
3466 // external sync. Audio processing is not stopped by the driver,
\r
3467 // actual sample rate might not have even changed, maybe only the
\r
3468 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3471 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3473 object->stopStream();
\r
3475 catch ( RtAudioError &exception ) {
\r
3476 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3480 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3483 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3487 switch( selector ) {
\r
3488 case kAsioSelectorSupported:
\r
3489 if ( value == kAsioResetRequest
\r
3490 || value == kAsioEngineVersion
\r
3491 || value == kAsioResyncRequest
\r
3492 || value == kAsioLatenciesChanged
\r
3493 // The following three were added for ASIO 2.0, you don't
\r
3494 // necessarily have to support them.
\r
3495 || value == kAsioSupportsTimeInfo
\r
3496 || value == kAsioSupportsTimeCode
\r
3497 || value == kAsioSupportsInputMonitor)
\r
3500 case kAsioResetRequest:
\r
3501 // Defer the task and perform the reset of the driver during the
\r
3502 // next "safe" situation. You cannot reset the driver right now,
\r
3503 // as this code is called from the driver. Reset the driver is
\r
3504 // done by completely destruct is. I.e. ASIOStop(),
\r
3505 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3507 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3510 case kAsioResyncRequest:
\r
3511 // This informs the application that the driver encountered some
\r
3512 // non-fatal data loss. It is used for synchronization purposes
\r
3513 // of different media. Added mainly to work around the Win16Mutex
\r
3514 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3515 // which could lose data because the Mutex was held too long by
\r
3516 // another thread. However a driver can issue it in other
\r
3517 // situations, too.
\r
3518 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3522 case kAsioLatenciesChanged:
\r
3523 // This will inform the host application that the drivers were
\r
3524 // latencies changed. Beware, it this does not mean that the
\r
3525 // buffer sizes have changed! You might need to update internal
\r
3527 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3530 case kAsioEngineVersion:
\r
3531 // Return the supported ASIO version of the host application. If
\r
3532 // a host application does not implement this selector, ASIO 1.0
\r
3533 // is assumed by the driver.
\r
3536 case kAsioSupportsTimeInfo:
\r
3537 // Informs the driver whether the
\r
3538 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3539 // For compatibility with ASIO 1.0 drivers the host application
\r
3540 // should always support the "old" bufferSwitch method, too.
\r
3543 case kAsioSupportsTimeCode:
\r
3544 // Informs the driver whether application is interested in time
\r
3545 // code info. If an application does not need to know about time
\r
3546 // code, the driver has less work to do.
\r
3553 static const char* getAsioErrorString( ASIOError result )
\r
3558 const char*message;
\r
3561 static const Messages m[] =
\r
3563 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3564 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3565 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3566 { ASE_InvalidMode, "Invalid mode." },
\r
3567 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3568 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3569 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3572 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3573 if ( m[i].value == result ) return m[i].message;
\r
3575 return "Unknown error.";
\r
3578 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3582 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3585 // - Introduces support for the Windows WASAPI API
\r
3586 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3587 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3588 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3593 #include <audioclient.h>
\r
3595 #include <mmdeviceapi.h>
\r
3596 #include <functiondiscoverykeys_devpkey.h>
\r
3598 //=============================================================================
\r
// Release a COM interface pointer if non-NULL, then reset it to NULL so the
// pointer can never be double-released or used after release.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3607 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3609 //-----------------------------------------------------------------------------
\r
3611 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3612 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3613 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3614 // provide intermediate storage for read / write synchronization.
\r
3615 class WasapiBuffer
\r
3619 : buffer_( NULL ),
\r
3628 // sets the length of the internal ring buffer
\r
3629 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3632 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3634 bufferSize_ = bufferSize;
\r
3639 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3640 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3642 if ( !buffer || // incoming buffer is NULL
\r
3643 bufferSize == 0 || // incoming buffer has no data
\r
3644 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3649 unsigned int relOutIndex = outIndex_;
\r
3650 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3651 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3652 relOutIndex += bufferSize_;
\r
3655 // "in" index can end on the "out" index but cannot begin at it
\r
3656 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3657 return false; // not enough space between "in" index and "out" index
\r
3660 // copy buffer from external to internal
\r
3661 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3662 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3663 int fromInSize = bufferSize - fromZeroSize;
\r
3667 case RTAUDIO_SINT8:
\r
3668 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3669 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3671 case RTAUDIO_SINT16:
\r
3672 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3673 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3675 case RTAUDIO_SINT24:
\r
3676 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3677 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3679 case RTAUDIO_SINT32:
\r
3680 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3681 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3683 case RTAUDIO_FLOAT32:
\r
3684 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3685 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3687 case RTAUDIO_FLOAT64:
\r
3688 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3689 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3693 // update "in" index
\r
3694 inIndex_ += bufferSize;
\r
3695 inIndex_ %= bufferSize_;
\r
3700 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3701 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3703 if ( !buffer || // incoming buffer is NULL
\r
3704 bufferSize == 0 || // incoming buffer has no data
\r
3705 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3710 unsigned int relInIndex = inIndex_;
\r
3711 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3712 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3713 relInIndex += bufferSize_;
\r
3716 // "out" index can begin at and end on the "in" index
\r
3717 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3718 return false; // not enough space between "out" index and "in" index
\r
3721 // copy buffer from internal to external
\r
3722 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3723 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3724 int fromOutSize = bufferSize - fromZeroSize;
\r
3728 case RTAUDIO_SINT8:
\r
3729 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3730 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3732 case RTAUDIO_SINT16:
\r
3733 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3734 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3736 case RTAUDIO_SINT24:
\r
3737 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3738 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3740 case RTAUDIO_SINT32:
\r
3741 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3742 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3744 case RTAUDIO_FLOAT32:
\r
3745 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3746 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3748 case RTAUDIO_FLOAT64:
\r
3749 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3750 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3754 // update "out" index
\r
3755 outIndex_ += bufferSize;
\r
3756 outIndex_ %= bufferSize_;
\r
3763 unsigned int bufferSize_;
\r
3764 unsigned int inIndex_;
\r
3765 unsigned int outIndex_;
\r
3768 //-----------------------------------------------------------------------------
\r
3770 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3771 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3772 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3773 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3774 // one rate and its multiple.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& channelCount,
\r
3778 const unsigned int& inSampleRate,
\r
3779 const unsigned int& outSampleRate,
\r
3780 const unsigned int& inSampleCount,
\r
3781 unsigned int& outSampleCount,
\r
3782 const RtAudioFormat& format )
\r
3784 // calculate the new outSampleCount and relative sampleStep
\r
3785 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3786 float sampleStep = 1.0f / sampleRatio;
\r
3787 float inSampleFraction = 0.0f;
\r
3789 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3791 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3792 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3794 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3798 case RTAUDIO_SINT8:
\r
3799 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3801 case RTAUDIO_SINT16:
\r
3802 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3804 case RTAUDIO_SINT24:
\r
3805 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3807 case RTAUDIO_SINT32:
\r
3808 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3810 case RTAUDIO_FLOAT32:
\r
3811 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3813 case RTAUDIO_FLOAT64:
\r
3814 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3818 // jump to next in sample
\r
3819 inSampleFraction += sampleStep;
\r
3823 //-----------------------------------------------------------------------------
\r
3825 // A structure to hold various information related to the WASAPI implementation.
\r
3826 struct WasapiHandle
\r
3828 IAudioClient* captureAudioClient;
\r
3829 IAudioClient* renderAudioClient;
\r
3830 IAudioCaptureClient* captureClient;
\r
3831 IAudioRenderClient* renderClient;
\r
3832 HANDLE captureEvent;
\r
3833 HANDLE renderEvent;
\r
3836 : captureAudioClient( NULL ),
\r
3837 renderAudioClient( NULL ),
\r
3838 captureClient( NULL ),
\r
3839 renderClient( NULL ),
\r
3840 captureEvent( NULL ),
\r
3841 renderEvent( NULL ) {}
\r
3844 //=============================================================================
\r
3846 RtApiWasapi::RtApiWasapi()
\r
3847 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3849 // WASAPI can run either apartment or multi-threaded
\r
3850 HRESULT hr = CoInitialize( NULL );
\r
3851 if ( !FAILED( hr ) )
\r
3852 coInitialized_ = true;
\r
3854 // Instantiate device enumerator
\r
3855 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3856 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3857 ( void** ) &deviceEnumerator_ );
\r
3859 if ( FAILED( hr ) ) {
\r
3860 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3861 error( RtAudioError::DRIVER_ERROR );
\r
3865 //-----------------------------------------------------------------------------
\r
3867 RtApiWasapi::~RtApiWasapi()
\r
3869 if ( stream_.state != STREAM_CLOSED )
\r
3872 SAFE_RELEASE( deviceEnumerator_ );
\r
3874 // If this object previously called CoInitialize()
\r
3875 if ( coInitialized_ )
\r
3879 //=============================================================================
\r
3881 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3883 unsigned int captureDeviceCount = 0;
\r
3884 unsigned int renderDeviceCount = 0;
\r
3886 IMMDeviceCollection* captureDevices = NULL;
\r
3887 IMMDeviceCollection* renderDevices = NULL;
\r
3889 // Count capture devices
\r
3890 errorText_.clear();
\r
3891 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3892 if ( FAILED( hr ) ) {
\r
3893 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3897 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3898 if ( FAILED( hr ) ) {
\r
3899 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3903 // Count render devices
\r
3904 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3905 if ( FAILED( hr ) ) {
\r
3906 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3910 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3911 if ( FAILED( hr ) ) {
\r
3912 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3917 // release all references
\r
3918 SAFE_RELEASE( captureDevices );
\r
3919 SAFE_RELEASE( renderDevices );
\r
3921 if ( errorText_.empty() )
\r
3922 return captureDeviceCount + renderDeviceCount;
\r
3924 error( RtAudioError::DRIVER_ERROR );
\r
3928 //-----------------------------------------------------------------------------
\r
3930 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3932 RtAudio::DeviceInfo info;
\r
3933 unsigned int captureDeviceCount = 0;
\r
3934 unsigned int renderDeviceCount = 0;
\r
3935 std::wstring deviceName;
\r
3936 std::string defaultDeviceName;
\r
3937 bool isCaptureDevice = false;
\r
3939 PROPVARIANT deviceNameProp;
\r
3940 PROPVARIANT defaultDeviceNameProp;
\r
3942 IMMDeviceCollection* captureDevices = NULL;
\r
3943 IMMDeviceCollection* renderDevices = NULL;
\r
3944 IMMDevice* devicePtr = NULL;
\r
3945 IMMDevice* defaultDevicePtr = NULL;
\r
3946 IAudioClient* audioClient = NULL;
\r
3947 IPropertyStore* devicePropStore = NULL;
\r
3948 IPropertyStore* defaultDevicePropStore = NULL;
\r
3950 WAVEFORMATEX* deviceFormat = NULL;
\r
3951 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3954 info.probed = false;
\r
3956 // Count capture devices
\r
3957 errorText_.clear();
\r
3958 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3959 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3960 if ( FAILED( hr ) ) {
\r
3961 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3965 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3966 if ( FAILED( hr ) ) {
\r
3967 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3971 // Count render devices
\r
3972 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3973 if ( FAILED( hr ) ) {
\r
3974 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3978 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3979 if ( FAILED( hr ) ) {
\r
3980 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3984 // validate device index
\r
3985 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3986 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3987 errorType = RtAudioError::INVALID_USE;
\r
3991 // determine whether index falls within capture or render devices
\r
3992 if ( device >= renderDeviceCount ) {
\r
3993 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3994 if ( FAILED( hr ) ) {
\r
3995 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
3998 isCaptureDevice = true;
\r
4001 hr = renderDevices->Item( device, &devicePtr );
\r
4002 if ( FAILED( hr ) ) {
\r
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4006 isCaptureDevice = false;
\r
4009 // get default device name
\r
4010 if ( isCaptureDevice ) {
\r
4011 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4012 if ( FAILED( hr ) ) {
\r
4013 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4018 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4019 if ( FAILED( hr ) ) {
\r
4020 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4025 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4026 if ( FAILED( hr ) ) {
\r
4027 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4030 PropVariantInit( &defaultDeviceNameProp );
\r
4032 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4033 if ( FAILED( hr ) ) {
\r
4034 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4038 deviceName = defaultDeviceNameProp.pwszVal;
\r
4039 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4042 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4043 if ( FAILED( hr ) ) {
\r
4044 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4048 PropVariantInit( &deviceNameProp );
\r
4050 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4051 if ( FAILED( hr ) ) {
\r
4052 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4056 deviceName = deviceNameProp.pwszVal;
\r
4057 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4060 if ( isCaptureDevice ) {
\r
4061 info.isDefaultInput = info.name == defaultDeviceName;
\r
4062 info.isDefaultOutput = false;
\r
4065 info.isDefaultInput = false;
\r
4066 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4070 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4071 if ( FAILED( hr ) ) {
\r
4072 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4076 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4077 if ( FAILED( hr ) ) {
\r
4078 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4082 if ( isCaptureDevice ) {
\r
4083 info.inputChannels = deviceFormat->nChannels;
\r
4084 info.outputChannels = 0;
\r
4085 info.duplexChannels = 0;
\r
4088 info.inputChannels = 0;
\r
4089 info.outputChannels = deviceFormat->nChannels;
\r
4090 info.duplexChannels = 0;
\r
4094 info.sampleRates.clear();
\r
4096 // allow support for all sample rates as we have a built-in sample rate converter
\r
4097 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4098 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4102 info.nativeFormats = 0;
\r
4104 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4105 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4106 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4108 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4109 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4111 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4112 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4115 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4116 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4117 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4119 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4120 info.nativeFormats |= RTAUDIO_SINT8;
\r
4122 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4123 info.nativeFormats |= RTAUDIO_SINT16;
\r
4125 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4126 info.nativeFormats |= RTAUDIO_SINT24;
\r
4128 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT32;
\r
4134 info.probed = true;
\r
4137 // release all references
\r
4138 PropVariantClear( &deviceNameProp );
\r
4139 PropVariantClear( &defaultDeviceNameProp );
\r
4141 SAFE_RELEASE( captureDevices );
\r
4142 SAFE_RELEASE( renderDevices );
\r
4143 SAFE_RELEASE( devicePtr );
\r
4144 SAFE_RELEASE( defaultDevicePtr );
\r
4145 SAFE_RELEASE( audioClient );
\r
4146 SAFE_RELEASE( devicePropStore );
\r
4147 SAFE_RELEASE( defaultDevicePropStore );
\r
4149 CoTaskMemFree( deviceFormat );
\r
4150 CoTaskMemFree( closestMatchFormat );
\r
4152 if ( !errorText_.empty() )
\r
4153 error( errorType );
\r
4157 //-----------------------------------------------------------------------------
\r
4159 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4161 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4162 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4170 //-----------------------------------------------------------------------------
\r
4172 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4174 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4175 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4183 //-----------------------------------------------------------------------------
\r
4185 void RtApiWasapi::closeStream( void )
\r
4187 if ( stream_.state == STREAM_CLOSED ) {
\r
4188 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4189 error( RtAudioError::WARNING );
\r
4193 if ( stream_.state != STREAM_STOPPED )
\r
4196 // clean up stream memory
\r
4197 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4198 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4200 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4201 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4203 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4204 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4206 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4207 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4209 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4210 stream_.apiHandle = NULL;
\r
4212 for ( int i = 0; i < 2; i++ ) {
\r
4213 if ( stream_.userBuffer[i] ) {
\r
4214 free( stream_.userBuffer[i] );
\r
4215 stream_.userBuffer[i] = 0;
\r
4219 if ( stream_.deviceBuffer ) {
\r
4220 free( stream_.deviceBuffer );
\r
4221 stream_.deviceBuffer = 0;
\r
4224 // update stream state
\r
4225 stream_.state = STREAM_CLOSED;
\r
4228 //-----------------------------------------------------------------------------
\r
4230 void RtApiWasapi::startStream( void )
\r
4234 if ( stream_.state == STREAM_RUNNING ) {
\r
4235 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4236 error( RtAudioError::WARNING );
\r
4240 // update stream state
\r
4241 stream_.state = STREAM_RUNNING;
\r
4243 // create WASAPI stream thread
\r
4244 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4246 if ( !stream_.callbackInfo.thread ) {
\r
4247 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4248 error( RtAudioError::THREAD_ERROR );
\r
4251 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4252 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4256 //-----------------------------------------------------------------------------
\r
4258 void RtApiWasapi::stopStream( void )
\r
4262 if ( stream_.state == STREAM_STOPPED ) {
\r
4263 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4264 error( RtAudioError::WARNING );
\r
4268 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4269 stream_.state = STREAM_STOPPING;
\r
4271 // wait until stream thread is stopped
\r
4272 while( stream_.state != STREAM_STOPPED ) {
\r
4276 // Wait for the last buffer to play before stopping.
\r
4277 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4279 // stop capture client if applicable
\r
4280 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4281 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4282 if ( FAILED( hr ) ) {
\r
4283 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4284 error( RtAudioError::DRIVER_ERROR );
\r
4289 // stop render client if applicable
\r
4290 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4291 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4292 if ( FAILED( hr ) ) {
\r
4293 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4294 error( RtAudioError::DRIVER_ERROR );
\r
4299 // close thread handle
\r
4300 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4301 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4302 error( RtAudioError::THREAD_ERROR );
\r
4306 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4309 //-----------------------------------------------------------------------------
\r
4311 void RtApiWasapi::abortStream( void )
\r
4315 if ( stream_.state == STREAM_STOPPED ) {
\r
4316 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4317 error( RtAudioError::WARNING );
\r
4321 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4322 stream_.state = STREAM_STOPPING;
\r
4324 // wait until stream thread is stopped
\r
4325 while ( stream_.state != STREAM_STOPPED ) {
\r
4329 // stop capture client if applicable
\r
4330 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4331 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4332 if ( FAILED( hr ) ) {
\r
4333 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4334 error( RtAudioError::DRIVER_ERROR );
\r
4339 // stop render client if applicable
\r
4340 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4341 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4342 if ( FAILED( hr ) ) {
\r
4343 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4344 error( RtAudioError::DRIVER_ERROR );
\r
4349 // close thread handle
\r
4350 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4351 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4352 error( RtAudioError::THREAD_ERROR );
\r
4356 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4359 //-----------------------------------------------------------------------------
\r
4361 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4362 unsigned int firstChannel, unsigned int sampleRate,
\r
4363 RtAudioFormat format, unsigned int* bufferSize,
\r
4364 RtAudio::StreamOptions* options )
\r
4366 bool methodResult = FAILURE;
\r
4367 unsigned int captureDeviceCount = 0;
\r
4368 unsigned int renderDeviceCount = 0;
\r
4370 IMMDeviceCollection* captureDevices = NULL;
\r
4371 IMMDeviceCollection* renderDevices = NULL;
\r
4372 IMMDevice* devicePtr = NULL;
\r
4373 WAVEFORMATEX* deviceFormat = NULL;
\r
4374 unsigned int bufferBytes;
\r
4375 stream_.state = STREAM_STOPPED;
\r
4377 // create API Handle if not already created
\r
4378 if ( !stream_.apiHandle )
\r
4379 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4381 // Count capture devices
\r
4382 errorText_.clear();
\r
4383 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4384 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4385 if ( FAILED( hr ) ) {
\r
4386 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4390 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4391 if ( FAILED( hr ) ) {
\r
4392 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4396 // Count render devices
\r
4397 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4398 if ( FAILED( hr ) ) {
\r
4399 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4403 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4404 if ( FAILED( hr ) ) {
\r
4405 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4409 // validate device index
\r
4410 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4411 errorType = RtAudioError::INVALID_USE;
\r
4412 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4416 // determine whether index falls within capture or render devices
\r
4417 if ( device >= renderDeviceCount ) {
\r
4418 if ( mode != INPUT ) {
\r
4419 errorType = RtAudioError::INVALID_USE;
\r
4420 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4424 // retrieve captureAudioClient from devicePtr
\r
4425 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4427 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4428 if ( FAILED( hr ) ) {
\r
4429 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4433 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4434 NULL, ( void** ) &captureAudioClient );
\r
4435 if ( FAILED( hr ) ) {
\r
4436 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4440 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4441 if ( FAILED( hr ) ) {
\r
4442 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4446 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4447 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4450 if ( mode != OUTPUT ) {
\r
4451 errorType = RtAudioError::INVALID_USE;
\r
4452 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4456 // retrieve renderAudioClient from devicePtr
\r
4457 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4459 hr = renderDevices->Item( device, &devicePtr );
\r
4460 if ( FAILED( hr ) ) {
\r
4461 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4465 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4466 NULL, ( void** ) &renderAudioClient );
\r
4467 if ( FAILED( hr ) ) {
\r
4468 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4472 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4478 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4479 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4482 // fill stream data
\r
4483 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4484 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4485 stream_.mode = DUPLEX;
\r
4488 stream_.mode = mode;
\r
4491 stream_.device[mode] = device;
\r
4492 stream_.doByteSwap[mode] = false;
\r
4493 stream_.sampleRate = sampleRate;
\r
4494 stream_.bufferSize = *bufferSize;
\r
4495 stream_.nBuffers = 1;
\r
4496 stream_.nUserChannels[mode] = channels;
\r
4497 stream_.channelOffset[mode] = firstChannel;
\r
4498 stream_.userFormat = format;
\r
4499 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4501 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4502 stream_.userInterleaved = false;
\r
4504 stream_.userInterleaved = true;
\r
4505 stream_.deviceInterleaved[mode] = true;
\r
4507 // Set flags for buffer conversion.
\r
4508 stream_.doConvertBuffer[mode] = false;
\r
4509 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4510 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4511 stream_.doConvertBuffer[mode] = true;
\r
4512 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4513 stream_.nUserChannels[mode] > 1 )
\r
4514 stream_.doConvertBuffer[mode] = true;
\r
4516 if ( stream_.doConvertBuffer[mode] )
\r
4517 setConvertInfo( mode, 0 );
\r
4519 // Allocate necessary internal buffers
\r
4520 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4522 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4523 if ( !stream_.userBuffer[mode] ) {
\r
4524 errorType = RtAudioError::MEMORY_ERROR;
\r
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4529 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4530 stream_.callbackInfo.priority = 15;
\r
4532 stream_.callbackInfo.priority = 0;
\r
4534 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4535 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4537 methodResult = SUCCESS;
\r
4541 SAFE_RELEASE( captureDevices );
\r
4542 SAFE_RELEASE( renderDevices );
\r
4543 SAFE_RELEASE( devicePtr );
\r
4544 CoTaskMemFree( deviceFormat );
\r
4546 // if method failed, close the stream
\r
4547 if ( methodResult == FAILURE )
\r
4550 if ( !errorText_.empty() )
\r
4551 error( errorType );
\r
4552 return methodResult;
\r
4555 //=============================================================================
\r
4557 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4560 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4565 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4568 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4573 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4576 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4581 //-----------------------------------------------------------------------------
\r
4583 void RtApiWasapi::wasapiThread()
\r
4585 // as this is a new thread, we must CoInitialize it
\r
4586 CoInitialize( NULL );
\r
4590 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4591 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4592 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4593 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4594 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4595 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4597 WAVEFORMATEX* captureFormat = NULL;
\r
4598 WAVEFORMATEX* renderFormat = NULL;
\r
4599 float captureSrRatio = 0.0f;
\r
4600 float renderSrRatio = 0.0f;
\r
4601 WasapiBuffer captureBuffer;
\r
4602 WasapiBuffer renderBuffer;
\r
4604 // declare local stream variables
\r
4605 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4606 BYTE* streamBuffer = NULL;
\r
4607 unsigned long captureFlags = 0;
\r
4608 unsigned int bufferFrameCount = 0;
\r
4609 unsigned int numFramesPadding = 0;
\r
4610 unsigned int convBufferSize = 0;
\r
4611 bool callbackPushed = false;
\r
4612 bool callbackPulled = false;
\r
4613 bool callbackStopped = false;
\r
4614 int callbackResult = 0;
\r
4616 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4617 char* convBuffer = NULL;
\r
4618 unsigned int convBuffSize = 0;
\r
4619 unsigned int deviceBuffSize = 0;
\r
4621 errorText_.clear();
\r
4622 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4624 // Attempt to assign "Pro Audio" characteristic to thread
\r
4625 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4627 DWORD taskIndex = 0;
\r
4628 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4629 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4630 FreeLibrary( AvrtDll );
\r
4633 // start capture stream if applicable
\r
4634 if ( captureAudioClient ) {
\r
4635 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4636 if ( FAILED( hr ) ) {
\r
4637 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4641 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4643 // initialize capture stream according to desire buffer size
\r
4644 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4645 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4647 if ( !captureClient ) {
\r
4648 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4649 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4650 desiredBufferPeriod,
\r
4651 desiredBufferPeriod,
\r
4654 if ( FAILED( hr ) ) {
\r
4655 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4659 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4660 ( void** ) &captureClient );
\r
4661 if ( FAILED( hr ) ) {
\r
4662 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4666 // configure captureEvent to trigger on every available capture buffer
\r
4667 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4668 if ( !captureEvent ) {
\r
4669 errorType = RtAudioError::SYSTEM_ERROR;
\r
4670 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4674 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4675 if ( FAILED( hr ) ) {
\r
4676 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4680 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4681 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4684 unsigned int inBufferSize = 0;
\r
4685 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4686 if ( FAILED( hr ) ) {
\r
4687 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4691 // scale outBufferSize according to stream->user sample rate ratio
\r
4692 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4693 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4695 // set captureBuffer size
\r
4696 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4698 // reset the capture stream
\r
4699 hr = captureAudioClient->Reset();
\r
4700 if ( FAILED( hr ) ) {
\r
4701 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4705 // start the capture stream
\r
4706 hr = captureAudioClient->Start();
\r
4707 if ( FAILED( hr ) ) {
\r
4708 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4713 // start render stream if applicable
\r
4714 if ( renderAudioClient ) {
\r
4715 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4716 if ( FAILED( hr ) ) {
\r
4717 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4721 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4723 // initialize render stream according to desire buffer size
\r
4724 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4725 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4727 if ( !renderClient ) {
\r
4728 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4729 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4730 desiredBufferPeriod,
\r
4731 desiredBufferPeriod,
\r
4734 if ( FAILED( hr ) ) {
\r
4735 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4739 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4740 ( void** ) &renderClient );
\r
4741 if ( FAILED( hr ) ) {
\r
4742 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4746 // configure renderEvent to trigger on every available render buffer
\r
4747 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4748 if ( !renderEvent ) {
\r
4749 errorType = RtAudioError::SYSTEM_ERROR;
\r
4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4754 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4755 if ( FAILED( hr ) ) {
\r
4756 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4760 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4761 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4764 unsigned int outBufferSize = 0;
\r
4765 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4766 if ( FAILED( hr ) ) {
\r
4767 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4771 // scale inBufferSize according to user->stream sample rate ratio
\r
4772 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4773 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4775 // set renderBuffer size
\r
4776 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4778 // reset the render stream
\r
4779 hr = renderAudioClient->Reset();
\r
4780 if ( FAILED( hr ) ) {
\r
4781 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4785 // start the render stream
\r
4786 hr = renderAudioClient->Start();
\r
4787 if ( FAILED( hr ) ) {
\r
4788 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4793 if ( stream_.mode == INPUT ) {
\r
4794 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4795 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4797 else if ( stream_.mode == OUTPUT ) {
\r
4798 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4799 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4801 else if ( stream_.mode == DUPLEX ) {
\r
4802 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4803 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4804 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4805 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4808 convBuffer = ( char* ) malloc( convBuffSize );
\r
4809 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4810 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4811 errorType = RtAudioError::MEMORY_ERROR;
\r
4812 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4816 // stream process loop
\r
4817 while ( stream_.state != STREAM_STOPPING ) {
\r
4818 if ( !callbackPulled ) {
\r
4821 // 1. Pull callback buffer from inputBuffer
\r
4822 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4823 // Convert callback buffer to user format
\r
4825 if ( captureAudioClient ) {
\r
4826 // Pull callback buffer from inputBuffer
\r
4827 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4828 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4829 stream_.deviceFormat[INPUT] );
\r
4831 if ( callbackPulled ) {
\r
4832 // Convert callback buffer to user sample rate
\r
4833 convertBufferWasapi( stream_.deviceBuffer,
\r
4835 stream_.nDeviceChannels[INPUT],
\r
4836 captureFormat->nSamplesPerSec,
\r
4837 stream_.sampleRate,
\r
4838 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4840 stream_.deviceFormat[INPUT] );
\r
4842 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4843 // Convert callback buffer to user format
\r
4844 convertBuffer( stream_.userBuffer[INPUT],
\r
4845 stream_.deviceBuffer,
\r
4846 stream_.convertInfo[INPUT] );
\r
4849 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4850 memcpy( stream_.userBuffer[INPUT],
\r
4851 stream_.deviceBuffer,
\r
4852 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4857 // if there is no capture stream, set callbackPulled flag
\r
4858 callbackPulled = true;
\r
4861 // Execute Callback
\r
4862 // ================
\r
4863 // 1. Execute user callback method
\r
4864 // 2. Handle return value from callback
\r
4866 // if callback has not requested the stream to stop
\r
4867 if ( callbackPulled && !callbackStopped ) {
\r
4868 // Execute user callback method
\r
4869 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4870 stream_.userBuffer[INPUT],
\r
4871 stream_.bufferSize,
\r
4873 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4874 stream_.callbackInfo.userData );
\r
4876 // Handle return value from callback
\r
4877 if ( callbackResult == 1 ) {
\r
4878 // instantiate a thread to stop this thread
\r
4879 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4880 if ( !threadHandle ) {
\r
4881 errorType = RtAudioError::THREAD_ERROR;
\r
4882 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4885 else if ( !CloseHandle( threadHandle ) ) {
\r
4886 errorType = RtAudioError::THREAD_ERROR;
\r
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4891 callbackStopped = true;
\r
4893 else if ( callbackResult == 2 ) {
\r
4894 // instantiate a thread to stop this thread
\r
4895 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4896 if ( !threadHandle ) {
\r
4897 errorType = RtAudioError::THREAD_ERROR;
\r
4898 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4901 else if ( !CloseHandle( threadHandle ) ) {
\r
4902 errorType = RtAudioError::THREAD_ERROR;
\r
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4907 callbackStopped = true;
\r
4912 // Callback Output
\r
4913 // ===============
\r
4914 // 1. Convert callback buffer to stream format
\r
4915 // 2. Convert callback buffer to stream sample rate and channel count
\r
4916 // 3. Push callback buffer into outputBuffer
\r
4918 if ( renderAudioClient && callbackPulled ) {
\r
4919 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4920 // Convert callback buffer to stream format
\r
4921 convertBuffer( stream_.deviceBuffer,
\r
4922 stream_.userBuffer[OUTPUT],
\r
4923 stream_.convertInfo[OUTPUT] );
\r
4927 // Convert callback buffer to stream sample rate
\r
4928 convertBufferWasapi( convBuffer,
\r
4929 stream_.deviceBuffer,
\r
4930 stream_.nDeviceChannels[OUTPUT],
\r
4931 stream_.sampleRate,
\r
4932 renderFormat->nSamplesPerSec,
\r
4933 stream_.bufferSize,
\r
4935 stream_.deviceFormat[OUTPUT] );
\r
4937 // Push callback buffer into outputBuffer
\r
4938 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4939 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4940 stream_.deviceFormat[OUTPUT] );
\r
4943 // if there is no render stream, set callbackPushed flag
\r
4944 callbackPushed = true;
\r
4949 // 1. Get capture buffer from stream
\r
4950 // 2. Push capture buffer into inputBuffer
\r
4951 // 3. If 2. was successful: Release capture buffer
\r
4953 if ( captureAudioClient ) {
\r
4954 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4955 if ( !callbackPulled ) {
\r
4956 WaitForSingleObject( captureEvent, INFINITE );
\r
4959 // Get capture buffer from stream
\r
4960 hr = captureClient->GetBuffer( &streamBuffer,
\r
4961 &bufferFrameCount,
\r
4962 &captureFlags, NULL, NULL );
\r
4963 if ( FAILED( hr ) ) {
\r
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4968 if ( bufferFrameCount != 0 ) {
\r
4969 // Push capture buffer into inputBuffer
\r
4970 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4971 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4972 stream_.deviceFormat[INPUT] ) )
\r
4974 // Release capture buffer
\r
4975 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4976 if ( FAILED( hr ) ) {
\r
4977 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4983 // Inform WASAPI that capture was unsuccessful
\r
4984 hr = captureClient->ReleaseBuffer( 0 );
\r
4985 if ( FAILED( hr ) ) {
\r
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4993 // Inform WASAPI that capture was unsuccessful
\r
4994 hr = captureClient->ReleaseBuffer( 0 );
\r
4995 if ( FAILED( hr ) ) {
\r
4996 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5004 // 1. Get render buffer from stream
\r
5005 // 2. Pull next buffer from outputBuffer
\r
5006 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5007 // Release render buffer
\r
5009 if ( renderAudioClient ) {
\r
5010 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5011 if ( callbackPulled && !callbackPushed ) {
\r
5012 WaitForSingleObject( renderEvent, INFINITE );
\r
5015 // Get render buffer from stream
\r
5016 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5017 if ( FAILED( hr ) ) {
\r
5018 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5022 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5023 if ( FAILED( hr ) ) {
\r
5024 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5028 bufferFrameCount -= numFramesPadding;
\r
5030 if ( bufferFrameCount != 0 ) {
\r
5031 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5032 if ( FAILED( hr ) ) {
\r
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5037 // Pull next buffer from outputBuffer
\r
5038 // Fill render buffer with next buffer
\r
5039 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5040 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5041 stream_.deviceFormat[OUTPUT] ) )
\r
5043 // Release render buffer
\r
5044 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5045 if ( FAILED( hr ) ) {
\r
5046 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5052 // Inform WASAPI that render was unsuccessful
\r
5053 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5054 if ( FAILED( hr ) ) {
\r
5055 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5062 // Inform WASAPI that render was unsuccessful
\r
5063 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5064 if ( FAILED( hr ) ) {
\r
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5071 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5072 if ( callbackPushed ) {
\r
5073 callbackPulled = false;
\r
5076 // tick stream time
\r
5077 RtApi::tickStreamTime();
\r
5082 CoTaskMemFree( captureFormat );
\r
5083 CoTaskMemFree( renderFormat );
\r
5085 free ( convBuffer );
\r
5089 // update stream state
\r
5090 stream_.state = STREAM_STOPPED;
\r
5092 if ( errorText_.empty() )
\r
5095 error( errorType );
\r
5098 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5102 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5104 // Modified by Robin Davies, October 2005
\r
5105 // - Improvements to DirectX pointer chasing.
\r
5106 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5107 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5108 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5109 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5111 #include <dsound.h>
\r
5112 #include <assert.h>
\r
5113 #include <algorithm>
\r
5115 #if defined(__MINGW32__)
\r
5116 // missing from latest mingw winapi
\r
5117 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5118 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5119 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5120 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5123 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5125 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5126 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5129 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5131 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5132 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5133 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5134 return pointer >= earlierPointer && pointer < laterPointer;
\r
5137 // A structure to hold various information related to the DirectSound
\r
5138 // API implementation.
\r
5140 unsigned int drainCounter; // Tracks callback counts when draining
\r
5141 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5145 UINT bufferPointer[2];
\r
5146 DWORD dsBufferSize[2];
\r
5147 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5151 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5154 // Declarations for utility functions, callbacks, and structures
\r
5155 // specific to the DirectSound implementation.
\r
5156 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5157 LPCTSTR description,
\r
5159 LPVOID lpContext );
\r
5161 static const char* getErrorString( int code );
\r
5163 static unsigned __stdcall callbackHandler( void *ptr );
\r
5172 : found(false) { validId[0] = false; validId[1] = false; }
\r
5175 struct DsProbeData {
\r
5177 std::vector<struct DsDevice>* dsDevices;
\r
5180 RtApiDs :: RtApiDs()
\r
5182 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5183 // accept whatever the mainline chose for a threading model.
\r
5184 coInitialized_ = false;
\r
5185 HRESULT hr = CoInitialize( NULL );
\r
5186 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5189 RtApiDs :: ~RtApiDs()
\r
5191 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5192 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5195 // The DirectSound default output is always the first device.
\r
5196 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5201 // The DirectSound default input is always the first input device,
\r
5202 // which is the first capture device enumerated.
\r
5203 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5208 unsigned int RtApiDs :: getDeviceCount( void )
\r
5210 // Set query flag for previously found devices to false, so that we
\r
5211 // can check for any devices that have disappeared.
\r
5212 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5213 dsDevices[i].found = false;
\r
5215 // Query DirectSound devices.
\r
5216 struct DsProbeData probeInfo;
\r
5217 probeInfo.isInput = false;
\r
5218 probeInfo.dsDevices = &dsDevices;
\r
5219 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5220 if ( FAILED( result ) ) {
\r
5221 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5222 errorText_ = errorStream_.str();
\r
5223 error( RtAudioError::WARNING );
\r
5226 // Query DirectSoundCapture devices.
\r
5227 probeInfo.isInput = true;
\r
5228 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5229 if ( FAILED( result ) ) {
\r
5230 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5231 errorText_ = errorStream_.str();
\r
5232 error( RtAudioError::WARNING );
\r
5235 // Clean out any devices that may have disappeared.
\r
5236 std::vector< int > indices;
\r
5237 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5238 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5239 //unsigned int nErased = 0;
\r
5240 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5241 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5242 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5244 return static_cast<unsigned int>(dsDevices.size());
\r
5247 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5249 RtAudio::DeviceInfo info;
\r
5250 info.probed = false;
\r
5252 if ( dsDevices.size() == 0 ) {
\r
5253 // Force a query of all devices
\r
5255 if ( dsDevices.size() == 0 ) {
\r
5256 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5257 error( RtAudioError::INVALID_USE );
\r
5262 if ( device >= dsDevices.size() ) {
\r
5263 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5264 error( RtAudioError::INVALID_USE );
\r
5269 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5271 LPDIRECTSOUND output;
\r
5273 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5274 if ( FAILED( result ) ) {
\r
5275 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5276 errorText_ = errorStream_.str();
\r
5277 error( RtAudioError::WARNING );
\r
5281 outCaps.dwSize = sizeof( outCaps );
\r
5282 result = output->GetCaps( &outCaps );
\r
5283 if ( FAILED( result ) ) {
\r
5284 output->Release();
\r
5285 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5286 errorText_ = errorStream_.str();
\r
5287 error( RtAudioError::WARNING );
\r
5291 // Get output channel information.
\r
5292 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5294 // Get sample rate information.
\r
5295 info.sampleRates.clear();
\r
5296 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5297 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5298 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5299 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5302 // Get format information.
\r
5303 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5304 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5306 output->Release();
\r
5308 if ( getDefaultOutputDevice() == device )
\r
5309 info.isDefaultOutput = true;
\r
5311 if ( dsDevices[ device ].validId[1] == false ) {
\r
5312 info.name = dsDevices[ device ].name;
\r
5313 info.probed = true;
\r
5319 LPDIRECTSOUNDCAPTURE input;
\r
5320 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5321 if ( FAILED( result ) ) {
\r
5322 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5323 errorText_ = errorStream_.str();
\r
5324 error( RtAudioError::WARNING );
\r
5329 inCaps.dwSize = sizeof( inCaps );
\r
5330 result = input->GetCaps( &inCaps );
\r
5331 if ( FAILED( result ) ) {
\r
5333 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5334 errorText_ = errorStream_.str();
\r
5335 error( RtAudioError::WARNING );
\r
5339 // Get input channel information.
\r
5340 info.inputChannels = inCaps.dwChannels;
\r
5342 // Get sample rate and format information.
\r
5343 std::vector<unsigned int> rates;
\r
5344 if ( inCaps.dwChannels >= 2 ) {
\r
5345 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5346 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5347 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5348 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5349 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5350 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5351 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5352 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5354 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5355 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5356 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5360 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5362 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5363 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5364 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5367 else if ( inCaps.dwChannels == 1 ) {
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5369 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5371 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5374 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5375 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5377 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5383 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5385 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5386 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5390 else info.inputChannels = 0; // technically, this would be an error
\r
5394 if ( info.inputChannels == 0 ) return info;
\r
5396 // Copy the supported rates to the info structure but avoid duplication.
\r
5398 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5400 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5401 if ( rates[i] == info.sampleRates[j] ) {
\r
5406 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5408 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5410 // If device opens for both playback and capture, we determine the channels.
\r
5411 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5412 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5414 if ( device == 0 ) info.isDefaultInput = true;
\r
5416 // Copy name and return.
\r
5417 info.name = dsDevices[ device ].name;
\r
5418 info.probed = true;
\r
5422 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5423 unsigned int firstChannel, unsigned int sampleRate,
\r
5424 RtAudioFormat format, unsigned int *bufferSize,
\r
5425 RtAudio::StreamOptions *options )
\r
5427 if ( channels + firstChannel > 2 ) {
\r
5428 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5432 size_t nDevices = dsDevices.size();
\r
5433 if ( nDevices == 0 ) {
\r
5434 // This should not happen because a check is made before this function is called.
\r
5435 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5439 if ( device >= nDevices ) {
\r
5440 // This should not happen because a check is made before this function is called.
\r
5441 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5445 if ( mode == OUTPUT ) {
\r
5446 if ( dsDevices[ device ].validId[0] == false ) {
\r
5447 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5448 errorText_ = errorStream_.str();
\r
5452 else { // mode == INPUT
\r
5453 if ( dsDevices[ device ].validId[1] == false ) {
\r
5454 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5455 errorText_ = errorStream_.str();
\r
5460 // According to a note in PortAudio, using GetDesktopWindow()
\r
5461 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5462 // that occur when the application's window is not the foreground
\r
5463 // window. Also, if the application window closes before the
\r
5464 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5465 // problems when using GetDesktopWindow() but it seems fine now
\r
5466 // (January 2010). I'll leave it commented here.
\r
5467 // HWND hWnd = GetForegroundWindow();
\r
5468 HWND hWnd = GetDesktopWindow();
\r
5470 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5471 // two. This is a judgement call and a value of two is probably too
\r
5472 // low for capture, but it should work for playback.
\r
5474 if ( options ) nBuffers = options->numberOfBuffers;
\r
5475 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5476 if ( nBuffers < 2 ) nBuffers = 3;
\r
5478 // Check the lower range of the user-specified buffer size and set
\r
5479 // (arbitrarily) to a lower bound of 32.
\r
5480 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5482 // Create the wave format structure. The data format setting will
\r
5483 // be determined later.
\r
5484 WAVEFORMATEX waveFormat;
\r
5485 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5486 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5487 waveFormat.nChannels = channels + firstChannel;
\r
5488 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5490 // Determine the device buffer size. By default, we'll use the value
\r
5491 // defined above (32K), but we will grow it to make allowances for
\r
5492 // very large software buffer sizes.
\r
5493 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5494 DWORD dsPointerLeadTime = 0;
\r
5496 void *ohandle = 0, *bhandle = 0;
\r
5498 if ( mode == OUTPUT ) {
\r
5500 LPDIRECTSOUND output;
\r
5501 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5502 if ( FAILED( result ) ) {
\r
5503 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5504 errorText_ = errorStream_.str();
\r
5509 outCaps.dwSize = sizeof( outCaps );
\r
5510 result = output->GetCaps( &outCaps );
\r
5511 if ( FAILED( result ) ) {
\r
5512 output->Release();
\r
5513 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5514 errorText_ = errorStream_.str();
\r
5518 // Check channel information.
\r
5519 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5520 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5521 errorText_ = errorStream_.str();
\r
5525 // Check format information. Use 16-bit format unless not
\r
5526 // supported or user requests 8-bit.
\r
5527 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5528 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5529 waveFormat.wBitsPerSample = 16;
\r
5530 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5533 waveFormat.wBitsPerSample = 8;
\r
5534 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5536 stream_.userFormat = format;
\r
5538 // Update wave format structure and buffer information.
\r
5539 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5540 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5541 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5543 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5544 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5545 dsBufferSize *= 2;
\r
5547 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5548 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5549 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5550 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5551 if ( FAILED( result ) ) {
\r
5552 output->Release();
\r
5553 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5554 errorText_ = errorStream_.str();
\r
5558 // Even though we will write to the secondary buffer, we need to
\r
5559 // access the primary buffer to set the correct output format
\r
5560 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5561 // buffer description.
\r
5562 DSBUFFERDESC bufferDescription;
\r
5563 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5564 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5565 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5567 // Obtain the primary buffer
\r
5568 LPDIRECTSOUNDBUFFER buffer;
\r
5569 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5570 if ( FAILED( result ) ) {
\r
5571 output->Release();
\r
5572 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5573 errorText_ = errorStream_.str();
\r
5577 // Set the primary DS buffer sound format.
\r
5578 result = buffer->SetFormat( &waveFormat );
\r
5579 if ( FAILED( result ) ) {
\r
5580 output->Release();
\r
5581 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5582 errorText_ = errorStream_.str();
\r
5586 // Setup the secondary DS buffer description.
\r
5587 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5588 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5589 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5590 DSBCAPS_GLOBALFOCUS |
\r
5591 DSBCAPS_GETCURRENTPOSITION2 |
\r
5592 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5593 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5594 bufferDescription.lpwfxFormat = &waveFormat;
\r
5596 // Try to create the secondary DS buffer. If that doesn't work,
\r
5597 // try to use software mixing. Otherwise, there's a problem.
\r
5598 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5599 if ( FAILED( result ) ) {
\r
5600 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5601 DSBCAPS_GLOBALFOCUS |
\r
5602 DSBCAPS_GETCURRENTPOSITION2 |
\r
5603 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5604 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5605 if ( FAILED( result ) ) {
\r
5606 output->Release();
\r
5607 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5608 errorText_ = errorStream_.str();
\r
5613 // Get the buffer size ... might be different from what we specified.
\r
5615 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5616 result = buffer->GetCaps( &dsbcaps );
\r
5617 if ( FAILED( result ) ) {
\r
5618 output->Release();
\r
5619 buffer->Release();
\r
5620 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5621 errorText_ = errorStream_.str();
\r
5625 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5627 // Lock the DS buffer
\r
5630 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5631 if ( FAILED( result ) ) {
\r
5632 output->Release();
\r
5633 buffer->Release();
\r
5634 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5635 errorText_ = errorStream_.str();
\r
5639 // Zero the DS buffer
\r
5640 ZeroMemory( audioPtr, dataLen );
\r
5642 // Unlock the DS buffer
\r
5643 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5644 if ( FAILED( result ) ) {
\r
5645 output->Release();
\r
5646 buffer->Release();
\r
5647 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5648 errorText_ = errorStream_.str();
\r
5652 ohandle = (void *) output;
\r
5653 bhandle = (void *) buffer;
\r
5656 if ( mode == INPUT ) {
\r
5658 LPDIRECTSOUNDCAPTURE input;
\r
5659 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5660 if ( FAILED( result ) ) {
\r
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5662 errorText_ = errorStream_.str();
\r
5667 inCaps.dwSize = sizeof( inCaps );
\r
5668 result = input->GetCaps( &inCaps );
\r
5669 if ( FAILED( result ) ) {
\r
5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5672 errorText_ = errorStream_.str();
\r
5676 // Check channel information.
\r
5677 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5678 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5682 // Check format information. Use 16-bit format unless user
\r
5683 // requests 8-bit.
\r
5684 DWORD deviceFormats;
\r
5685 if ( channels + firstChannel == 2 ) {
\r
5686 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5687 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5688 waveFormat.wBitsPerSample = 8;
\r
5689 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5691 else { // assume 16-bit is supported
\r
5692 waveFormat.wBitsPerSample = 16;
\r
5693 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5696 else { // channel == 1
\r
5697 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5698 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5699 waveFormat.wBitsPerSample = 8;
\r
5700 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5702 else { // assume 16-bit is supported
\r
5703 waveFormat.wBitsPerSample = 16;
\r
5704 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5707 stream_.userFormat = format;
\r
5709 // Update wave format structure and buffer information.
\r
5710 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5711 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5712 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5714 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5715 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5716 dsBufferSize *= 2;
\r
5718 // Setup the secondary DS buffer description.
\r
5719 DSCBUFFERDESC bufferDescription;
\r
5720 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5721 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5722 bufferDescription.dwFlags = 0;
\r
5723 bufferDescription.dwReserved = 0;
\r
5724 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5725 bufferDescription.lpwfxFormat = &waveFormat;
\r
5727 // Create the capture buffer.
\r
5728 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5729 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5730 if ( FAILED( result ) ) {
\r
5732 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5733 errorText_ = errorStream_.str();
\r
5737 // Get the buffer size ... might be different from what we specified.
\r
5738 DSCBCAPS dscbcaps;
\r
5739 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5740 result = buffer->GetCaps( &dscbcaps );
\r
5741 if ( FAILED( result ) ) {
\r
5743 buffer->Release();
\r
5744 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5745 errorText_ = errorStream_.str();
\r
5749 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5751 // NOTE: We could have a problem here if this is a duplex stream
\r
5752 // and the play and capture hardware buffer sizes are different
\r
5753 // (I'm actually not sure if that is a problem or not).
\r
5754 // Currently, we are not verifying that.
\r
5756 // Lock the capture buffer
\r
5759 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5760 if ( FAILED( result ) ) {
\r
5762 buffer->Release();
\r
5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5764 errorText_ = errorStream_.str();
\r
5768 // Zero the buffer
\r
5769 ZeroMemory( audioPtr, dataLen );
\r
5771 // Unlock the buffer
\r
5772 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5773 if ( FAILED( result ) ) {
\r
5775 buffer->Release();
\r
5776 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5777 errorText_ = errorStream_.str();
\r
5781 ohandle = (void *) input;
\r
5782 bhandle = (void *) buffer;
\r
5785 // Set various stream parameters
\r
5786 DsHandle *handle = 0;
\r
5787 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5788 stream_.nUserChannels[mode] = channels;
\r
5789 stream_.bufferSize = *bufferSize;
\r
5790 stream_.channelOffset[mode] = firstChannel;
\r
5791 stream_.deviceInterleaved[mode] = true;
\r
5792 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5793 else stream_.userInterleaved = true;
\r
5795 // Set flag for buffer conversion
\r
5796 stream_.doConvertBuffer[mode] = false;
\r
5797 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5798 stream_.doConvertBuffer[mode] = true;
\r
5799 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5800 stream_.doConvertBuffer[mode] = true;
\r
5801 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5802 stream_.nUserChannels[mode] > 1 )
\r
5803 stream_.doConvertBuffer[mode] = true;
\r
5805 // Allocate necessary internal buffers
\r
5806 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5807 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5808 if ( stream_.userBuffer[mode] == NULL ) {
\r
5809 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5813 if ( stream_.doConvertBuffer[mode] ) {
\r
5815 bool makeBuffer = true;
\r
5816 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5817 if ( mode == INPUT ) {
\r
5818 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5819 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5820 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5824 if ( makeBuffer ) {
\r
5825 bufferBytes *= *bufferSize;
\r
5826 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5827 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5828 if ( stream_.deviceBuffer == NULL ) {
\r
5829 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5835 // Allocate our DsHandle structures for the stream.
\r
5836 if ( stream_.apiHandle == 0 ) {
\r
5838 handle = new DsHandle;
\r
5840 catch ( std::bad_alloc& ) {
\r
5841 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5845 // Create a manual-reset event.
\r
5846 handle->condition = CreateEvent( NULL, // no security
\r
5847 TRUE, // manual-reset
\r
5848 FALSE, // non-signaled initially
\r
5849 NULL ); // unnamed
\r
5850 stream_.apiHandle = (void *) handle;
\r
5853 handle = (DsHandle *) stream_.apiHandle;
\r
5854 handle->id[mode] = ohandle;
\r
5855 handle->buffer[mode] = bhandle;
\r
5856 handle->dsBufferSize[mode] = dsBufferSize;
\r
5857 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5859 stream_.device[mode] = device;
\r
5860 stream_.state = STREAM_STOPPED;
\r
5861 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5862 // We had already set up an output stream.
\r
5863 stream_.mode = DUPLEX;
\r
5865 stream_.mode = mode;
\r
5866 stream_.nBuffers = nBuffers;
\r
5867 stream_.sampleRate = sampleRate;
\r
5869 // Setup the buffer conversion information structure.
\r
5870 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5872 // Setup the callback thread.
\r
5873 if ( stream_.callbackInfo.isRunning == false ) {
\r
5874 unsigned threadId;
\r
5875 stream_.callbackInfo.isRunning = true;
\r
5876 stream_.callbackInfo.object = (void *) this;
\r
5877 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5878 &stream_.callbackInfo, 0, &threadId );
\r
5879 if ( stream_.callbackInfo.thread == 0 ) {
\r
5880 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5884 // Boost DS thread priority
\r
5885 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5891 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5892 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5893 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5894 if ( buffer ) buffer->Release();
\r
5895 object->Release();
\r
5897 if ( handle->buffer[1] ) {
\r
5898 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5899 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5900 if ( buffer ) buffer->Release();
\r
5901 object->Release();
\r
5903 CloseHandle( handle->condition );
\r
5905 stream_.apiHandle = 0;
\r
5908 for ( int i=0; i<2; i++ ) {
\r
5909 if ( stream_.userBuffer[i] ) {
\r
5910 free( stream_.userBuffer[i] );
\r
5911 stream_.userBuffer[i] = 0;
\r
5915 if ( stream_.deviceBuffer ) {
\r
5916 free( stream_.deviceBuffer );
\r
5917 stream_.deviceBuffer = 0;
\r
5920 stream_.state = STREAM_CLOSED;
\r
5924 void RtApiDs :: closeStream()
\r
5926 if ( stream_.state == STREAM_CLOSED ) {
\r
5927 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5928 error( RtAudioError::WARNING );
\r
5932 // Stop the callback thread.
\r
5933 stream_.callbackInfo.isRunning = false;
\r
5934 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5935 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5937 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5939 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5940 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5941 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5944 buffer->Release();
\r
5946 object->Release();
\r
5948 if ( handle->buffer[1] ) {
\r
5949 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5950 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5953 buffer->Release();
\r
5955 object->Release();
\r
5957 CloseHandle( handle->condition );
\r
5959 stream_.apiHandle = 0;
\r
5962 for ( int i=0; i<2; i++ ) {
\r
5963 if ( stream_.userBuffer[i] ) {
\r
5964 free( stream_.userBuffer[i] );
\r
5965 stream_.userBuffer[i] = 0;
\r
5969 if ( stream_.deviceBuffer ) {
\r
5970 free( stream_.deviceBuffer );
\r
5971 stream_.deviceBuffer = 0;
\r
5974 stream_.mode = UNINITIALIZED;
\r
5975 stream_.state = STREAM_CLOSED;
\r
5978 void RtApiDs :: startStream()
\r
5981 if ( stream_.state == STREAM_RUNNING ) {
\r
5982 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5983 error( RtAudioError::WARNING );
\r
5987 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5989 // Increase scheduler frequency on lesser windows (a side-effect of
\r
5990 // increasing timer accuracy). On greater windows (Win2K or later),
\r
5991 // this is already in effect.
\r
5992 timeBeginPeriod( 1 );
\r
5994 buffersRolling = false;
\r
5995 duplexPrerollBytes = 0;
\r
5997 if ( stream_.mode == DUPLEX ) {
\r
5998 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
5999 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6002 HRESULT result = 0;
\r
6003 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6005 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6006 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6007 if ( FAILED( result ) ) {
\r
6008 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6009 errorText_ = errorStream_.str();
\r
6014 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6016 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6017 result = buffer->Start( DSCBSTART_LOOPING );
\r
6018 if ( FAILED( result ) ) {
\r
6019 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6020 errorText_ = errorStream_.str();
\r
6025 handle->drainCounter = 0;
\r
6026 handle->internalDrain = false;
\r
6027 ResetEvent( handle->condition );
\r
6028 stream_.state = STREAM_RUNNING;
\r
6031 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6034 void RtApiDs :: stopStream()
\r
6037 if ( stream_.state == STREAM_STOPPED ) {
\r
6038 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6039 error( RtAudioError::WARNING );
\r
6043 HRESULT result = 0;
\r
6046 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6047 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6048 if ( handle->drainCounter == 0 ) {
\r
6049 handle->drainCounter = 2;
\r
6050 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6053 stream_.state = STREAM_STOPPED;
\r
6055 MUTEX_LOCK( &stream_.mutex );
\r
6057 // Stop the buffer and clear memory
\r
6058 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6059 result = buffer->Stop();
\r
6060 if ( FAILED( result ) ) {
\r
6061 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6062 errorText_ = errorStream_.str();
\r
6066 // Lock the buffer and clear it so that if we start to play again,
\r
6067 // we won't have old data playing.
\r
6068 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6069 if ( FAILED( result ) ) {
\r
6070 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6071 errorText_ = errorStream_.str();
\r
6075 // Zero the DS buffer
\r
6076 ZeroMemory( audioPtr, dataLen );
\r
6078 // Unlock the DS buffer
\r
6079 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6080 if ( FAILED( result ) ) {
\r
6081 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6082 errorText_ = errorStream_.str();
\r
6086 // If we start playing again, we must begin at beginning of buffer.
\r
6087 handle->bufferPointer[0] = 0;
\r
6090 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6091 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6095 stream_.state = STREAM_STOPPED;
\r
6097 if ( stream_.mode != DUPLEX )
\r
6098 MUTEX_LOCK( &stream_.mutex );
\r
6100 result = buffer->Stop();
\r
6101 if ( FAILED( result ) ) {
\r
6102 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6103 errorText_ = errorStream_.str();
\r
6107 // Lock the buffer and clear it so that if we start to play again,
\r
6108 // we won't have old data playing.
\r
6109 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6110 if ( FAILED( result ) ) {
\r
6111 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6112 errorText_ = errorStream_.str();
\r
6116 // Zero the DS buffer
\r
6117 ZeroMemory( audioPtr, dataLen );
\r
6119 // Unlock the DS buffer
\r
6120 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6121 if ( FAILED( result ) ) {
\r
6122 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6123 errorText_ = errorStream_.str();
\r
6127 // If we start recording again, we must begin at beginning of buffer.
\r
6128 handle->bufferPointer[1] = 0;
\r
6132 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6133 MUTEX_UNLOCK( &stream_.mutex );
\r
6135 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6138 void RtApiDs :: abortStream()
\r
6141 if ( stream_.state == STREAM_STOPPED ) {
\r
6142 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6143 error( RtAudioError::WARNING );
\r
6147 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6148 handle->drainCounter = 2;
\r
6153 void RtApiDs :: callbackEvent()
\r
6155 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6156 Sleep( 50 ); // sleep 50 milliseconds
\r
6160 if ( stream_.state == STREAM_CLOSED ) {
\r
6161 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6162 error( RtAudioError::WARNING );
\r
6166 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6167 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6169 // Check if we were draining the stream and signal is finished.
\r
6170 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6172 stream_.state = STREAM_STOPPING;
\r
6173 if ( handle->internalDrain == false )
\r
6174 SetEvent( handle->condition );
\r
6180 // Invoke user callback to get fresh output data UNLESS we are
\r
6181 // draining stream.
\r
6182 if ( handle->drainCounter == 0 ) {
\r
6183 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6184 double streamTime = getStreamTime();
\r
6185 RtAudioStreamStatus status = 0;
\r
6186 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6187 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6188 handle->xrun[0] = false;
\r
6190 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6191 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6192 handle->xrun[1] = false;
\r
6194 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6195 stream_.bufferSize, streamTime, status, info->userData );
\r
6196 if ( cbReturnValue == 2 ) {
\r
6197 stream_.state = STREAM_STOPPING;
\r
6198 handle->drainCounter = 2;
\r
6202 else if ( cbReturnValue == 1 ) {
\r
6203 handle->drainCounter = 1;
\r
6204 handle->internalDrain = true;
\r
6209 DWORD currentWritePointer, safeWritePointer;
\r
6210 DWORD currentReadPointer, safeReadPointer;
\r
6211 UINT nextWritePointer;
\r
6213 LPVOID buffer1 = NULL;
\r
6214 LPVOID buffer2 = NULL;
\r
6215 DWORD bufferSize1 = 0;
\r
6216 DWORD bufferSize2 = 0;
\r
6221 MUTEX_LOCK( &stream_.mutex );
\r
6222 if ( stream_.state == STREAM_STOPPED ) {
\r
6223 MUTEX_UNLOCK( &stream_.mutex );
\r
6227 if ( buffersRolling == false ) {
\r
6228 if ( stream_.mode == DUPLEX ) {
\r
6229 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6231 // It takes a while for the devices to get rolling. As a result,
\r
6232 // there's no guarantee that the capture and write device pointers
\r
6233 // will move in lockstep. Wait here for both devices to start
\r
6234 // rolling, and then set our buffer pointers accordingly.
\r
6235 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6236 // bytes later than the write buffer.
\r
6238 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6239 // take place between the two GetCurrentPosition calls... but I'm
\r
6240 // really not sure how to solve the problem. Temporarily boost to
\r
6241 // Realtime priority, maybe; but I'm not sure what priority the
\r
6242 // DirectSound service threads run at. We *should* be roughly
\r
6243 // within a ms or so of correct.
\r
6245 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6246 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6248 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6250 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6251 if ( FAILED( result ) ) {
\r
6252 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6253 errorText_ = errorStream_.str();
\r
6254 error( RtAudioError::SYSTEM_ERROR );
\r
6257 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6258 if ( FAILED( result ) ) {
\r
6259 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6260 errorText_ = errorStream_.str();
\r
6261 error( RtAudioError::SYSTEM_ERROR );
\r
6265 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6266 if ( FAILED( result ) ) {
\r
6267 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6268 errorText_ = errorStream_.str();
\r
6269 error( RtAudioError::SYSTEM_ERROR );
\r
6272 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6273 if ( FAILED( result ) ) {
\r
6274 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6275 errorText_ = errorStream_.str();
\r
6276 error( RtAudioError::SYSTEM_ERROR );
\r
6279 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6283 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6285 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6286 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6287 handle->bufferPointer[1] = safeReadPointer;
\r
6289 else if ( stream_.mode == OUTPUT ) {
\r
6291 // Set the proper nextWritePosition after initial startup.
\r
6292 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6293 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6294 if ( FAILED( result ) ) {
\r
6295 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6296 errorText_ = errorStream_.str();
\r
6297 error( RtAudioError::SYSTEM_ERROR );
\r
6300 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6301 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6304 buffersRolling = true;
\r
6307 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6309 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6311 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6312 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6313 bufferBytes *= formatBytes( stream_.userFormat );
\r
6314 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6317 // Setup parameters and do buffer conversion if necessary.
\r
6318 if ( stream_.doConvertBuffer[0] ) {
\r
6319 buffer = stream_.deviceBuffer;
\r
6320 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6321 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6322 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6325 buffer = stream_.userBuffer[0];
\r
6326 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6327 bufferBytes *= formatBytes( stream_.userFormat );
\r
6330 // No byte swapping necessary in DirectSound implementation.
\r
6332 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6333 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6335 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6336 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6338 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6339 nextWritePointer = handle->bufferPointer[0];
\r
6341 DWORD endWrite, leadPointer;
\r
6343 // Find out where the read and "safe write" pointers are.
\r
6344 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6345 if ( FAILED( result ) ) {
\r
6346 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6347 errorText_ = errorStream_.str();
\r
6348 error( RtAudioError::SYSTEM_ERROR );
\r
6352 // We will copy our output buffer into the region between
\r
6353 // safeWritePointer and leadPointer. If leadPointer is not
\r
6354 // beyond the next endWrite position, wait until it is.
\r
6355 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6356 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6357 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6358 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6359 endWrite = nextWritePointer + bufferBytes;
\r
6361 // Check whether the entire write region is behind the play pointer.
\r
6362 if ( leadPointer >= endWrite ) break;
\r
6364 // If we are here, then we must wait until the leadPointer advances
\r
6365 // beyond the end of our next write region. We use the
\r
6366 // Sleep() function to suspend operation until that happens.
\r
6367 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6368 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6369 if ( millis < 1.0 ) millis = 1.0;
\r
6370 Sleep( (DWORD) millis );
\r
6373 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6374 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6375 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6376 handle->xrun[0] = true;
\r
6377 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6378 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6379 handle->bufferPointer[0] = nextWritePointer;
\r
6380 endWrite = nextWritePointer + bufferBytes;
\r
6383 // Lock free space in the buffer
\r
6384 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6385 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6386 if ( FAILED( result ) ) {
\r
6387 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6388 errorText_ = errorStream_.str();
\r
6389 error( RtAudioError::SYSTEM_ERROR );
\r
6393 // Copy our buffer into the DS buffer
\r
6394 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6395 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6397 // Update our buffer offset and unlock sound buffer
\r
6398 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6399 if ( FAILED( result ) ) {
\r
6400 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6401 errorText_ = errorStream_.str();
\r
6402 error( RtAudioError::SYSTEM_ERROR );
\r
6405 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6406 handle->bufferPointer[0] = nextWritePointer;
\r
6409 // Don't bother draining input
\r
6410 if ( handle->drainCounter ) {
\r
6411 handle->drainCounter++;
\r
6415 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6417 // Setup parameters.
\r
6418 if ( stream_.doConvertBuffer[1] ) {
\r
6419 buffer = stream_.deviceBuffer;
\r
6420 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6421 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6424 buffer = stream_.userBuffer[1];
\r
6425 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6426 bufferBytes *= formatBytes( stream_.userFormat );
\r
6429 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6430 long nextReadPointer = handle->bufferPointer[1];
\r
6431 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6433 // Find out where the write and "safe read" pointers are.
\r
6434 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6435 if ( FAILED( result ) ) {
\r
6436 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6437 errorText_ = errorStream_.str();
\r
6438 error( RtAudioError::SYSTEM_ERROR );
\r
6442 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6443 DWORD endRead = nextReadPointer + bufferBytes;
\r
6445 // Handling depends on whether we are INPUT or DUPLEX.
\r
6446 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6447 // then a wait here will drag the write pointers into the forbidden zone.
\r
6449 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6450 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6451 // practical way to sync up the read and write pointers reliably, given the
\r
6452 // the very complex relationship between phase and increment of the read and write
\r
6455 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6456 // provide a pre-roll period of 0.5 seconds in which we return
\r
6457 // zeros from the read buffer while the pointers sync up.
\r
6459 if ( stream_.mode == DUPLEX ) {
\r
6460 if ( safeReadPointer < endRead ) {
\r
6461 if ( duplexPrerollBytes <= 0 ) {
\r
6462 // Pre-roll time over. Be more agressive.
\r
6463 int adjustment = endRead-safeReadPointer;
\r
6465 handle->xrun[1] = true;
\r
6467 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6468 // and perform fine adjustments later.
\r
6469 // - small adjustments: back off by twice as much.
\r
6470 if ( adjustment >= 2*bufferBytes )
\r
6471 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6473 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6475 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6479 // In pre=roll time. Just do it.
\r
6480 nextReadPointer = safeReadPointer - bufferBytes;
\r
6481 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6483 endRead = nextReadPointer + bufferBytes;
\r
6486 else { // mode == INPUT
\r
6487 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6488 // See comments for playback.
\r
6489 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6490 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6491 if ( millis < 1.0 ) millis = 1.0;
\r
6492 Sleep( (DWORD) millis );
\r
6494 // Wake up and find out where we are now.
\r
6495 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6496 if ( FAILED( result ) ) {
\r
6497 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6498 errorText_ = errorStream_.str();
\r
6499 error( RtAudioError::SYSTEM_ERROR );
\r
6503 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6507 // Lock free space in the buffer
\r
6508 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6509 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6510 if ( FAILED( result ) ) {
\r
6511 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6512 errorText_ = errorStream_.str();
\r
6513 error( RtAudioError::SYSTEM_ERROR );
\r
6517 if ( duplexPrerollBytes <= 0 ) {
\r
6518 // Copy our buffer into the DS buffer
\r
6519 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6520 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6523 memset( buffer, 0, bufferSize1 );
\r
6524 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6525 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6528 // Update our buffer offset and unlock sound buffer
\r
6529 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6530 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6531 if ( FAILED( result ) ) {
\r
6532 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6533 errorText_ = errorStream_.str();
\r
6534 error( RtAudioError::SYSTEM_ERROR );
\r
6537 handle->bufferPointer[1] = nextReadPointer;
\r
6539 // No byte swapping necessary in DirectSound implementation.
\r
6541 // If necessary, convert 8-bit data from unsigned to signed.
\r
6542 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6543 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6545 // Do buffer conversion if necessary.
\r
6546 if ( stream_.doConvertBuffer[1] )
\r
6547 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6551 MUTEX_UNLOCK( &stream_.mutex );
\r
6552 RtApi::tickStreamTime();
\r
6555 // Definitions for utility functions and callbacks
\r
6556 // specific to the DirectSound implementation.
\r
6558 static unsigned __stdcall callbackHandler( void *ptr )
\r
6560 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6561 RtApiDs *object = (RtApiDs *) info->object;
\r
6562 bool* isRunning = &info->isRunning;
\r
6564 while ( *isRunning == true ) {
\r
6565 object->callbackEvent();
\r
6568 _endthreadex( 0 );
\r
6572 #include "tchar.h"
\r
6574 static std::string convertTChar( LPCTSTR name )
\r
6576 #if defined( UNICODE ) || defined( _UNICODE )
\r
6577 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6578 std::string s( length-1, '\0' );
\r
6579 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6581 std::string s( name );
\r
6587 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6588 LPCTSTR description,
\r
6589 LPCTSTR /*module*/,
\r
6590 LPVOID lpContext )
\r
6592 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6593 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6596 bool validDevice = false;
\r
6597 if ( probeInfo.isInput == true ) {
\r
6599 LPDIRECTSOUNDCAPTURE object;
\r
6601 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6602 if ( hr != DS_OK ) return TRUE;
\r
6604 caps.dwSize = sizeof(caps);
\r
6605 hr = object->GetCaps( &caps );
\r
6606 if ( hr == DS_OK ) {
\r
6607 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6608 validDevice = true;
\r
6610 object->Release();
\r
6614 LPDIRECTSOUND object;
\r
6615 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6616 if ( hr != DS_OK ) return TRUE;
\r
6618 caps.dwSize = sizeof(caps);
\r
6619 hr = object->GetCaps( &caps );
\r
6620 if ( hr == DS_OK ) {
\r
6621 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6622 validDevice = true;
\r
6624 object->Release();
\r
6627 // If good device, then save its name and guid.
\r
6628 std::string name = convertTChar( description );
\r
6629 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6630 if ( lpguid == NULL )
\r
6631 name = "Default Device";
\r
6632 if ( validDevice ) {
\r
6633 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6634 if ( dsDevices[i].name == name ) {
\r
6635 dsDevices[i].found = true;
\r
6636 if ( probeInfo.isInput ) {
\r
6637 dsDevices[i].id[1] = lpguid;
\r
6638 dsDevices[i].validId[1] = true;
\r
6641 dsDevices[i].id[0] = lpguid;
\r
6642 dsDevices[i].validId[0] = true;
\r
6649 device.name = name;
\r
6650 device.found = true;
\r
6651 if ( probeInfo.isInput ) {
\r
6652 device.id[1] = lpguid;
\r
6653 device.validId[1] = true;
\r
6656 device.id[0] = lpguid;
\r
6657 device.validId[0] = true;
\r
6659 dsDevices.push_back( device );
\r
6665 static const char* getErrorString( int code )
\r
6669 case DSERR_ALLOCATED:
\r
6670 return "Already allocated";
\r
6672 case DSERR_CONTROLUNAVAIL:
\r
6673 return "Control unavailable";
\r
6675 case DSERR_INVALIDPARAM:
\r
6676 return "Invalid parameter";
\r
6678 case DSERR_INVALIDCALL:
\r
6679 return "Invalid call";
\r
6681 case DSERR_GENERIC:
\r
6682 return "Generic error";
\r
6684 case DSERR_PRIOLEVELNEEDED:
\r
6685 return "Priority level needed";
\r
6687 case DSERR_OUTOFMEMORY:
\r
6688 return "Out of memory";
\r
6690 case DSERR_BADFORMAT:
\r
6691 return "The sample rate or the channel format is not supported";
\r
6693 case DSERR_UNSUPPORTED:
\r
6694 return "Not supported";
\r
6696 case DSERR_NODRIVER:
\r
6697 return "No driver";
\r
6699 case DSERR_ALREADYINITIALIZED:
\r
6700 return "Already initialized";
\r
6702 case DSERR_NOAGGREGATION:
\r
6703 return "No aggregation";
\r
6705 case DSERR_BUFFERLOST:
\r
6706 return "Buffer lost";
\r
6708 case DSERR_OTHERAPPHASPRIO:
\r
6709 return "Another application already has priority";
\r
6711 case DSERR_UNINITIALIZED:
\r
6712 return "Uninitialized";
\r
6715 return "DirectSound unknown error";
\r
6718 //******************** End of __WINDOWS_DS__ *********************//
\r
6722 #if defined(__LINUX_ALSA__)
\r
6724 #include <alsa/asoundlib.h>
\r
6725 #include <unistd.h>
\r
6727 // A structure to hold various information related to the ALSA API
\r
6728 // implementation.
\r
6729 struct AlsaHandle {
\r
6730 snd_pcm_t *handles[2];
\r
6731 bool synchronized;
\r
6733 pthread_cond_t runnable_cv;
\r
6737 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6740 static void *alsaCallbackHandler( void * ptr );
\r
6742 RtApiAlsa :: RtApiAlsa()
\r
6744 // Nothing to do here.
\r
6747 RtApiAlsa :: ~RtApiAlsa()
\r
6749 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6752 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6754 unsigned nDevices = 0;
\r
6755 int result, subdevice, card;
\r
6757 snd_ctl_t *handle;
\r
6759 // Count cards and devices
\r
6761 snd_card_next( &card );
\r
6762 while ( card >= 0 ) {
\r
6763 sprintf( name, "hw:%d", card );
\r
6764 result = snd_ctl_open( &handle, name, 0 );
\r
6765 if ( result < 0 ) {
\r
6766 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6767 errorText_ = errorStream_.str();
\r
6768 error( RtAudioError::WARNING );
\r
6773 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6774 if ( result < 0 ) {
\r
6775 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6776 errorText_ = errorStream_.str();
\r
6777 error( RtAudioError::WARNING );
\r
6780 if ( subdevice < 0 )
\r
6785 snd_ctl_close( handle );
\r
6786 snd_card_next( &card );
\r
6789 result = snd_ctl_open( &handle, "default", 0 );
\r
6790 if (result == 0) {
\r
6792 snd_ctl_close( handle );
\r
6798 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6800 RtAudio::DeviceInfo info;
\r
6801 info.probed = false;
\r
6803 unsigned nDevices = 0;
\r
6804 int result, subdevice, card;
\r
6806 snd_ctl_t *chandle;
\r
6808 // Count cards and devices
\r
6810 snd_card_next( &card );
\r
6811 while ( card >= 0 ) {
\r
6812 sprintf( name, "hw:%d", card );
\r
6813 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6814 if ( result < 0 ) {
\r
6815 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6816 errorText_ = errorStream_.str();
\r
6817 error( RtAudioError::WARNING );
\r
6822 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6823 if ( result < 0 ) {
\r
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6825 errorText_ = errorStream_.str();
\r
6826 error( RtAudioError::WARNING );
\r
6829 if ( subdevice < 0 ) break;
\r
6830 if ( nDevices == device ) {
\r
6831 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6837 snd_ctl_close( chandle );
\r
6838 snd_card_next( &card );
\r
6841 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6842 if ( result == 0 ) {
\r
6843 if ( nDevices == device ) {
\r
6844 strcpy( name, "default" );
\r
6850 if ( nDevices == 0 ) {
\r
6851 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6852 error( RtAudioError::INVALID_USE );
\r
6856 if ( device >= nDevices ) {
\r
6857 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6858 error( RtAudioError::INVALID_USE );
\r
6864 // If a stream is already open, we cannot probe the stream devices.
\r
6865 // Thus, use the saved results.
\r
6866 if ( stream_.state != STREAM_CLOSED &&
\r
6867 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6868 snd_ctl_close( chandle );
\r
6869 if ( device >= devices_.size() ) {
\r
6870 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6871 error( RtAudioError::WARNING );
\r
6874 return devices_[ device ];
\r
6877 int openMode = SND_PCM_ASYNC;
\r
6878 snd_pcm_stream_t stream;
\r
6879 snd_pcm_info_t *pcminfo;
\r
6880 snd_pcm_info_alloca( &pcminfo );
\r
6881 snd_pcm_t *phandle;
\r
6882 snd_pcm_hw_params_t *params;
\r
6883 snd_pcm_hw_params_alloca( ¶ms );
\r
6885 // First try for playback unless default device (which has subdev -1)
\r
6886 stream = SND_PCM_STREAM_PLAYBACK;
\r
6887 snd_pcm_info_set_stream( pcminfo, stream );
\r
6888 if ( subdevice != -1 ) {
\r
6889 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6890 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6892 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6893 if ( result < 0 ) {
\r
6894 // Device probably doesn't support playback.
\r
6895 goto captureProbe;
\r
6899 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6900 if ( result < 0 ) {
\r
6901 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6902 errorText_ = errorStream_.str();
\r
6903 error( RtAudioError::WARNING );
\r
6904 goto captureProbe;
\r
6907 // The device is open ... fill the parameter structure.
\r
6908 result = snd_pcm_hw_params_any( phandle, params );
\r
6909 if ( result < 0 ) {
\r
6910 snd_pcm_close( phandle );
\r
6911 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6912 errorText_ = errorStream_.str();
\r
6913 error( RtAudioError::WARNING );
\r
6914 goto captureProbe;
\r
6917 // Get output channel information.
\r
6918 unsigned int value;
\r
6919 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6920 if ( result < 0 ) {
\r
6921 snd_pcm_close( phandle );
\r
6922 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6923 errorText_ = errorStream_.str();
\r
6924 error( RtAudioError::WARNING );
\r
6925 goto captureProbe;
\r
6927 info.outputChannels = value;
\r
6928 snd_pcm_close( phandle );
\r
6931 stream = SND_PCM_STREAM_CAPTURE;
\r
6932 snd_pcm_info_set_stream( pcminfo, stream );
\r
6934 // Now try for capture unless default device (with subdev = -1)
\r
6935 if ( subdevice != -1 ) {
\r
6936 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6937 snd_ctl_close( chandle );
\r
6938 if ( result < 0 ) {
\r
6939 // Device probably doesn't support capture.
\r
6940 if ( info.outputChannels == 0 ) return info;
\r
6941 goto probeParameters;
\r
6945 snd_ctl_close( chandle );
\r
6947 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6948 if ( result < 0 ) {
\r
6949 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6950 errorText_ = errorStream_.str();
\r
6951 error( RtAudioError::WARNING );
\r
6952 if ( info.outputChannels == 0 ) return info;
\r
6953 goto probeParameters;
\r
6956 // The device is open ... fill the parameter structure.
\r
6957 result = snd_pcm_hw_params_any( phandle, params );
\r
6958 if ( result < 0 ) {
\r
6959 snd_pcm_close( phandle );
\r
6960 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6961 errorText_ = errorStream_.str();
\r
6962 error( RtAudioError::WARNING );
\r
6963 if ( info.outputChannels == 0 ) return info;
\r
6964 goto probeParameters;
\r
6967 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6968 if ( result < 0 ) {
\r
6969 snd_pcm_close( phandle );
\r
6970 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6971 errorText_ = errorStream_.str();
\r
6972 error( RtAudioError::WARNING );
\r
6973 if ( info.outputChannels == 0 ) return info;
\r
6974 goto probeParameters;
\r
6976 info.inputChannels = value;
\r
6977 snd_pcm_close( phandle );
\r
6979 // If device opens for both playback and capture, we determine the channels.
\r
6980 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6981 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6983 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6984 if ( device == 0 && info.outputChannels > 0 )
\r
6985 info.isDefaultOutput = true;
\r
6986 if ( device == 0 && info.inputChannels > 0 )
\r
6987 info.isDefaultInput = true;
\r
6990 // At this point, we just need to figure out the supported data
\r
6991 // formats and sample rates. We'll proceed by opening the device in
\r
6992 // the direction with the maximum number of channels, or playback if
\r
6993 // they are equal. This might limit our sample rate options, but so
\r
6996 if ( info.outputChannels >= info.inputChannels )
\r
6997 stream = SND_PCM_STREAM_PLAYBACK;
\r
6999 stream = SND_PCM_STREAM_CAPTURE;
\r
7000 snd_pcm_info_set_stream( pcminfo, stream );
\r
7002 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7003 if ( result < 0 ) {
\r
7004 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7005 errorText_ = errorStream_.str();
\r
7006 error( RtAudioError::WARNING );
\r
7010 // The device is open ... fill the parameter structure.
\r
7011 result = snd_pcm_hw_params_any( phandle, params );
\r
7012 if ( result < 0 ) {
\r
7013 snd_pcm_close( phandle );
\r
7014 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7015 errorText_ = errorStream_.str();
\r
7016 error( RtAudioError::WARNING );
\r
7020 // Test our discrete set of sample rate values.
\r
7021 info.sampleRates.clear();
\r
7022 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7023 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7024 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7026 if ( info.sampleRates.size() == 0 ) {
\r
7027 snd_pcm_close( phandle );
\r
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7029 errorText_ = errorStream_.str();
\r
7030 error( RtAudioError::WARNING );
\r
7034 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7035 snd_pcm_format_t format;
\r
7036 info.nativeFormats = 0;
\r
7037 format = SND_PCM_FORMAT_S8;
\r
7038 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7039 info.nativeFormats |= RTAUDIO_SINT8;
\r
7040 format = SND_PCM_FORMAT_S16;
\r
7041 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7042 info.nativeFormats |= RTAUDIO_SINT16;
\r
7043 format = SND_PCM_FORMAT_S24;
\r
7044 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7045 info.nativeFormats |= RTAUDIO_SINT24;
\r
7046 format = SND_PCM_FORMAT_S32;
\r
7047 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7048 info.nativeFormats |= RTAUDIO_SINT32;
\r
7049 format = SND_PCM_FORMAT_FLOAT;
\r
7050 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7051 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7052 format = SND_PCM_FORMAT_FLOAT64;
\r
7053 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7054 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7056 // Check that we have at least one supported format
\r
7057 if ( info.nativeFormats == 0 ) {
\r
7058 snd_pcm_close( phandle );
\r
7059 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7060 errorText_ = errorStream_.str();
\r
7061 error( RtAudioError::WARNING );
\r
7065 // Get the device name
\r
7067 result = snd_card_get_name( card, &cardname );
\r
7068 if ( result >= 0 ) {
\r
7069 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7074 // That's all ... close the device and return
\r
7075 snd_pcm_close( phandle );
\r
7076 info.probed = true;
\r
7080 void RtApiAlsa :: saveDeviceInfo( void )
\r
7084 unsigned int nDevices = getDeviceCount();
\r
7085 devices_.resize( nDevices );
\r
7086 for ( unsigned int i=0; i<nDevices; i++ )
\r
7087 devices_[i] = getDeviceInfo( i );
\r
7090 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7091 unsigned int firstChannel, unsigned int sampleRate,
\r
7092 RtAudioFormat format, unsigned int *bufferSize,
\r
7093 RtAudio::StreamOptions *options )
\r
7096 #if defined(__RTAUDIO_DEBUG__)
\r
7097 snd_output_t *out;
\r
7098 snd_output_stdio_attach(&out, stderr, 0);
\r
7101 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7103 unsigned nDevices = 0;
\r
7104 int result, subdevice, card;
\r
7106 snd_ctl_t *chandle;
\r
7108 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7109 snprintf(name, sizeof(name), "%s", "default");
\r
7111 // Count cards and devices
\r
7113 snd_card_next( &card );
\r
7114 while ( card >= 0 ) {
\r
7115 sprintf( name, "hw:%d", card );
\r
7116 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7117 if ( result < 0 ) {
\r
7118 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7119 errorText_ = errorStream_.str();
\r
7124 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7125 if ( result < 0 ) break;
\r
7126 if ( subdevice < 0 ) break;
\r
7127 if ( nDevices == device ) {
\r
7128 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7129 snd_ctl_close( chandle );
\r
7134 snd_ctl_close( chandle );
\r
7135 snd_card_next( &card );
\r
7138 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7139 if ( result == 0 ) {
\r
7140 if ( nDevices == device ) {
\r
7141 strcpy( name, "default" );
\r
7147 if ( nDevices == 0 ) {
\r
7148 // This should not happen because a check is made before this function is called.
\r
7149 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7153 if ( device >= nDevices ) {
\r
7154 // This should not happen because a check is made before this function is called.
\r
7155 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7162 // The getDeviceInfo() function will not work for a device that is
\r
7163 // already open. Thus, we'll probe the system before opening a
\r
7164 // stream and save the results for use by getDeviceInfo().
\r
7165 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7166 this->saveDeviceInfo();
\r
7168 snd_pcm_stream_t stream;
\r
7169 if ( mode == OUTPUT )
\r
7170 stream = SND_PCM_STREAM_PLAYBACK;
\r
7172 stream = SND_PCM_STREAM_CAPTURE;
\r
7174 snd_pcm_t *phandle;
\r
7175 int openMode = SND_PCM_ASYNC;
\r
7176 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7177 if ( result < 0 ) {
\r
7178 if ( mode == OUTPUT )
\r
7179 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7181 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7182 errorText_ = errorStream_.str();
\r
7186 // Fill the parameter structure.
\r
7187 snd_pcm_hw_params_t *hw_params;
\r
7188 snd_pcm_hw_params_alloca( &hw_params );
\r
7189 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7190 if ( result < 0 ) {
\r
7191 snd_pcm_close( phandle );
\r
7192 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7193 errorText_ = errorStream_.str();
\r
7197 #if defined(__RTAUDIO_DEBUG__)
\r
7198 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7199 snd_pcm_hw_params_dump( hw_params, out );
\r
7202 // Set access ... check user preference.
\r
7203 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7204 stream_.userInterleaved = false;
\r
7205 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7206 if ( result < 0 ) {
\r
7207 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7208 stream_.deviceInterleaved[mode] = true;
\r
7211 stream_.deviceInterleaved[mode] = false;
\r
7214 stream_.userInterleaved = true;
\r
7215 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7216 if ( result < 0 ) {
\r
7217 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7218 stream_.deviceInterleaved[mode] = false;
\r
7221 stream_.deviceInterleaved[mode] = true;
\r
7224 if ( result < 0 ) {
\r
7225 snd_pcm_close( phandle );
\r
7226 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7227 errorText_ = errorStream_.str();
\r
7231 // Determine how to set the device format.
\r
7232 stream_.userFormat = format;
\r
7233 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7235 if ( format == RTAUDIO_SINT8 )
\r
7236 deviceFormat = SND_PCM_FORMAT_S8;
\r
7237 else if ( format == RTAUDIO_SINT16 )
\r
7238 deviceFormat = SND_PCM_FORMAT_S16;
\r
7239 else if ( format == RTAUDIO_SINT24 )
\r
7240 deviceFormat = SND_PCM_FORMAT_S24;
\r
7241 else if ( format == RTAUDIO_SINT32 )
\r
7242 deviceFormat = SND_PCM_FORMAT_S32;
\r
7243 else if ( format == RTAUDIO_FLOAT32 )
\r
7244 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7245 else if ( format == RTAUDIO_FLOAT64 )
\r
7246 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7248 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7249 stream_.deviceFormat[mode] = format;
\r
7253 // The user requested format is not natively supported by the device.
\r
7254 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7255 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7256 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7260 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7261 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7262 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7266 deviceFormat = SND_PCM_FORMAT_S32;
\r
7267 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7268 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7272 deviceFormat = SND_PCM_FORMAT_S24;
\r
7273 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7274 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7278 deviceFormat = SND_PCM_FORMAT_S16;
\r
7279 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7280 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7284 deviceFormat = SND_PCM_FORMAT_S8;
\r
7285 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7286 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7290 // If we get here, no supported format was found.
\r
7291 snd_pcm_close( phandle );
\r
7292 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7293 errorText_ = errorStream_.str();
\r
7297 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7298 if ( result < 0 ) {
\r
7299 snd_pcm_close( phandle );
\r
7300 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7301 errorText_ = errorStream_.str();
\r
7305 // Determine whether byte-swaping is necessary.
\r
7306 stream_.doByteSwap[mode] = false;
\r
7307 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7308 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7309 if ( result == 0 )
\r
7310 stream_.doByteSwap[mode] = true;
\r
7311 else if (result < 0) {
\r
7312 snd_pcm_close( phandle );
\r
7313 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7314 errorText_ = errorStream_.str();
\r
7319 // Set the sample rate.
\r
7320 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7321 if ( result < 0 ) {
\r
7322 snd_pcm_close( phandle );
\r
7323 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7324 errorText_ = errorStream_.str();
\r
7328 // Determine the number of channels for this device. We support a possible
\r
7329 // minimum device channel number > than the value requested by the user.
\r
7330 stream_.nUserChannels[mode] = channels;
\r
7331 unsigned int value;
\r
7332 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7333 unsigned int deviceChannels = value;
\r
7334 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7335 snd_pcm_close( phandle );
\r
7336 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7337 errorText_ = errorStream_.str();
\r
7341 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7342 if ( result < 0 ) {
\r
7343 snd_pcm_close( phandle );
\r
7344 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7345 errorText_ = errorStream_.str();
\r
7348 deviceChannels = value;
\r
7349 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7350 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7352 // Set the device channels.
\r
7353 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7354 if ( result < 0 ) {
\r
7355 snd_pcm_close( phandle );
\r
7356 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7357 errorText_ = errorStream_.str();
\r
7361 // Set the buffer (or period) size.
\r
7363 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7364 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7365 if ( result < 0 ) {
\r
7366 snd_pcm_close( phandle );
\r
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7368 errorText_ = errorStream_.str();
\r
7371 *bufferSize = periodSize;
\r
7373 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7374 unsigned int periods = 0;
\r
7375 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7376 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7377 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7378 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7379 if ( result < 0 ) {
\r
7380 snd_pcm_close( phandle );
\r
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7382 errorText_ = errorStream_.str();
\r
7386 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7387 // MUST be the same in both directions!
\r
7388 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7391 errorText_ = errorStream_.str();
\r
7395 stream_.bufferSize = *bufferSize;
\r
7397 // Install the hardware configuration
\r
7398 result = snd_pcm_hw_params( phandle, hw_params );
\r
7399 if ( result < 0 ) {
\r
7400 snd_pcm_close( phandle );
\r
7401 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7402 errorText_ = errorStream_.str();
\r
7406 #if defined(__RTAUDIO_DEBUG__)
\r
7407 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7408 snd_pcm_hw_params_dump( hw_params, out );
\r
7411 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7412 snd_pcm_sw_params_t *sw_params = NULL;
\r
7413 snd_pcm_sw_params_alloca( &sw_params );
\r
7414 snd_pcm_sw_params_current( phandle, sw_params );
\r
7415 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7416 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7417 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7419 // The following two settings were suggested by Theo Veenker
\r
7420 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7421 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7423 // here are two options for a fix
\r
7424 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7425 snd_pcm_uframes_t val;
\r
7426 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7427 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7429 result = snd_pcm_sw_params( phandle, sw_params );
\r
7430 if ( result < 0 ) {
\r
7431 snd_pcm_close( phandle );
\r
7432 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7433 errorText_ = errorStream_.str();
\r
7437 #if defined(__RTAUDIO_DEBUG__)
\r
7438 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7439 snd_pcm_sw_params_dump( sw_params, out );
\r
7442 // Set flags for buffer conversion
\r
7443 stream_.doConvertBuffer[mode] = false;
\r
7444 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7445 stream_.doConvertBuffer[mode] = true;
\r
7446 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7447 stream_.doConvertBuffer[mode] = true;
\r
7448 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7449 stream_.nUserChannels[mode] > 1 )
\r
7450 stream_.doConvertBuffer[mode] = true;
\r
7452 // Allocate the ApiHandle if necessary and then save.
\r
7453 AlsaHandle *apiInfo = 0;
\r
7454 if ( stream_.apiHandle == 0 ) {
\r
7456 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7458 catch ( std::bad_alloc& ) {
\r
7459 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7463 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7464 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7468 stream_.apiHandle = (void *) apiInfo;
\r
7469 apiInfo->handles[0] = 0;
\r
7470 apiInfo->handles[1] = 0;
\r
7473 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7475 apiInfo->handles[mode] = phandle;
\r
7478 // Allocate necessary internal buffers.
\r
7479 unsigned long bufferBytes;
\r
7480 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7481 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7482 if ( stream_.userBuffer[mode] == NULL ) {
\r
7483 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7487 if ( stream_.doConvertBuffer[mode] ) {
\r
7489 bool makeBuffer = true;
\r
7490 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7491 if ( mode == INPUT ) {
\r
7492 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7493 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7494 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7498 if ( makeBuffer ) {
\r
7499 bufferBytes *= *bufferSize;
\r
7500 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7501 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7502 if ( stream_.deviceBuffer == NULL ) {
\r
7503 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7509 stream_.sampleRate = sampleRate;
\r
7510 stream_.nBuffers = periods;
\r
7511 stream_.device[mode] = device;
\r
7512 stream_.state = STREAM_STOPPED;
\r
7514 // Setup the buffer conversion information structure.
\r
7515 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7517 // Setup thread if necessary.
\r
7518 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7519 // We had already set up an output stream.
\r
7520 stream_.mode = DUPLEX;
\r
7521 // Link the streams if possible.
\r
7522 apiInfo->synchronized = false;
\r
7523 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7524 apiInfo->synchronized = true;
\r
7526 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7527 error( RtAudioError::WARNING );
\r
7531 stream_.mode = mode;
\r
7533 // Setup callback thread.
\r
7534 stream_.callbackInfo.object = (void *) this;
\r
7536 // Set the thread attributes for joinable and realtime scheduling
\r
7537 // priority (optional). The higher priority will only take affect
\r
7538 // if the program is run as root or suid. Note, under Linux
\r
7539 // processes with CAP_SYS_NICE privilege, a user can change
\r
7540 // scheduling policy and priority (thus need not be root). See
\r
7541 // POSIX "capabilities".
\r
7542 pthread_attr_t attr;
\r
7543 pthread_attr_init( &attr );
\r
7544 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7546 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7547 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7548 // We previously attempted to increase the audio callback priority
\r
7549 // to SCHED_RR here via the attributes. However, while no errors
\r
7550 // were reported in doing so, it did not work. So, now this is
\r
7551 // done in the alsaCallbackHandler function.
\r
7552 stream_.callbackInfo.doRealtime = true;
\r
7553 int priority = options->priority;
\r
7554 int min = sched_get_priority_min( SCHED_RR );
\r
7555 int max = sched_get_priority_max( SCHED_RR );
\r
7556 if ( priority < min ) priority = min;
\r
7557 else if ( priority > max ) priority = max;
\r
7558 stream_.callbackInfo.priority = priority;
\r
7562 stream_.callbackInfo.isRunning = true;
\r
7563 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7564 pthread_attr_destroy( &attr );
\r
7566 stream_.callbackInfo.isRunning = false;
\r
7567 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7576 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7577 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7578 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7580 stream_.apiHandle = 0;
\r
7583 if ( phandle) snd_pcm_close( phandle );
\r
7585 for ( int i=0; i<2; i++ ) {
\r
7586 if ( stream_.userBuffer[i] ) {
\r
7587 free( stream_.userBuffer[i] );
\r
7588 stream_.userBuffer[i] = 0;
\r
7592 if ( stream_.deviceBuffer ) {
\r
7593 free( stream_.deviceBuffer );
\r
7594 stream_.deviceBuffer = 0;
\r
7597 stream_.state = STREAM_CLOSED;
\r
// Close an open ALSA stream: wake and join the callback thread, drop any
// in-flight pcm data, close both pcm handles, free the AlsaHandle and all
// user/device buffers, and mark the stream CLOSED.
// NOTE(review): this chunk is a partial extraction — interior lines (braces,
// else branches) are missing; code left byte-identical, comments only.
7601 void RtApiAlsa :: closeStream()
\r
7603 if ( stream_.state == STREAM_CLOSED ) {
\r
7604 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7605 error( RtAudioError::WARNING );
\r
7609 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Signal the callback thread to exit its run loop ...
7610 stream_.callbackInfo.isRunning = false;
\r
7611 MUTEX_LOCK( &stream_.mutex );
\r
// ... and, if it is parked waiting on runnable_cv (stream stopped), wake it
// so pthread_join below cannot deadlock.
7612 if ( stream_.state == STREAM_STOPPED ) {
\r
7613 apiInfo->runnable = true;
\r
7614 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7616 MUTEX_UNLOCK( &stream_.mutex );
\r
7617 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// Abort any active I/O before closing the devices.
7619 if ( stream_.state == STREAM_RUNNING ) {
\r
7620 stream_.state = STREAM_STOPPED;
\r
7621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7622 snd_pcm_drop( apiInfo->handles[0] );
\r
7623 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7624 snd_pcm_drop( apiInfo->handles[1] );
\r
// Tear down the per-API handle: handles[0] = playback, handles[1] = capture.
7628 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7629 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7630 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7632 stream_.apiHandle = 0;
\r
// Release the user-format buffers for both directions.
7635 for ( int i=0; i<2; i++ ) {
\r
7636 if ( stream_.userBuffer[i] ) {
\r
7637 free( stream_.userBuffer[i] );
\r
7638 stream_.userBuffer[i] = 0;
\r
7642 if ( stream_.deviceBuffer ) {
\r
7643 free( stream_.deviceBuffer );
\r
7644 stream_.deviceBuffer = 0;
\r
7647 stream_.mode = UNINITIALIZED;
\r
7648 stream_.state = STREAM_CLOSED;
\r
\r
// Start a stopped ALSA stream: prepare the pcm device(s) if needed, flush
// stale capture data, mark the stream RUNNING and wake the callback thread.
// NOTE(review): partial extraction — some interior lines are missing; code
// left byte-identical, comments only.
7651 void RtApiAlsa :: startStream()
\r
7653 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7656 if ( stream_.state == STREAM_RUNNING ) {
\r
7657 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7658 error( RtAudioError::WARNING );
\r
7662 MUTEX_LOCK( &stream_.mutex );
\r
7665 snd_pcm_state_t state;
\r
7666 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7667 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Playback side: handle[0].
7668 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7669 state = snd_pcm_state( handle[0] );
\r
7670 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7671 result = snd_pcm_prepare( handle[0] );
\r
7672 if ( result < 0 ) {
\r
7673 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7674 errorText_ = errorStream_.str();
\r
// Capture side: handle[1]. Skipped when the two pcms are linked
// (synchronized), since the link starts/stops them together.
7680 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7681 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7682 state = snd_pcm_state( handle[1] );
\r
7683 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7684 result = snd_pcm_prepare( handle[1] );
\r
7685 if ( result < 0 ) {
\r
7686 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7687 errorText_ = errorStream_.str();
\r
7693 stream_.state = STREAM_RUNNING;
\r
// Release the callback thread parked on runnable_cv in callbackEvent().
7696 apiInfo->runnable = true;
\r
7697 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7698 MUTEX_UNLOCK( &stream_.mutex );
\r
7700 if ( result >= 0 ) return;
\r
7701 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running ALSA stream gracefully: drain pending playback data
// (or drop it when the pcms are linked), drop capture data, and park the
// callback thread. NOTE(review): partial extraction — some interior lines
// are missing; code left byte-identical, comments only.
7704 void RtApiAlsa :: stopStream()
\r
7707 if ( stream_.state == STREAM_STOPPED ) {
\r
7708 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7709 error( RtAudioError::WARNING );
\r
// State is flipped before taking the mutex so the callback thread sees it.
7713 stream_.state = STREAM_STOPPED;
\r
7714 MUTEX_LOCK( &stream_.mutex );
\r
7717 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7718 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7719 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Linked (synchronized) pcms cannot be drained independently — drop instead.
7720 if ( apiInfo->synchronized )
\r
7721 result = snd_pcm_drop( handle[0] );
\r
7723 result = snd_pcm_drain( handle[0] );
\r
7724 if ( result < 0 ) {
\r
7725 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7726 errorText_ = errorStream_.str();
\r
7731 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7732 result = snd_pcm_drop( handle[1] );
\r
7733 if ( result < 0 ) {
\r
7734 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7735 errorText_ = errorStream_.str();
\r
7741 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7742 MUTEX_UNLOCK( &stream_.mutex );
\r
7744 if ( result >= 0 ) return;
\r
7745 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running ALSA stream immediately: snd_pcm_drop() on both directions
// (discarding pending frames, unlike stopStream's drain) and park the
// callback thread. NOTE(review): partial extraction — some interior lines
// are missing; code left byte-identical, comments only.
7748 void RtApiAlsa :: abortStream()
\r
7751 if ( stream_.state == STREAM_STOPPED ) {
\r
7752 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7753 error( RtAudioError::WARNING );
\r
7757 stream_.state = STREAM_STOPPED;
\r
7758 MUTEX_LOCK( &stream_.mutex );
\r
7761 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7762 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7763 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7764 result = snd_pcm_drop( handle[0] );
\r
7765 if ( result < 0 ) {
\r
7766 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7767 errorText_ = errorStream_.str();
\r
// Capture pcm handled separately only when not linked to playback.
7772 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7773 result = snd_pcm_drop( handle[1] );
\r
7774 if ( result < 0 ) {
\r
7775 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7776 errorText_ = errorStream_.str();
\r
7782 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7783 MUTEX_UNLOCK( &stream_.mutex );
\r
7785 if ( result >= 0 ) return;
\r
7786 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the ALSA callback-thread loop: wait while stopped, invoke
// the user callback, then read capture data and/or write playback data,
// handling xruns (-EPIPE) by re-preparing the pcm. The intricate ordering
// (callback -> read -> convert -> write) is intentional; code left
// byte-identical. NOTE(review): partial extraction — some interior lines
// (braces/else/goto labels) are missing.
7789 void RtApiAlsa :: callbackEvent()
\r
7791 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Park on the condition variable while the stream is stopped; startStream()
// sets runnable and signals runnable_cv to release us.
7792 if ( stream_.state == STREAM_STOPPED ) {
\r
7793 MUTEX_LOCK( &stream_.mutex );
\r
7794 while ( !apiInfo->runnable )
\r
7795 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7797 if ( stream_.state != STREAM_RUNNING ) {
\r
7798 MUTEX_UNLOCK( &stream_.mutex );
\r
7801 MUTEX_UNLOCK( &stream_.mutex );
\r
7804 if ( stream_.state == STREAM_CLOSED ) {
\r
7805 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7806 error( RtAudioError::WARNING );
\r
// Invoke the user callback, reporting any xrun flags recorded by the
// previous read/write pass (xrun[0] = output underflow, xrun[1] = input
// overflow).
7810 int doStopStream = 0;
\r
7811 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7812 double streamTime = getStreamTime();
\r
7813 RtAudioStreamStatus status = 0;
\r
7814 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7815 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7816 apiInfo->xrun[0] = false;
\r
7818 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7819 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7820 apiInfo->xrun[1] = false;
\r
7822 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7823 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
// Callback return of 2 requests an immediate abort (handled in the missing
// lines, presumably via abortStream — TODO confirm against full source).
7825 if ( doStopStream == 2 ) {
\r
7830 MUTEX_LOCK( &stream_.mutex );
\r
7832 // The state might change while waiting on a mutex.
\r
7833 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7838 snd_pcm_t **handle;
\r
7839 snd_pcm_sframes_t frames;
\r
7840 RtAudioFormat format;
\r
7841 handle = (snd_pcm_t **) apiInfo->handles;
\r
// ---------------- capture (input) pass ----------------
7843 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7845 // Setup parameters.
\r
// Read into deviceBuffer when a format/channel/interleave conversion is
// needed afterwards; otherwise read straight into the user buffer.
7846 if ( stream_.doConvertBuffer[1] ) {
\r
7847 buffer = stream_.deviceBuffer;
\r
7848 channels = stream_.nDeviceChannels[1];
\r
7849 format = stream_.deviceFormat[1];
\r
7852 buffer = stream_.userBuffer[1];
\r
7853 channels = stream_.nUserChannels[1];
\r
7854 format = stream_.userFormat;
\r
7857 // Read samples from device in interleaved/non-interleaved format.
\r
7858 if ( stream_.deviceInterleaved[1] )
\r
7859 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
// Non-interleaved: build one pointer per channel into the planar buffer.
7861 void *bufs[channels];
\r
7862 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7863 for ( int i=0; i<channels; i++ )
\r
7864 bufs[i] = (void *) (buffer + (i * offset));
\r
7865 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
// Short read: -EPIPE means overrun — flag it and re-prepare the pcm so the
// stream keeps going; other errors are reported as warnings.
7868 if ( result < (int) stream_.bufferSize ) {
\r
7869 // Either an error or overrun occured.
\r
7870 if ( result == -EPIPE ) {
\r
7871 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7872 if ( state == SND_PCM_STATE_XRUN ) {
\r
7873 apiInfo->xrun[1] = true;
\r
7874 result = snd_pcm_prepare( handle[1] );
\r
7875 if ( result < 0 ) {
\r
7876 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7877 errorText_ = errorStream_.str();
\r
7881 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7882 errorText_ = errorStream_.str();
\r
7886 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7887 errorText_ = errorStream_.str();
\r
7889 error( RtAudioError::WARNING );
\r
7893 // Do byte swapping if necessary.
\r
7894 if ( stream_.doByteSwap[1] )
\r
7895 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7897 // Do buffer conversion if necessary.
\r
7898 if ( stream_.doConvertBuffer[1] )
\r
7899 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7901 // Check stream latency
\r
7902 result = snd_pcm_delay( handle[1], &frames );
\r
7903 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
// ---------------- playback (output) pass ----------------
7908 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7910 // Setup parameters and do buffer conversion if necessary.
\r
7911 if ( stream_.doConvertBuffer[0] ) {
\r
7912 buffer = stream_.deviceBuffer;
\r
7913 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7914 channels = stream_.nDeviceChannels[0];
\r
7915 format = stream_.deviceFormat[0];
\r
7918 buffer = stream_.userBuffer[0];
\r
7919 channels = stream_.nUserChannels[0];
\r
7920 format = stream_.userFormat;
\r
7923 // Do byte swapping if necessary.
\r
7924 if ( stream_.doByteSwap[0] )
\r
7925 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7927 // Write samples to device in interleaved/non-interleaved format.
\r
7928 if ( stream_.deviceInterleaved[0] )
\r
7929 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7931 void *bufs[channels];
\r
7932 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7933 for ( int i=0; i<channels; i++ )
\r
7934 bufs[i] = (void *) (buffer + (i * offset));
\r
7935 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
// Short write: -EPIPE means underrun — flag, re-prepare and carry on.
7938 if ( result < (int) stream_.bufferSize ) {
\r
7939 // Either an error or underrun occured.
\r
7940 if ( result == -EPIPE ) {
\r
7941 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7942 if ( state == SND_PCM_STATE_XRUN ) {
\r
7943 apiInfo->xrun[0] = true;
\r
7944 result = snd_pcm_prepare( handle[0] );
\r
7945 if ( result < 0 ) {
\r
7946 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7947 errorText_ = errorStream_.str();
\r
7951 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7952 errorText_ = errorStream_.str();
\r
7956 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7957 errorText_ = errorStream_.str();
\r
7959 error( RtAudioError::WARNING );
\r
7963 // Check stream latency
\r
7964 result = snd_pcm_delay( handle[0], &frames );
\r
7965 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7969 MUTEX_UNLOCK( &stream_.mutex );
\r
7971 RtApi::tickStreamTime();
\r
// Callback return of 1 requests a graceful stop after this buffer.
7972 if ( doStopStream == 1 ) this->stopStream();
\r
7975 static void *alsaCallbackHandler( void *ptr )
\r
7977 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7978 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7979 bool *isRunning = &info->isRunning;
\r
7981 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7982 if ( &info->doRealtime ) {
\r
7983 pthread_t tID = pthread_self(); // ID of this thread
\r
7984 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7985 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7989 while ( *isRunning == true ) {
\r
7990 pthread_testcancel();
\r
7991 object->callbackEvent();
\r
7994 pthread_exit( NULL );
\r
7997 //******************** End of __LINUX_ALSA__ *********************//
\r
8000 #if defined(__LINUX_PULSE__)
\r
8002 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8003 // and Tristan Matthews.
\r
8005 #include <pulse/error.h>
\r
8006 #include <pulse/simple.h>
\r
// Static capability tables for the PulseAudio backend.
// SUPPORTED_SAMPLERATES is zero-terminated; getDeviceInfo() iterates it.
8009 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8010 44100, 48000, 96000, 0};
\r
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8012 struct rtaudio_pa_format_mapping_t {
\r
8013 RtAudioFormat rtaudio_format;
\r
8014 pa_sample_format_t pa_format;
\r
// Table terminated by the {0, PA_SAMPLE_INVALID} sentinel entry.
8017 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8018 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8019 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8020 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8021 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: the pa_simple playback
// connection plus the runnable flag/condvar used to park the callback
// thread while the stream is stopped. NOTE(review): partial extraction —
// some members (e.g. s_rec, runnable, thread — referenced elsewhere in
// this file) are missing from view.
8023 struct PulseAudioHandle {
\r
8024 pa_simple *s_play;
\r
8027 pthread_cond_t runnable_cv;
\r
8029 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: ensures any still-open stream is closed before the API
// object goes away (body lines missing from this extraction — presumably
// calls closeStream(); verify against full source).
8032 RtApiPulse::~RtApiPulse()
\r
8034 if ( stream_.state != STREAM_CLOSED )
\r
// Returns the number of PulseAudio "devices" (body missing from this
// extraction; the simple API exposes a single default device — verify).
8038 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Describe the single PulseAudio device: stereo in/out, default for both
// directions, with the fixed rate/format capabilities from the static
// tables above. The device index is ignored (single-device backend).
8043 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8045 RtAudio::DeviceInfo info;
\r
8046 info.probed = true;
\r
8047 info.name = "PulseAudio";
\r
8048 info.outputChannels = 2;
\r
8049 info.inputChannels = 2;
\r
8050 info.duplexChannels = 2;
\r
8051 info.isDefaultOutput = true;
\r
8052 info.isDefaultInput = true;
\r
// Copy the zero-terminated static rate list into the info structure.
8054 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8055 info.sampleRates.push_back( *sr );
\r
8057 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Entry point for the PulseAudio callback thread: loops on
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared,
// with a cancellation point each iteration.
8062 static void *pulseaudio_callback( void * user )
\r
8064 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8065 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
// volatile: flag is written by another thread (closeStream).
8066 volatile bool *isRunning = &cbi->isRunning;
\r
8068 while ( *isRunning ) {
\r
8069 pthread_testcancel();
\r
8070 context->callbackEvent();
\r
8073 pthread_exit( NULL );
\r
// Close a PulseAudio stream: wake and join the callback thread, flush and
// free both pa_simple connections, free user buffers, and mark the stream
// CLOSED. NOTE(review): partial extraction — some interior lines are
// missing; code left byte-identical, comments only.
8076 void RtApiPulse::closeStream( void )
\r
8078 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8080 stream_.callbackInfo.isRunning = false;
\r
8082 MUTEX_LOCK( &stream_.mutex );
\r
// Wake the callback thread if it is parked on runnable_cv so the join
// below cannot deadlock.
8083 if ( stream_.state == STREAM_STOPPED ) {
\r
8084 pah->runnable = true;
\r
8085 pthread_cond_signal( &pah->runnable_cv );
\r
8087 MUTEX_UNLOCK( &stream_.mutex );
\r
8089 pthread_join( pah->thread, 0 );
\r
8090 if ( pah->s_play ) {
\r
8091 pa_simple_flush( pah->s_play, NULL );
\r
8092 pa_simple_free( pah->s_play );
\r
8095 pa_simple_free( pah->s_rec );
\r
8097 pthread_cond_destroy( &pah->runnable_cv );
\r
8099 stream_.apiHandle = 0;
\r
// Release user-format buffers for both directions.
8102 if ( stream_.userBuffer[0] ) {
\r
8103 free( stream_.userBuffer[0] );
\r
8104 stream_.userBuffer[0] = 0;
\r
8106 if ( stream_.userBuffer[1] ) {
\r
8107 free( stream_.userBuffer[1] );
\r
8108 stream_.userBuffer[1] = 0;
\r
8111 stream_.state = STREAM_CLOSED;
\r
8112 stream_.mode = UNINITIALIZED;
\r
// One iteration of the PulseAudio callback-thread loop: wait while stopped,
// invoke the user callback, then write playback data and/or read capture
// data through the pa_simple blocking API, converting formats as needed.
// NOTE(review): partial extraction — some interior lines (braces/else) are
// missing; code left byte-identical, comments only.
8115 void RtApiPulse::callbackEvent( void )
\r
8117 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// Park on the condition variable while stopped; startStream() signals it.
8119 if ( stream_.state == STREAM_STOPPED ) {
\r
8120 MUTEX_LOCK( &stream_.mutex );
\r
8121 while ( !pah->runnable )
\r
8122 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8124 if ( stream_.state != STREAM_RUNNING ) {
\r
8125 MUTEX_UNLOCK( &stream_.mutex );
\r
8128 MUTEX_UNLOCK( &stream_.mutex );
\r
8131 if ( stream_.state == STREAM_CLOSED ) {
\r
8132 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8133 "this shouldn't happen!";
\r
8134 error( RtAudioError::WARNING );
\r
8138 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8139 double streamTime = getStreamTime();
\r
8140 RtAudioStreamStatus status = 0;
\r
8141 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8142 stream_.bufferSize, streamTime, status,
\r
8143 stream_.callbackInfo.userData );
\r
// Callback return of 2 requests an abort (handled in missing lines —
// presumably abortStream; TODO confirm against full source).
8145 if ( doStopStream == 2 ) {
\r
8150 MUTEX_LOCK( &stream_.mutex );
\r
// Pick the staging buffer for each direction: deviceBuffer when a format
// conversion is required, otherwise the user buffer directly.
8151 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8152 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8154 if ( stream_.state != STREAM_RUNNING )
\r
// ---------------- playback (output) pass ----------------
8159 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8160 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8161 convertBuffer( stream_.deviceBuffer,
\r
8162 stream_.userBuffer[OUTPUT],
\r
8163 stream_.convertInfo[OUTPUT] );
\r
8164 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8165 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8167 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8168 formatBytes( stream_.userFormat );
\r
// pa_simple_write blocks until the server has accepted the data.
8170 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8171 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8172 pa_strerror( pa_error ) << ".";
\r
8173 errorText_ = errorStream_.str();
\r
8174 error( RtAudioError::WARNING );
\r
// ---------------- capture (input) pass ----------------
8178 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8179 if ( stream_.doConvertBuffer[INPUT] )
\r
8180 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8181 formatBytes( stream_.deviceFormat[INPUT] );
\r
8183 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8184 formatBytes( stream_.userFormat );
\r
// pa_simple_read blocks until a full buffer has been captured.
8186 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8187 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8188 pa_strerror( pa_error ) << ".";
\r
8189 errorText_ = errorStream_.str();
\r
8190 error( RtAudioError::WARNING );
\r
8192 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8193 convertBuffer( stream_.userBuffer[INPUT],
\r
8194 stream_.deviceBuffer,
\r
8195 stream_.convertInfo[INPUT] );
\r
8200 MUTEX_UNLOCK( &stream_.mutex );
\r
8201 RtApi::tickStreamTime();
\r
// Callback return of 1 requests a graceful stop after this buffer.
8203 if ( doStopStream == 1 )
\r
// Start a stopped PulseAudio stream: validate state, mark it RUNNING, and
// wake the callback thread parked on runnable_cv.
8207 void RtApiPulse::startStream( void )
\r
8209 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8211 if ( stream_.state == STREAM_CLOSED ) {
\r
8212 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8213 error( RtAudioError::INVALID_USE );
\r
8216 if ( stream_.state == STREAM_RUNNING ) {
\r
8217 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8218 error( RtAudioError::WARNING );
\r
8222 MUTEX_LOCK( &stream_.mutex );
\r
8224 stream_.state = STREAM_RUNNING;
\r
// Release the callback thread waiting in callbackEvent().
8226 pah->runnable = true;
\r
8227 pthread_cond_signal( &pah->runnable_cv );
\r
8228 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop a running PulseAudio stream gracefully: mark it STOPPED, then drain
// the playback connection so queued audio finishes playing.
8231 void RtApiPulse::stopStream( void )
\r
8233 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8235 if ( stream_.state == STREAM_CLOSED ) {
\r
8236 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8237 error( RtAudioError::INVALID_USE );
\r
8240 if ( stream_.state == STREAM_STOPPED ) {
\r
8241 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8242 error( RtAudioError::WARNING );
\r
// State is flipped before taking the mutex so the callback thread sees it.
8246 stream_.state = STREAM_STOPPED;
\r
8247 MUTEX_LOCK( &stream_.mutex );
\r
8249 if ( pah && pah->s_play ) {
\r
// Drain blocks until all queued playback data has been played.
8251 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8252 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8253 pa_strerror( pa_error ) << ".";
\r
8254 errorText_ = errorStream_.str();
\r
// Unlock before raising so error() cannot deadlock on the stream mutex.
8255 MUTEX_UNLOCK( &stream_.mutex );
\r
8256 error( RtAudioError::SYSTEM_ERROR );
\r
8261 stream_.state = STREAM_STOPPED;
\r
8262 MUTEX_UNLOCK( &stream_.mutex );
\r
8265 void RtApiPulse::abortStream( void )
\r
8267 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8269 if ( stream_.state == STREAM_CLOSED ) {
\r
8270 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8271 error( RtAudioError::INVALID_USE );
\r
8274 if ( stream_.state == STREAM_STOPPED ) {
\r
8275 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8276 error( RtAudioError::WARNING );
\r
8280 stream_.state = STREAM_STOPPED;
\r
8281 MUTEX_LOCK( &stream_.mutex );
\r
8283 if ( pah && pah->s_play ) {
\r
8285 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8286 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8287 pa_strerror( pa_error ) << ".";
\r
8288 errorText_ = errorStream_.str();
\r
8289 MUTEX_UNLOCK( &stream_.mutex );
\r
8290 error( RtAudioError::SYSTEM_ERROR );
\r
8295 stream_.state = STREAM_STOPPED;
\r
8296 MUTEX_UNLOCK( &stream_.mutex );
\r
8299 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8300 unsigned int channels, unsigned int firstChannel,
\r
8301 unsigned int sampleRate, RtAudioFormat format,
\r
8302 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8304 PulseAudioHandle *pah = 0;
\r
8305 unsigned long bufferBytes = 0;
\r
8306 pa_sample_spec ss;
\r
8308 if ( device != 0 ) return false;
\r
8309 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8310 if ( channels != 1 && channels != 2 ) {
\r
8311 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8314 ss.channels = channels;
\r
8316 if ( firstChannel != 0 ) return false;
\r
8318 bool sr_found = false;
\r
8319 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8320 if ( sampleRate == *sr ) {
\r
8322 stream_.sampleRate = sampleRate;
\r
8323 ss.rate = sampleRate;
\r
8327 if ( !sr_found ) {
\r
8328 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8332 bool sf_found = 0;
\r
8333 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8334 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8335 if ( format == sf->rtaudio_format ) {
\r
8337 stream_.userFormat = sf->rtaudio_format;
\r
8338 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8339 ss.format = sf->pa_format;
\r
8343 if ( !sf_found ) { // Use internal data format conversion.
\r
8344 stream_.userFormat = format;
\r
8345 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8346 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8349 // Set other stream parameters.
\r
8350 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8351 else stream_.userInterleaved = true;
\r
8352 stream_.deviceInterleaved[mode] = true;
\r
8353 stream_.nBuffers = 1;
\r
8354 stream_.doByteSwap[mode] = false;
\r
8355 stream_.nUserChannels[mode] = channels;
\r
8356 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8357 stream_.channelOffset[mode] = 0;
\r
8358 std::string streamName = "RtAudio";
\r
8360 // Set flags for buffer conversion.
\r
8361 stream_.doConvertBuffer[mode] = false;
\r
8362 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8363 stream_.doConvertBuffer[mode] = true;
\r
8364 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8365 stream_.doConvertBuffer[mode] = true;
\r
8367 // Allocate necessary internal buffers.
\r
8368 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8369 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8370 if ( stream_.userBuffer[mode] == NULL ) {
\r
8371 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8374 stream_.bufferSize = *bufferSize;
\r
8376 if ( stream_.doConvertBuffer[mode] ) {
\r
8378 bool makeBuffer = true;
\r
8379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8380 if ( mode == INPUT ) {
\r
8381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8387 if ( makeBuffer ) {
\r
8388 bufferBytes *= *bufferSize;
\r
8389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8391 if ( stream_.deviceBuffer == NULL ) {
\r
8392 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8398 stream_.device[mode] = device;
\r
8400 // Setup the buffer conversion information structure.
\r
8401 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8403 if ( !stream_.apiHandle ) {
\r
8404 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8406 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8410 stream_.apiHandle = pah;
\r
8411 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8412 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8416 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8419 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8422 pa_buffer_attr buffer_attr;
\r
8423 buffer_attr.fragsize = bufferBytes;
\r
8424 buffer_attr.maxlength = -1;
\r
8426 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8427 if ( !pah->s_rec ) {
\r
8428 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8433 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8434 if ( !pah->s_play ) {
\r
8435 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8443 if ( stream_.mode == UNINITIALIZED )
\r
8444 stream_.mode = mode;
\r
8445 else if ( stream_.mode == mode )
\r
8448 stream_.mode = DUPLEX;
\r
8450 if ( !stream_.callbackInfo.isRunning ) {
\r
8451 stream_.callbackInfo.object = this;
\r
8452 stream_.callbackInfo.isRunning = true;
\r
8453 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8454 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8459 stream_.state = STREAM_STOPPED;
\r
8463 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8464 pthread_cond_destroy( &pah->runnable_cv );
\r
8466 stream_.apiHandle = 0;
\r
8469 for ( int i=0; i<2; i++ ) {
\r
8470 if ( stream_.userBuffer[i] ) {
\r
8471 free( stream_.userBuffer[i] );
\r
8472 stream_.userBuffer[i] = 0;
\r
8476 if ( stream_.deviceBuffer ) {
\r
8477 free( stream_.deviceBuffer );
\r
8478 stream_.deviceBuffer = 0;
\r
8484 //******************** End of __LINUX_PULSE__ *********************//
\r
8487 #if defined(__LINUX_OSS__)
\r
8489 #include <unistd.h>
\r
8490 #include <sys/ioctl.h>
\r
8491 #include <unistd.h>
\r
8492 #include <fcntl.h>
\r
8493 #include <sys/soundcard.h>
\r
8494 #include <errno.h>
\r
8497 static void *ossCallbackHandler(void * ptr);
\r
8499 // A structure to hold various information related to the OSS API
\r
8500 // implementation.
\r
// Per-stream bookkeeping for the OSS backend.
struct OssHandle {
  int id[2];               // device ids (output fd, input fd)
  bool xrun[2];            // over/underflow flags per direction
  bool triggered;          // output trigger state (duplex start sync)
  pthread_cond_t runnable; // signalled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8511 RtApiOss :: RtApiOss()
\r
8513 // Nothing to do here.
\r
8516 RtApiOss :: ~RtApiOss()
\r
8518 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8521 unsigned int RtApiOss :: getDeviceCount( void )
\r
8523 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8524 if ( mixerfd == -1 ) {
\r
8525 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8526 error( RtAudioError::WARNING );
\r
8530 oss_sysinfo sysinfo;
\r
8531 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8533 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8534 error( RtAudioError::WARNING );
\r
8539 return sysinfo.numaudios;
\r
8542 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8544 RtAudio::DeviceInfo info;
\r
8545 info.probed = false;
\r
8547 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8548 if ( mixerfd == -1 ) {
\r
8549 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8550 error( RtAudioError::WARNING );
\r
8554 oss_sysinfo sysinfo;
\r
8555 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8556 if ( result == -1 ) {
\r
8558 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8559 error( RtAudioError::WARNING );
\r
8563 unsigned nDevices = sysinfo.numaudios;
\r
8564 if ( nDevices == 0 ) {
\r
8566 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8567 error( RtAudioError::INVALID_USE );
\r
8571 if ( device >= nDevices ) {
\r
8573 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8574 error( RtAudioError::INVALID_USE );
\r
8578 oss_audioinfo ainfo;
\r
8579 ainfo.dev = device;
\r
8580 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8582 if ( result == -1 ) {
\r
8583 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8584 errorText_ = errorStream_.str();
\r
8585 error( RtAudioError::WARNING );
\r
8590 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8591 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8592 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8593 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8594 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8597 // Probe data formats ... do for input
\r
8598 unsigned long mask = ainfo.iformats;
\r
8599 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8600 info.nativeFormats |= RTAUDIO_SINT16;
\r
8601 if ( mask & AFMT_S8 )
\r
8602 info.nativeFormats |= RTAUDIO_SINT8;
\r
8603 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8604 info.nativeFormats |= RTAUDIO_SINT32;
\r
8605 if ( mask & AFMT_FLOAT )
\r
8606 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8607 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8608 info.nativeFormats |= RTAUDIO_SINT24;
\r
8610 // Check that we have at least one supported format
\r
8611 if ( info.nativeFormats == 0 ) {
\r
8612 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8613 errorText_ = errorStream_.str();
\r
8614 error( RtAudioError::WARNING );
\r
8618 // Probe the supported sample rates.
\r
8619 info.sampleRates.clear();
\r
8620 if ( ainfo.nrates ) {
\r
8621 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8622 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8623 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8624 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8631 // Check min and max rate values;
\r
8632 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8633 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8634 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8638 if ( info.sampleRates.size() == 0 ) {
\r
8639 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8640 errorText_ = errorStream_.str();
\r
8641 error( RtAudioError::WARNING );
\r
8644 info.probed = true;
\r
8645 info.name = ainfo.name;
\r
8652 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8653 unsigned int firstChannel, unsigned int sampleRate,
\r
8654 RtAudioFormat format, unsigned int *bufferSize,
\r
8655 RtAudio::StreamOptions *options )
\r
8657 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8658 if ( mixerfd == -1 ) {
\r
8659 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8663 oss_sysinfo sysinfo;
\r
8664 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8665 if ( result == -1 ) {
\r
8667 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8671 unsigned nDevices = sysinfo.numaudios;
\r
8672 if ( nDevices == 0 ) {
\r
8673 // This should not happen because a check is made before this function is called.
\r
8675 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8679 if ( device >= nDevices ) {
\r
8680 // This should not happen because a check is made before this function is called.
\r
8682 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8686 oss_audioinfo ainfo;
\r
8687 ainfo.dev = device;
\r
8688 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8690 if ( result == -1 ) {
\r
8691 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8692 errorText_ = errorStream_.str();
\r
8696 // Check if device supports input or output
\r
8697 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8698 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8699 if ( mode == OUTPUT )
\r
8700 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8702 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8703 errorText_ = errorStream_.str();
\r
8708 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8709 if ( mode == OUTPUT )
\r
8710 flags |= O_WRONLY;
\r
8711 else { // mode == INPUT
\r
8712 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8713 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8714 close( handle->id[0] );
\r
8715 handle->id[0] = 0;
\r
8716 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8717 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8718 errorText_ = errorStream_.str();
\r
8721 // Check that the number previously set channels is the same.
\r
8722 if ( stream_.nUserChannels[0] != channels ) {
\r
8723 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8724 errorText_ = errorStream_.str();
\r
8730 flags |= O_RDONLY;
\r
8733 // Set exclusive access if specified.
\r
8734 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8736 // Try to open the device.
\r
8738 fd = open( ainfo.devnode, flags, 0 );
\r
8740 if ( errno == EBUSY )
\r
8741 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8743 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8744 errorText_ = errorStream_.str();
\r
8748 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8750 if ( flags | O_RDWR ) {
\r
8751 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8752 if ( result == -1) {
\r
8753 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8754 errorText_ = errorStream_.str();
\r
8760 // Check the device channel support.
\r
8761 stream_.nUserChannels[mode] = channels;
\r
8762 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8764 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8765 errorText_ = errorStream_.str();
\r
8769 // Set the number of channels.
\r
8770 int deviceChannels = channels + firstChannel;
\r
8771 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8772 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8774 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8775 errorText_ = errorStream_.str();
\r
8778 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8780 // Get the data format mask
\r
8782 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8783 if ( result == -1 ) {
\r
8785 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8786 errorText_ = errorStream_.str();
\r
8790 // Determine how to set the device format.
\r
8791 stream_.userFormat = format;
\r
8792 int deviceFormat = -1;
\r
8793 stream_.doByteSwap[mode] = false;
\r
8794 if ( format == RTAUDIO_SINT8 ) {
\r
8795 if ( mask & AFMT_S8 ) {
\r
8796 deviceFormat = AFMT_S8;
\r
8797 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8800 else if ( format == RTAUDIO_SINT16 ) {
\r
8801 if ( mask & AFMT_S16_NE ) {
\r
8802 deviceFormat = AFMT_S16_NE;
\r
8803 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8805 else if ( mask & AFMT_S16_OE ) {
\r
8806 deviceFormat = AFMT_S16_OE;
\r
8807 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8808 stream_.doByteSwap[mode] = true;
\r
8811 else if ( format == RTAUDIO_SINT24 ) {
\r
8812 if ( mask & AFMT_S24_NE ) {
\r
8813 deviceFormat = AFMT_S24_NE;
\r
8814 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8816 else if ( mask & AFMT_S24_OE ) {
\r
8817 deviceFormat = AFMT_S24_OE;
\r
8818 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8819 stream_.doByteSwap[mode] = true;
\r
8822 else if ( format == RTAUDIO_SINT32 ) {
\r
8823 if ( mask & AFMT_S32_NE ) {
\r
8824 deviceFormat = AFMT_S32_NE;
\r
8825 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8827 else if ( mask & AFMT_S32_OE ) {
\r
8828 deviceFormat = AFMT_S32_OE;
\r
8829 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8830 stream_.doByteSwap[mode] = true;
\r
8834 if ( deviceFormat == -1 ) {
\r
8835 // The user requested format is not natively supported by the device.
\r
8836 if ( mask & AFMT_S16_NE ) {
\r
8837 deviceFormat = AFMT_S16_NE;
\r
8838 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8840 else if ( mask & AFMT_S32_NE ) {
\r
8841 deviceFormat = AFMT_S32_NE;
\r
8842 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8844 else if ( mask & AFMT_S24_NE ) {
\r
8845 deviceFormat = AFMT_S24_NE;
\r
8846 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8848 else if ( mask & AFMT_S16_OE ) {
\r
8849 deviceFormat = AFMT_S16_OE;
\r
8850 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8851 stream_.doByteSwap[mode] = true;
\r
8853 else if ( mask & AFMT_S32_OE ) {
\r
8854 deviceFormat = AFMT_S32_OE;
\r
8855 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8856 stream_.doByteSwap[mode] = true;
\r
8858 else if ( mask & AFMT_S24_OE ) {
\r
8859 deviceFormat = AFMT_S24_OE;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8861 stream_.doByteSwap[mode] = true;
\r
8863 else if ( mask & AFMT_S8) {
\r
8864 deviceFormat = AFMT_S8;
\r
8865 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8869 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8870 // This really shouldn't happen ...
\r
8872 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8873 errorText_ = errorStream_.str();
\r
8877 // Set the data format.
\r
8878 int temp = deviceFormat;
\r
8879 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8880 if ( result == -1 || deviceFormat != temp ) {
\r
8882 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8883 errorText_ = errorStream_.str();
\r
8887 // Attempt to set the buffer size. According to OSS, the minimum
\r
8888 // number of buffers is two. The supposed minimum buffer size is 16
\r
8889 // bytes, so that will be our lower bound. The argument to this
\r
8890 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8891 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8892 // We'll check the actual value used near the end of the setup
\r
8894 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8895 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8897 if ( options ) buffers = options->numberOfBuffers;
\r
8898 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8899 if ( buffers < 2 ) buffers = 3;
\r
8900 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8901 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8902 if ( result == -1 ) {
\r
8904 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8905 errorText_ = errorStream_.str();
\r
8908 stream_.nBuffers = buffers;
\r
8910 // Save buffer size (in sample frames).
\r
8911 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8912 stream_.bufferSize = *bufferSize;
\r
8914 // Set the sample rate.
\r
8915 int srate = sampleRate;
\r
8916 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8917 if ( result == -1 ) {
\r
8919 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8920 errorText_ = errorStream_.str();
\r
8924 // Verify the sample rate setup worked.
\r
8925 if ( abs( srate - sampleRate ) > 100 ) {
\r
8927 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8928 errorText_ = errorStream_.str();
\r
8931 stream_.sampleRate = sampleRate;
\r
8933 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8934 // We're doing duplex setup here.
\r
8935 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8936 stream_.nDeviceChannels[0] = deviceChannels;
\r
8939 // Set interleaving parameters.
\r
8940 stream_.userInterleaved = true;
\r
8941 stream_.deviceInterleaved[mode] = true;
\r
8942 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8943 stream_.userInterleaved = false;
\r
8945 // Set flags for buffer conversion
\r
8946 stream_.doConvertBuffer[mode] = false;
\r
8947 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8948 stream_.doConvertBuffer[mode] = true;
\r
8949 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8950 stream_.doConvertBuffer[mode] = true;
\r
8951 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8952 stream_.nUserChannels[mode] > 1 )
\r
8953 stream_.doConvertBuffer[mode] = true;
\r
8955 // Allocate the stream handles if necessary and then save.
\r
8956 if ( stream_.apiHandle == 0 ) {
\r
8958 handle = new OssHandle;
\r
8960 catch ( std::bad_alloc& ) {
\r
8961 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8965 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8966 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8970 stream_.apiHandle = (void *) handle;
\r
8973 handle = (OssHandle *) stream_.apiHandle;
\r
8975 handle->id[mode] = fd;
\r
8977 // Allocate necessary internal buffers.
\r
8978 unsigned long bufferBytes;
\r
8979 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8980 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8981 if ( stream_.userBuffer[mode] == NULL ) {
\r
8982 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8986 if ( stream_.doConvertBuffer[mode] ) {
\r
8988 bool makeBuffer = true;
\r
8989 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8990 if ( mode == INPUT ) {
\r
8991 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8992 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8993 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8997 if ( makeBuffer ) {
\r
8998 bufferBytes *= *bufferSize;
\r
8999 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9000 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9001 if ( stream_.deviceBuffer == NULL ) {
\r
9002 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9008 stream_.device[mode] = device;
\r
9009 stream_.state = STREAM_STOPPED;
\r
9011 // Setup the buffer conversion information structure.
\r
9012 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9014 // Setup thread if necessary.
\r
9015 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9016 // We had already set up an output stream.
\r
9017 stream_.mode = DUPLEX;
\r
9018 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9021 stream_.mode = mode;
\r
9023 // Setup callback thread.
\r
9024 stream_.callbackInfo.object = (void *) this;
\r
9026 // Set the thread attributes for joinable and realtime scheduling
\r
9027 // priority. The higher priority will only take affect if the
\r
9028 // program is run as root or suid.
\r
9029 pthread_attr_t attr;
\r
9030 pthread_attr_init( &attr );
\r
9031 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9032 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9033 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9034 struct sched_param param;
\r
9035 int priority = options->priority;
\r
9036 int min = sched_get_priority_min( SCHED_RR );
\r
9037 int max = sched_get_priority_max( SCHED_RR );
\r
9038 if ( priority < min ) priority = min;
\r
9039 else if ( priority > max ) priority = max;
\r
9040 param.sched_priority = priority;
\r
9041 pthread_attr_setschedparam( &attr, ¶m );
\r
9042 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9045 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9047 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9050 stream_.callbackInfo.isRunning = true;
\r
9051 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9052 pthread_attr_destroy( &attr );
\r
9054 stream_.callbackInfo.isRunning = false;
\r
9055 errorText_ = "RtApiOss::error creating callback thread!";
\r
9064 pthread_cond_destroy( &handle->runnable );
\r
9065 if ( handle->id[0] ) close( handle->id[0] );
\r
9066 if ( handle->id[1] ) close( handle->id[1] );
\r
9068 stream_.apiHandle = 0;
\r
9071 for ( int i=0; i<2; i++ ) {
\r
9072 if ( stream_.userBuffer[i] ) {
\r
9073 free( stream_.userBuffer[i] );
\r
9074 stream_.userBuffer[i] = 0;
\r
9078 if ( stream_.deviceBuffer ) {
\r
9079 free( stream_.deviceBuffer );
\r
9080 stream_.deviceBuffer = 0;
\r
9086 void RtApiOss :: closeStream()
\r
9088 if ( stream_.state == STREAM_CLOSED ) {
\r
9089 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9090 error( RtAudioError::WARNING );
\r
9094 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9095 stream_.callbackInfo.isRunning = false;
\r
9096 MUTEX_LOCK( &stream_.mutex );
\r
9097 if ( stream_.state == STREAM_STOPPED )
\r
9098 pthread_cond_signal( &handle->runnable );
\r
9099 MUTEX_UNLOCK( &stream_.mutex );
\r
9100 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9102 if ( stream_.state == STREAM_RUNNING ) {
\r
9103 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9104 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9106 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9107 stream_.state = STREAM_STOPPED;
\r
9111 pthread_cond_destroy( &handle->runnable );
\r
9112 if ( handle->id[0] ) close( handle->id[0] );
\r
9113 if ( handle->id[1] ) close( handle->id[1] );
\r
9115 stream_.apiHandle = 0;
\r
9118 for ( int i=0; i<2; i++ ) {
\r
9119 if ( stream_.userBuffer[i] ) {
\r
9120 free( stream_.userBuffer[i] );
\r
9121 stream_.userBuffer[i] = 0;
\r
9125 if ( stream_.deviceBuffer ) {
\r
9126 free( stream_.deviceBuffer );
\r
9127 stream_.deviceBuffer = 0;
\r
9130 stream_.mode = UNINITIALIZED;
\r
9131 stream_.state = STREAM_CLOSED;
\r
9134 void RtApiOss :: startStream()
\r
9137 if ( stream_.state == STREAM_RUNNING ) {
\r
9138 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9139 error( RtAudioError::WARNING );
\r
9143 MUTEX_LOCK( &stream_.mutex );
\r
9145 stream_.state = STREAM_RUNNING;
\r
9147 // No need to do anything else here ... OSS automatically starts
\r
9148 // when fed samples.
\r
9150 MUTEX_UNLOCK( &stream_.mutex );
\r
9152 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9153 pthread_cond_signal( &handle->runnable );
\r
9156 void RtApiOss :: stopStream()
\r
9159 if ( stream_.state == STREAM_STOPPED ) {
\r
9160 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9161 error( RtAudioError::WARNING );
\r
9165 MUTEX_LOCK( &stream_.mutex );
\r
9167 // The state might change while waiting on a mutex.
\r
9168 if ( stream_.state == STREAM_STOPPED ) {
\r
9169 MUTEX_UNLOCK( &stream_.mutex );
\r
9174 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9175 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9177 // Flush the output with zeros a few times.
\r
9180 RtAudioFormat format;
\r
9182 if ( stream_.doConvertBuffer[0] ) {
\r
9183 buffer = stream_.deviceBuffer;
\r
9184 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9185 format = stream_.deviceFormat[0];
\r
9188 buffer = stream_.userBuffer[0];
\r
9189 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9190 format = stream_.userFormat;
\r
9193 memset( buffer, 0, samples * formatBytes(format) );
\r
9194 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9195 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9196 if ( result == -1 ) {
\r
9197 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9198 error( RtAudioError::WARNING );
\r
9202 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9203 if ( result == -1 ) {
\r
9204 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9205 errorText_ = errorStream_.str();
\r
9208 handle->triggered = false;
\r
9211 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9212 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9213 if ( result == -1 ) {
\r
9214 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9215 errorText_ = errorStream_.str();
\r
9221 stream_.state = STREAM_STOPPED;
\r
9222 MUTEX_UNLOCK( &stream_.mutex );
\r
9224 if ( result != -1 ) return;
\r
9225 error( RtAudioError::SYSTEM_ERROR );
\r
9228 void RtApiOss :: abortStream()
\r
9231 if ( stream_.state == STREAM_STOPPED ) {
\r
9232 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9233 error( RtAudioError::WARNING );
\r
9237 MUTEX_LOCK( &stream_.mutex );
\r
9239 // The state might change while waiting on a mutex.
\r
9240 if ( stream_.state == STREAM_STOPPED ) {
\r
9241 MUTEX_UNLOCK( &stream_.mutex );
\r
9246 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9247 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9248 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9249 if ( result == -1 ) {
\r
9250 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9251 errorText_ = errorStream_.str();
\r
9254 handle->triggered = false;
\r
9257 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9258 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9259 if ( result == -1 ) {
\r
9260 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9261 errorText_ = errorStream_.str();
\r
9267 stream_.state = STREAM_STOPPED;
\r
9268 MUTEX_UNLOCK( &stream_.mutex );
\r
9270 if ( result != -1 ) return;
\r
9271 error( RtAudioError::SYSTEM_ERROR );
\r
9274 void RtApiOss :: callbackEvent()
\r
9276 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9277 if ( stream_.state == STREAM_STOPPED ) {
\r
9278 MUTEX_LOCK( &stream_.mutex );
\r
9279 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9280 if ( stream_.state != STREAM_RUNNING ) {
\r
9281 MUTEX_UNLOCK( &stream_.mutex );
\r
9284 MUTEX_UNLOCK( &stream_.mutex );
\r
9287 if ( stream_.state == STREAM_CLOSED ) {
\r
9288 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9289 error( RtAudioError::WARNING );
\r
9293 // Invoke user callback to get fresh output data.
\r
9294 int doStopStream = 0;
\r
9295 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9296 double streamTime = getStreamTime();
\r
9297 RtAudioStreamStatus status = 0;
\r
9298 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9299 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9300 handle->xrun[0] = false;
\r
9302 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9303 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9304 handle->xrun[1] = false;
\r
9306 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9307 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9308 if ( doStopStream == 2 ) {
\r
9309 this->abortStream();
\r
9313 MUTEX_LOCK( &stream_.mutex );
\r
9315 // The state might change while waiting on a mutex.
\r
9316 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9321 RtAudioFormat format;
\r
9323 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9325 // Setup parameters and do buffer conversion if necessary.
\r
9326 if ( stream_.doConvertBuffer[0] ) {
\r
9327 buffer = stream_.deviceBuffer;
\r
9328 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9329 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9330 format = stream_.deviceFormat[0];
\r
9333 buffer = stream_.userBuffer[0];
\r
9334 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9335 format = stream_.userFormat;
\r
9338 // Do byte swapping if necessary.
\r
9339 if ( stream_.doByteSwap[0] )
\r
9340 byteSwapBuffer( buffer, samples, format );
\r
9342 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9344 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9345 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9346 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9347 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9348 handle->triggered = true;
\r
9351 // Write samples to device.
\r
9352 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9354 if ( result == -1 ) {
\r
9355 // We'll assume this is an underrun, though there isn't a
\r
9356 // specific means for determining that.
\r
9357 handle->xrun[0] = true;
\r
9358 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9359 error( RtAudioError::WARNING );
\r
9360 // Continue on to input section.
\r
9364 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9366 // Setup parameters.
\r
9367 if ( stream_.doConvertBuffer[1] ) {
\r
9368 buffer = stream_.deviceBuffer;
\r
9369 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9370 format = stream_.deviceFormat[1];
\r
9373 buffer = stream_.userBuffer[1];
\r
9374 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9375 format = stream_.userFormat;
\r
9378 // Read samples from device.
\r
9379 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9381 if ( result == -1 ) {
\r
9382 // We'll assume this is an overrun, though there isn't a
\r
9383 // specific means for determining that.
\r
9384 handle->xrun[1] = true;
\r
9385 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9386 error( RtAudioError::WARNING );
\r
9390 // Do byte swapping if necessary.
\r
9391 if ( stream_.doByteSwap[1] )
\r
9392 byteSwapBuffer( buffer, samples, format );
\r
9394 // Do buffer conversion if necessary.
\r
9395 if ( stream_.doConvertBuffer[1] )
\r
9396 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9400 MUTEX_UNLOCK( &stream_.mutex );
\r
9402 RtApi::tickStreamTime();
\r
9403 if ( doStopStream == 1 ) this->stopStream();
\r
9406 static void *ossCallbackHandler( void *ptr )
\r
9408 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9409 RtApiOss *object = (RtApiOss *) info->object;
\r
9410 bool *isRunning = &info->isRunning;
\r
9412 while ( *isRunning == true ) {
\r
9413 pthread_testcancel();
\r
9414 object->callbackEvent();
\r
9417 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
9430 // This method can be modified to control the behavior of error
\r
9431 // message printing.
\r
9432 void RtApi :: error( RtAudioError::Type type )
\r
9434 errorStream_.str(""); // clear the ostringstream
\r
9436 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9437 if ( errorCallback ) {
\r
9438 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9440 if ( firstErrorOccurred_ )
\r
9443 firstErrorOccurred_ = true;
\r
9444 const std::string errorMessage = errorText_;
\r
9446 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9447 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9451 errorCallback( type, errorMessage );
\r
9452 firstErrorOccurred_ = false;
\r
9456 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9457 std::cerr << '\n' << errorText_ << "\n\n";
\r
9458 else if ( type != RtAudioError::WARNING )
\r
9459 throw( RtAudioError( errorText_, type ) );
\r
9462 void RtApi :: verifyStream()
\r
9464 if ( stream_.state == STREAM_CLOSED ) {
\r
9465 errorText_ = "RtApi:: a stream is not open!";
\r
9466 error( RtAudioError::INVALID_USE );
\r
9470 void RtApi :: clearStreamInfo()
\r
9472 stream_.mode = UNINITIALIZED;
\r
9473 stream_.state = STREAM_CLOSED;
\r
9474 stream_.sampleRate = 0;
\r
9475 stream_.bufferSize = 0;
\r
9476 stream_.nBuffers = 0;
\r
9477 stream_.userFormat = 0;
\r
9478 stream_.userInterleaved = true;
\r
9479 stream_.streamTime = 0.0;
\r
9480 stream_.apiHandle = 0;
\r
9481 stream_.deviceBuffer = 0;
\r
9482 stream_.callbackInfo.callback = 0;
\r
9483 stream_.callbackInfo.userData = 0;
\r
9484 stream_.callbackInfo.isRunning = false;
\r
9485 stream_.callbackInfo.errorCallback = 0;
\r
9486 for ( int i=0; i<2; i++ ) {
\r
9487 stream_.device[i] = 11111;
\r
9488 stream_.doConvertBuffer[i] = false;
\r
9489 stream_.deviceInterleaved[i] = true;
\r
9490 stream_.doByteSwap[i] = false;
\r
9491 stream_.nUserChannels[i] = 0;
\r
9492 stream_.nDeviceChannels[i] = 0;
\r
9493 stream_.channelOffset[i] = 0;
\r
9494 stream_.deviceFormat[i] = 0;
\r
9495 stream_.latency[i] = 0;
\r
9496 stream_.userBuffer[i] = 0;
\r
9497 stream_.convertInfo[i].channels = 0;
\r
9498 stream_.convertInfo[i].inJump = 0;
\r
9499 stream_.convertInfo[i].outJump = 0;
\r
9500 stream_.convertInfo[i].inFormat = 0;
\r
9501 stream_.convertInfo[i].outFormat = 0;
\r
9502 stream_.convertInfo[i].inOffset.clear();
\r
9503 stream_.convertInfo[i].outOffset.clear();
\r
9507 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9509 if ( format == RTAUDIO_SINT16 )
\r
9511 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9513 else if ( format == RTAUDIO_FLOAT64 )
\r
9515 else if ( format == RTAUDIO_SINT24 )
\r
9517 else if ( format == RTAUDIO_SINT8 )
\r
9520 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9521 error( RtAudioError::WARNING );
\r
9526 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9528 if ( mode == INPUT ) { // convert device to user buffer
\r
9529 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9530 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9531 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9532 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9534 else { // convert user to device buffer
\r
9535 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9536 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9537 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9538 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9541 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9542 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9544 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9546 // Set up the interleave/deinterleave offsets.
\r
9547 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9548 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9549 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9550 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9551 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9552 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9553 stream_.convertInfo[mode].inJump = 1;
\r
9557 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9558 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9559 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9560 stream_.convertInfo[mode].outJump = 1;
\r
9564 else { // no (de)interleaving
\r
9565 if ( stream_.userInterleaved ) {
\r
9566 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9567 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9568 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9572 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9573 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9574 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9575 stream_.convertInfo[mode].inJump = 1;
\r
9576 stream_.convertInfo[mode].outJump = 1;
\r
9581 // Add channel offset.
\r
9582 if ( firstChannel > 0 ) {
\r
9583 if ( stream_.deviceInterleaved[mode] ) {
\r
9584 if ( mode == OUTPUT ) {
\r
9585 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9586 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9589 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9590 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9594 if ( mode == OUTPUT ) {
\r
9595 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9596 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9600 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9606 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9608 // This function does format conversion, input/output channel compensation, and
\r
9609 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9610 // the lower three bytes of a 32-bit integer.
\r
9612 // Clear our device buffer when in/out duplex device channels are different
\r
9613 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9614 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9615 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9618 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9620 Float64 *out = (Float64 *)outBuffer;
\r
9622 if (info.inFormat == RTAUDIO_SINT8) {
\r
9623 signed char *in = (signed char *)inBuffer;
\r
9624 scale = 1.0 / 127.5;
\r
9625 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9626 for (j=0; j<info.channels; j++) {
\r
9627 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9628 out[info.outOffset[j]] += 0.5;
\r
9629 out[info.outOffset[j]] *= scale;
\r
9631 in += info.inJump;
\r
9632 out += info.outJump;
\r
9635 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9636 Int16 *in = (Int16 *)inBuffer;
\r
9637 scale = 1.0 / 32767.5;
\r
9638 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9639 for (j=0; j<info.channels; j++) {
\r
9640 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9641 out[info.outOffset[j]] += 0.5;
\r
9642 out[info.outOffset[j]] *= scale;
\r
9644 in += info.inJump;
\r
9645 out += info.outJump;
\r
9648 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9649 Int24 *in = (Int24 *)inBuffer;
\r
9650 scale = 1.0 / 8388607.5;
\r
9651 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9652 for (j=0; j<info.channels; j++) {
\r
9653 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9654 out[info.outOffset[j]] += 0.5;
\r
9655 out[info.outOffset[j]] *= scale;
\r
9657 in += info.inJump;
\r
9658 out += info.outJump;
\r
9661 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9662 Int32 *in = (Int32 *)inBuffer;
\r
9663 scale = 1.0 / 2147483647.5;
\r
9664 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9665 for (j=0; j<info.channels; j++) {
\r
9666 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9667 out[info.outOffset[j]] += 0.5;
\r
9668 out[info.outOffset[j]] *= scale;
\r
9670 in += info.inJump;
\r
9671 out += info.outJump;
\r
9674 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9675 Float32 *in = (Float32 *)inBuffer;
\r
9676 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9677 for (j=0; j<info.channels; j++) {
\r
9678 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9680 in += info.inJump;
\r
9681 out += info.outJump;
\r
9684 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9685 // Channel compensation and/or (de)interleaving only.
\r
9686 Float64 *in = (Float64 *)inBuffer;
\r
9687 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9688 for (j=0; j<info.channels; j++) {
\r
9689 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9691 in += info.inJump;
\r
9692 out += info.outJump;
\r
9696 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9698 Float32 *out = (Float32 *)outBuffer;
\r
9700 if (info.inFormat == RTAUDIO_SINT8) {
\r
9701 signed char *in = (signed char *)inBuffer;
\r
9702 scale = (Float32) ( 1.0 / 127.5 );
\r
9703 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9704 for (j=0; j<info.channels; j++) {
\r
9705 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9706 out[info.outOffset[j]] += 0.5;
\r
9707 out[info.outOffset[j]] *= scale;
\r
9709 in += info.inJump;
\r
9710 out += info.outJump;
\r
9713 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9714 Int16 *in = (Int16 *)inBuffer;
\r
9715 scale = (Float32) ( 1.0 / 32767.5 );
\r
9716 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9717 for (j=0; j<info.channels; j++) {
\r
9718 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9719 out[info.outOffset[j]] += 0.5;
\r
9720 out[info.outOffset[j]] *= scale;
\r
9722 in += info.inJump;
\r
9723 out += info.outJump;
\r
9726 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9727 Int24 *in = (Int24 *)inBuffer;
\r
9728 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9729 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9730 for (j=0; j<info.channels; j++) {
\r
9731 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9732 out[info.outOffset[j]] += 0.5;
\r
9733 out[info.outOffset[j]] *= scale;
\r
9735 in += info.inJump;
\r
9736 out += info.outJump;
\r
9739 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9740 Int32 *in = (Int32 *)inBuffer;
\r
9741 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9742 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9743 for (j=0; j<info.channels; j++) {
\r
9744 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9745 out[info.outOffset[j]] += 0.5;
\r
9746 out[info.outOffset[j]] *= scale;
\r
9748 in += info.inJump;
\r
9749 out += info.outJump;
\r
9752 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9753 // Channel compensation and/or (de)interleaving only.
\r
9754 Float32 *in = (Float32 *)inBuffer;
\r
9755 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9756 for (j=0; j<info.channels; j++) {
\r
9757 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9759 in += info.inJump;
\r
9760 out += info.outJump;
\r
9763 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9764 Float64 *in = (Float64 *)inBuffer;
\r
9765 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9766 for (j=0; j<info.channels; j++) {
\r
9767 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9769 in += info.inJump;
\r
9770 out += info.outJump;
\r
9774 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9775 Int32 *out = (Int32 *)outBuffer;
\r
9776 if (info.inFormat == RTAUDIO_SINT8) {
\r
9777 signed char *in = (signed char *)inBuffer;
\r
9778 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9779 for (j=0; j<info.channels; j++) {
\r
9780 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9781 out[info.outOffset[j]] <<= 24;
\r
9783 in += info.inJump;
\r
9784 out += info.outJump;
\r
9787 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9788 Int16 *in = (Int16 *)inBuffer;
\r
9789 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9790 for (j=0; j<info.channels; j++) {
\r
9791 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9792 out[info.outOffset[j]] <<= 16;
\r
9794 in += info.inJump;
\r
9795 out += info.outJump;
\r
9798 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9799 Int24 *in = (Int24 *)inBuffer;
\r
9800 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9801 for (j=0; j<info.channels; j++) {
\r
9802 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9803 out[info.outOffset[j]] <<= 8;
\r
9805 in += info.inJump;
\r
9806 out += info.outJump;
\r
9809 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9810 // Channel compensation and/or (de)interleaving only.
\r
9811 Int32 *in = (Int32 *)inBuffer;
\r
9812 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9813 for (j=0; j<info.channels; j++) {
\r
9814 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9816 in += info.inJump;
\r
9817 out += info.outJump;
\r
9820 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9821 Float32 *in = (Float32 *)inBuffer;
\r
9822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9823 for (j=0; j<info.channels; j++) {
\r
9824 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9826 in += info.inJump;
\r
9827 out += info.outJump;
\r
9830 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9831 Float64 *in = (Float64 *)inBuffer;
\r
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9833 for (j=0; j<info.channels; j++) {
\r
9834 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9841 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9842 Int24 *out = (Int24 *)outBuffer;
\r
9843 if (info.inFormat == RTAUDIO_SINT8) {
\r
9844 signed char *in = (signed char *)inBuffer;
\r
9845 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9846 for (j=0; j<info.channels; j++) {
\r
9847 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9848 //out[info.outOffset[j]] <<= 16;
\r
9850 in += info.inJump;
\r
9851 out += info.outJump;
\r
9854 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9855 Int16 *in = (Int16 *)inBuffer;
\r
9856 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9857 for (j=0; j<info.channels; j++) {
\r
9858 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9859 //out[info.outOffset[j]] <<= 8;
\r
9861 in += info.inJump;
\r
9862 out += info.outJump;
\r
9865 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9866 // Channel compensation and/or (de)interleaving only.
\r
9867 Int24 *in = (Int24 *)inBuffer;
\r
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9869 for (j=0; j<info.channels; j++) {
\r
9870 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9872 in += info.inJump;
\r
9873 out += info.outJump;
\r
9876 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9877 Int32 *in = (Int32 *)inBuffer;
\r
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9879 for (j=0; j<info.channels; j++) {
\r
9880 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9881 //out[info.outOffset[j]] >>= 8;
\r
9883 in += info.inJump;
\r
9884 out += info.outJump;
\r
9887 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9888 Float32 *in = (Float32 *)inBuffer;
\r
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9890 for (j=0; j<info.channels; j++) {
\r
9891 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9893 in += info.inJump;
\r
9894 out += info.outJump;
\r
9897 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9898 Float64 *in = (Float64 *)inBuffer;
\r
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9900 for (j=0; j<info.channels; j++) {
\r
9901 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9903 in += info.inJump;
\r
9904 out += info.outJump;
\r
9908 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9909 Int16 *out = (Int16 *)outBuffer;
\r
9910 if (info.inFormat == RTAUDIO_SINT8) {
\r
9911 signed char *in = (signed char *)inBuffer;
\r
9912 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9913 for (j=0; j<info.channels; j++) {
\r
9914 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9915 out[info.outOffset[j]] <<= 8;
\r
9917 in += info.inJump;
\r
9918 out += info.outJump;
\r
9921 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9922 // Channel compensation and/or (de)interleaving only.
\r
9923 Int16 *in = (Int16 *)inBuffer;
\r
9924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9925 for (j=0; j<info.channels; j++) {
\r
9926 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9928 in += info.inJump;
\r
9929 out += info.outJump;
\r
9932 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9933 Int24 *in = (Int24 *)inBuffer;
\r
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9935 for (j=0; j<info.channels; j++) {
\r
9936 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9938 in += info.inJump;
\r
9939 out += info.outJump;
\r
9942 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9943 Int32 *in = (Int32 *)inBuffer;
\r
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9945 for (j=0; j<info.channels; j++) {
\r
9946 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9953 Float32 *in = (Float32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9962 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9963 Float64 *in = (Float64 *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9968 in += info.inJump;
\r
9969 out += info.outJump;
\r
9973 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9974 signed char *out = (signed char *)outBuffer;
\r
9975 if (info.inFormat == RTAUDIO_SINT8) {
\r
9976 // Channel compensation and/or (de)interleaving only.
\r
9977 signed char *in = (signed char *)inBuffer;
\r
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9979 for (j=0; j<info.channels; j++) {
\r
9980 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9982 in += info.inJump;
\r
9983 out += info.outJump;
\r
9986 if (info.inFormat == RTAUDIO_SINT16) {
\r
9987 Int16 *in = (Int16 *)inBuffer;
\r
9988 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9989 for (j=0; j<info.channels; j++) {
\r
9990 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9992 in += info.inJump;
\r
9993 out += info.outJump;
\r
9996 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9997 Int24 *in = (Int24 *)inBuffer;
\r
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9999 for (j=0; j<info.channels; j++) {
\r
10000 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10007 Int32 *in = (Int32 *)inBuffer;
\r
10008 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10009 for (j=0; j<info.channels; j++) {
\r
10010 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10012 in += info.inJump;
\r
10013 out += info.outJump;
\r
10016 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10017 Float32 *in = (Float32 *)inBuffer;
\r
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10019 for (j=0; j<info.channels; j++) {
\r
10020 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10022 in += info.inJump;
\r
10023 out += info.outJump;
\r
10026 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10027 Float64 *in = (Float64 *)inBuffer;
\r
10028 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10029 for (j=0; j<info.channels; j++) {
\r
10030 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10032 in += info.inJump;
\r
10033 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10043 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10045 register char val;
\r
10046 register char *ptr;
\r
10049 if ( format == RTAUDIO_SINT16 ) {
\r
10050 for ( unsigned int i=0; i<samples; i++ ) {
\r
10051 // Swap 1st and 2nd bytes.
\r
10053 *(ptr) = *(ptr+1);
\r
10056 // Increment 2 bytes.
\r
10060 else if ( format == RTAUDIO_SINT32 ||
\r
10061 format == RTAUDIO_FLOAT32 ) {
\r
10062 for ( unsigned int i=0; i<samples; i++ ) {
\r
10063 // Swap 1st and 4th bytes.
\r
10065 *(ptr) = *(ptr+3);
\r
10068 // Swap 2nd and 3rd bytes.
\r
10071 *(ptr) = *(ptr+1);
\r
10074 // Increment 3 more bytes.
\r
10078 else if ( format == RTAUDIO_SINT24 ) {
\r
10079 for ( unsigned int i=0; i<samples; i++ ) {
\r
10080 // Swap 1st and 3rd bytes.
\r
10082 *(ptr) = *(ptr+2);
\r
10085 // Increment 2 more bytes.
\r
10089 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10090 for ( unsigned int i=0; i<samples; i++ ) {
\r
10091 // Swap 1st and 8th bytes
\r
10093 *(ptr) = *(ptr+7);
\r
10096 // Swap 2nd and 7th bytes
\r
10099 *(ptr) = *(ptr+5);
\r
10102 // Swap 3rd and 6th bytes
\r
10105 *(ptr) = *(ptr+3);
\r
10108 // Swap 4th and 5th bytes
\r
10111 *(ptr) = *(ptr+1);
\r
10114 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r