1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1pre
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex wrappers: Windows backends use a CRITICAL_SECTION,
// the POSIX-based backends use pthread_mutex_t, and when no backend
// is compiled the macros collapse to harmless dummies.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)
  #define MUTEX_UNLOCK(A)
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1686 if ( handle->drainCounter ) {
\r
1687 handle->drainCounter++;
\r
1692 AudioDeviceID inputDevice;
\r
1693 inputDevice = handle->id[1];
\r
1694 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1696 if ( handle->nStreams[1] == 1 ) {
\r
1697 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1698 convertBuffer( stream_.userBuffer[1],
\r
1699 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1700 stream_.convertInfo[1] );
\r
1702 else { // copy to user buffer
\r
1703 memcpy( stream_.userBuffer[1],
\r
1704 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1708 else { // read from multiple streams
\r
1709 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1710 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1712 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1713 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1714 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1715 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1716 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1719 else { // read from multiple multi-channel streams
\r
1720 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1721 Float32 *out, *in;
\r
1723 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1724 UInt32 outChannels = stream_.nUserChannels[1];
\r
1725 if ( stream_.doConvertBuffer[1] ) {
\r
1726 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1727 outChannels = stream_.nDeviceChannels[1];
\r
1730 if ( outInterleaved ) outOffset = 1;
\r
1731 else outOffset = stream_.bufferSize;
\r
1733 channelsLeft = outChannels;
\r
1734 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1736 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1737 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1740 // Account for possible channel offset in first stream
\r
1741 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1742 streamChannels -= stream_.channelOffset[1];
\r
1743 inJump = stream_.channelOffset[1];
\r
1747 // Account for possible unread channels at end of the last stream
\r
1748 if ( streamChannels > channelsLeft ) {
\r
1749 inJump = streamChannels - channelsLeft;
\r
1750 streamChannels = channelsLeft;
\r
1753 // Determine output buffer offsets and skips
\r
1754 if ( outInterleaved ) {
\r
1755 outJump = outChannels;
\r
1756 out += outChannels - channelsLeft;
\r
1760 out += (outChannels - channelsLeft) * outOffset;
\r
1763 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1764 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1765 out[j*outOffset] = *in++;
\r
1770 channelsLeft -= streamChannels;
\r
1774 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1775 convertBuffer( stream_.userBuffer[1],
\r
1776 stream_.deviceBuffer,
\r
1777 stream_.convertInfo[1] );
\r
1783 //MUTEX_UNLOCK( &stream_.mutex );
\r
1785 RtApi::tickStreamTime();
\r
1789 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1793 case kAudioHardwareNotRunningError:
\r
1794 return "kAudioHardwareNotRunningError";
\r
1796 case kAudioHardwareUnspecifiedError:
\r
1797 return "kAudioHardwareUnspecifiedError";
\r
1799 case kAudioHardwareUnknownPropertyError:
\r
1800 return "kAudioHardwareUnknownPropertyError";
\r
1802 case kAudioHardwareBadPropertySizeError:
\r
1803 return "kAudioHardwareBadPropertySizeError";
\r
1805 case kAudioHardwareIllegalOperationError:
\r
1806 return "kAudioHardwareIllegalOperationError";
\r
1808 case kAudioHardwareBadObjectError:
\r
1809 return "kAudioHardwareBadObjectError";
\r
1811 case kAudioHardwareBadDeviceError:
\r
1812 return "kAudioHardwareBadDeviceError";
\r
1814 case kAudioHardwareBadStreamError:
\r
1815 return "kAudioHardwareBadStreamError";
\r
1817 case kAudioHardwareUnsupportedOperationError:
\r
1818 return "kAudioHardwareUnsupportedOperationError";
\r
1820 case kAudioDeviceUnsupportedFormatError:
\r
1821 return "kAudioDeviceUnsupportedFormatError";
\r
1823 case kAudioDevicePermissionsError:
\r
1824 return "kAudioDevicePermissionsError";
\r
1827 return "CoreAudio unknown error";
\r
1831 //******************** End of __MACOSX_CORE__ *********************//
\r
1834 #if defined(__UNIX_JACK__)
\r
1836 // JACK is a low-latency audio server, originally written for the
\r
1837 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1838 // connect a number of different applications to an audio device, as
\r
1839 // well as allowing them to share audio between themselves.
\r
1841 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1842 // have ports connected to the server. The JACK server is typically
\r
1843 // started in a terminal as follows:
\r
1845 // .jackd -d alsa -d hw:0
\r
1847 // or through an interface program such as qjackctl. Many of the
\r
1848 // parameters normally set for a stream are fixed by the JACK server
\r
1849 // and can be specified when the JACK server is started. In
\r
1852 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1854 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1855 // frames, and number of buffers = 4. Once the server is running, it
\r
1856 // is not possible to override these values. If the values are not
\r
1857 // specified in the command-line, the JACK server uses default values.
\r
1859 // The JACK server does not have to be running when an instance of
\r
1860 // RtApiJack is created, though the function getDeviceCount() will
\r
1861 // report 0 devices found until JACK has been started. When no
\r
1862 // devices are available (i.e., the JACK server is not running), a
\r
1863 // stream cannot be opened.
\r
1865 #include <jack/jack.h>
\r
1866 #include <unistd.h>
\r
1869 // A structure to hold various information related to the Jack API
\r
1870 // implementation.
\r
1871 struct JackHandle {
\r
1872 jack_client_t *client;
\r
1873 jack_port_t **ports[2];
\r
1874 std::string deviceName[2];
\r
1876 pthread_cond_t condition;
\r
1877 int drainCounter; // Tracks callback counts when draining
\r
1878 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1881 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler used to suppress JACK's internal error
// reporting in non-debug builds (installed via jack_set_error_function).
static void jackSilentError( const char * ) {}
1886 RtApiJack :: RtApiJack()
\r
1888 // Nothing to do here.
\r
1889 #if !defined(__RTAUDIO_DEBUG__)
\r
1890 // Turn off Jack's internal error reporting.
\r
1891 jack_set_error_function( &jackSilentError );
\r
1895 RtApiJack :: ~RtApiJack()
\r
1897 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1900 unsigned int RtApiJack :: getDeviceCount( void )
\r
1902 // See if we can become a jack client.
\r
1903 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1904 jack_status_t *status = NULL;
\r
1905 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1906 if ( client == 0 ) return 0;
\r
1908 const char **ports;
\r
1909 std::string port, previousPort;
\r
1910 unsigned int nChannels = 0, nDevices = 0;
\r
1911 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1913 // Parse the port names up to the first colon (:).
\r
1914 size_t iColon = 0;
\r
1916 port = (char *) ports[ nChannels ];
\r
1917 iColon = port.find(":");
\r
1918 if ( iColon != std::string::npos ) {
\r
1919 port = port.substr( 0, iColon + 1 );
\r
1920 if ( port != previousPort ) {
\r
1922 previousPort = port;
\r
1925 } while ( ports[++nChannels] );
\r
1929 jack_client_close( client );
\r
1933 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1935 RtAudio::DeviceInfo info;
\r
1936 info.probed = false;
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1941 if ( client == 0 ) {
\r
1942 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1943 error( RtAudioError::WARNING );
\r
1947 const char **ports;
\r
1948 std::string port, previousPort;
\r
1949 unsigned int nPorts = 0, nDevices = 0;
\r
1950 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1952 // Parse the port names up to the first colon (:).
\r
1953 size_t iColon = 0;
\r
1955 port = (char *) ports[ nPorts ];
\r
1956 iColon = port.find(":");
\r
1957 if ( iColon != std::string::npos ) {
\r
1958 port = port.substr( 0, iColon );
\r
1959 if ( port != previousPort ) {
\r
1960 if ( nDevices == device ) info.name = port;
\r
1962 previousPort = port;
\r
1965 } while ( ports[++nPorts] );
\r
1969 if ( device >= nDevices ) {
\r
1970 jack_client_close( client );
\r
1971 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1972 error( RtAudioError::INVALID_USE );
\r
1976 // Get the current jack server sample rate.
\r
1977 info.sampleRates.clear();
\r
1978 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1980 // Count the available ports containing the client name as device
\r
1981 // channels. Jack "input ports" equal RtAudio output channels.
\r
1982 unsigned int nChannels = 0;
\r
1983 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1985 while ( ports[ nChannels ] ) nChannels++;
\r
1987 info.outputChannels = nChannels;
\r
1990 // Jack "output ports" equal RtAudio input channels.
\r
1992 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1994 while ( ports[ nChannels ] ) nChannels++;
\r
1996 info.inputChannels = nChannels;
\r
1999 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2000 jack_client_close(client);
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 // If device opens for both playback and capture, we determine the channels.
\r
2007 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2008 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2010 // Jack always uses 32-bit floats.
\r
2011 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2013 // Jack doesn't provide default devices so we'll use the first available one.
\r
2014 if ( device == 0 && info.outputChannels > 0 )
\r
2015 info.isDefaultOutput = true;
\r
2016 if ( device == 0 && info.inputChannels > 0 )
\r
2017 info.isDefaultInput = true;
\r
2019 jack_client_close(client);
\r
2020 info.probed = true;
\r
2024 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2026 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2028 RtApiJack *object = (RtApiJack *) info->object;
\r
2029 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2034 // This function will be called by a spawned thread when the Jack
\r
2035 // server signals that it is shutting down. It is necessary to handle
\r
2036 // it this way because the jackShutdown() function must return before
\r
2037 // the jack_deactivate() function (in closeStream()) will return.
\r
2038 static void *jackCloseStream( void *ptr )
\r
2040 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2041 RtApiJack *object = (RtApiJack *) info->object;
\r
2043 object->closeStream();
\r
2045 pthread_exit( NULL );
\r
2047 static void jackShutdown( void *infoPointer )
\r
2049 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2050 RtApiJack *object = (RtApiJack *) info->object;
\r
2052 // Check current stream state. If stopped, then we'll assume this
\r
2053 // was called as a result of a call to RtApiJack::stopStream (the
\r
2054 // deactivation of a client handle causes this function to be called).
\r
2055 // If not, we'll assume the Jack server is shutting down or some
\r
2056 // other problem occurred and we should close the stream.
\r
2057 if ( object->isStreamRunning() == false ) return;
\r
2059 ThreadHandle threadId;
\r
2060 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2061 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2064 static int jackXrun( void *infoPointer )
\r
2066 JackHandle *handle = (JackHandle *) infoPointer;
\r
2068 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2069 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2074 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2075 unsigned int firstChannel, unsigned int sampleRate,
\r
2076 RtAudioFormat format, unsigned int *bufferSize,
\r
2077 RtAudio::StreamOptions *options )
\r
2079 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2081 // Look for jack server and try to become a client (only do once per stream).
\r
2082 jack_client_t *client = 0;
\r
2083 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2084 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2085 jack_status_t *status = NULL;
\r
2086 if ( options && !options->streamName.empty() )
\r
2087 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2089 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2090 if ( client == 0 ) {
\r
2091 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2092 error( RtAudioError::WARNING );
\r
2097 // The handle must have been created on an earlier pass.
\r
2098 client = handle->client;
\r
2101 const char **ports;
\r
2102 std::string port, previousPort, deviceName;
\r
2103 unsigned int nPorts = 0, nDevices = 0;
\r
2104 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2106 // Parse the port names up to the first colon (:).
\r
2107 size_t iColon = 0;
\r
2109 port = (char *) ports[ nPorts ];
\r
2110 iColon = port.find(":");
\r
2111 if ( iColon != std::string::npos ) {
\r
2112 port = port.substr( 0, iColon );
\r
2113 if ( port != previousPort ) {
\r
2114 if ( nDevices == device ) deviceName = port;
\r
2116 previousPort = port;
\r
2119 } while ( ports[++nPorts] );
\r
2123 if ( device >= nDevices ) {
\r
2124 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2128 // Count the available ports containing the client name as device
\r
2129 // channels. Jack "input ports" equal RtAudio output channels.
\r
2130 unsigned int nChannels = 0;
\r
2131 unsigned long flag = JackPortIsInput;
\r
2132 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2133 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2135 while ( ports[ nChannels ] ) nChannels++;
\r
2139 // Compare the jack ports for specified client to the requested number of channels.
\r
2140 if ( nChannels < (channels + firstChannel) ) {
\r
2141 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2142 errorText_ = errorStream_.str();
\r
2146 // Check the jack server sample rate.
\r
2147 unsigned int jackRate = jack_get_sample_rate( client );
\r
2148 if ( sampleRate != jackRate ) {
\r
2149 jack_client_close( client );
\r
2150 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2151 errorText_ = errorStream_.str();
\r
2154 stream_.sampleRate = jackRate;
\r
2156 // Get the latency of the JACK port.
\r
2157 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2158 if ( ports[ firstChannel ] ) {
\r
2159 // Added by Ge Wang
\r
2160 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2161 // the range (usually the min and max are equal)
\r
2162 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2163 // get the latency range
\r
2164 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2165 // be optimistic, use the min!
\r
2166 stream_.latency[mode] = latrange.min;
\r
2167 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2171 // The jack server always uses 32-bit floating-point data.
\r
2172 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2173 stream_.userFormat = format;
\r
2175 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2176 else stream_.userInterleaved = true;
\r
2178 // Jack always uses non-interleaved buffers.
\r
2179 stream_.deviceInterleaved[mode] = false;
\r
2181 // Jack always provides host byte-ordered data.
\r
2182 stream_.doByteSwap[mode] = false;
\r
2184 // Get the buffer size. The buffer size and number of buffers
\r
2185 // (periods) is set when the jack server is started.
\r
2186 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2187 *bufferSize = stream_.bufferSize;
\r
2189 stream_.nDeviceChannels[mode] = channels;
\r
2190 stream_.nUserChannels[mode] = channels;
\r
2192 // Set flags for buffer conversion.
\r
2193 stream_.doConvertBuffer[mode] = false;
\r
2194 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2195 stream_.doConvertBuffer[mode] = true;
\r
2196 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2197 stream_.nUserChannels[mode] > 1 )
\r
2198 stream_.doConvertBuffer[mode] = true;
\r
2200 // Allocate our JackHandle structure for the stream.
\r
2201 if ( handle == 0 ) {
\r
2203 handle = new JackHandle;
\r
2205 catch ( std::bad_alloc& ) {
\r
2206 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2210 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2211 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2214 stream_.apiHandle = (void *) handle;
\r
2215 handle->client = client;
\r
2217 handle->deviceName[mode] = deviceName;
\r
2219 // Allocate necessary internal buffers.
\r
2220 unsigned long bufferBytes;
\r
2221 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2222 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2223 if ( stream_.userBuffer[mode] == NULL ) {
\r
2224 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2228 if ( stream_.doConvertBuffer[mode] ) {
\r
2230 bool makeBuffer = true;
\r
2231 if ( mode == OUTPUT )
\r
2232 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2233 else { // mode == INPUT
\r
2234 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2235 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2236 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2237 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2241 if ( makeBuffer ) {
\r
2242 bufferBytes *= *bufferSize;
\r
2243 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2244 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2245 if ( stream_.deviceBuffer == NULL ) {
\r
2246 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2252 // Allocate memory for the Jack ports (channels) identifiers.
\r
2253 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2254 if ( handle->ports[mode] == NULL ) {
\r
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2259 stream_.device[mode] = device;
\r
2260 stream_.channelOffset[mode] = firstChannel;
\r
2261 stream_.state = STREAM_STOPPED;
\r
2262 stream_.callbackInfo.object = (void *) this;
\r
2264 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2265 // We had already set up the stream for output.
\r
2266 stream_.mode = DUPLEX;
\r
2268 stream_.mode = mode;
\r
2269 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2270 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2271 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2274 // Register our ports.
\r
2276 if ( mode == OUTPUT ) {
\r
2277 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2278 snprintf( label, 64, "outport %d", i );
\r
2279 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2280 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2284 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2285 snprintf( label, 64, "inport %d", i );
\r
2286 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2287 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2291 // Setup the buffer conversion information structure. We don't use
\r
2292 // buffers to do channel offsets, so we override that parameter
\r
2294 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2300 pthread_cond_destroy( &handle->condition );
\r
2301 jack_client_close( handle->client );
\r
2303 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2304 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2307 stream_.apiHandle = 0;
\r
2310 for ( int i=0; i<2; i++ ) {
\r
2311 if ( stream_.userBuffer[i] ) {
\r
2312 free( stream_.userBuffer[i] );
\r
2313 stream_.userBuffer[i] = 0;
\r
2317 if ( stream_.deviceBuffer ) {
\r
2318 free( stream_.deviceBuffer );
\r
2319 stream_.deviceBuffer = 0;
\r
2325 void RtApiJack :: closeStream( void )
\r
2327 if ( stream_.state == STREAM_CLOSED ) {
\r
2328 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2329 error( RtAudioError::WARNING );
\r
2333 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2336 if ( stream_.state == STREAM_RUNNING )
\r
2337 jack_deactivate( handle->client );
\r
2339 jack_client_close( handle->client );
\r
2343 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2344 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2345 pthread_cond_destroy( &handle->condition );
\r
2347 stream_.apiHandle = 0;
\r
2350 for ( int i=0; i<2; i++ ) {
\r
2351 if ( stream_.userBuffer[i] ) {
\r
2352 free( stream_.userBuffer[i] );
\r
2353 stream_.userBuffer[i] = 0;
\r
2357 if ( stream_.deviceBuffer ) {
\r
2358 free( stream_.deviceBuffer );
\r
2359 stream_.deviceBuffer = 0;
\r
2362 stream_.mode = UNINITIALIZED;
\r
2363 stream_.state = STREAM_CLOSED;
\r
2366 void RtApiJack :: startStream( void )
\r
2369 if ( stream_.state == STREAM_RUNNING ) {
\r
2370 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2371 error( RtAudioError::WARNING );
\r
2375 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2376 int result = jack_activate( handle->client );
\r
2378 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2382 const char **ports;
\r
2384 // Get the list of available ports.
\r
2385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2387 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2388 if ( ports == NULL) {
\r
2389 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2393 // Now make the port connections. Since RtAudio wasn't designed to
\r
2394 // allow the user to select particular channels of a device, we'll
\r
2395 // just open the first "nChannels" ports with offset.
\r
2396 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2398 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2399 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2402 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2409 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2411 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2412 if ( ports == NULL) {
\r
2413 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2417 // Now make the port connections. See note above.
\r
2418 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2420 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2421 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2424 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2431 handle->drainCounter = 0;
\r
2432 handle->internalDrain = false;
\r
2433 stream_.state = STREAM_RUNNING;
\r
2436 if ( result == 0 ) return;
\r
2437 error( RtAudioError::SYSTEM_ERROR );
\r
2440 void RtApiJack :: stopStream( void )
\r
2443 if ( stream_.state == STREAM_STOPPED ) {
\r
2444 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2445 error( RtAudioError::WARNING );
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 if ( handle->drainCounter == 0 ) {
\r
2453 handle->drainCounter = 2;
\r
2454 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2458 jack_deactivate( handle->client );
\r
2459 stream_.state = STREAM_STOPPED;
\r
2462 void RtApiJack :: abortStream( void )
\r
2465 if ( stream_.state == STREAM_STOPPED ) {
\r
2466 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2467 error( RtAudioError::WARNING );
\r
2471 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2472 handle->drainCounter = 2;
\r
2477 // This function will be called by a spawned thread when the user
\r
2478 // callback function signals that the stream should be stopped or
\r
2479 // aborted. It is necessary to handle it this way because the
\r
2480 // callbackEvent() function must return before the jack_deactivate()
\r
2481 // function will return.
\r
2482 static void *jackStopStream( void *ptr )
\r
2484 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2485 RtApiJack *object = (RtApiJack *) info->object;
\r
2487 object->stopStream();
\r
2488 pthread_exit( NULL );
\r
2491 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2493 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2494 if ( stream_.state == STREAM_CLOSED ) {
\r
2495 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2496 error( RtAudioError::WARNING );
\r
2499 if ( stream_.bufferSize != nframes ) {
\r
2500 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2501 error( RtAudioError::WARNING );
\r
2505 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2506 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2508 // Check if we were draining the stream and signal is finished.
\r
2509 if ( handle->drainCounter > 3 ) {
\r
2510 ThreadHandle threadId;
\r
2512 stream_.state = STREAM_STOPPING;
\r
2513 if ( handle->internalDrain == true )
\r
2514 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2516 pthread_cond_signal( &handle->condition );
\r
2520 // Invoke user callback first, to get fresh output data.
\r
2521 if ( handle->drainCounter == 0 ) {
\r
2522 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2523 double streamTime = getStreamTime();
\r
2524 RtAudioStreamStatus status = 0;
\r
2525 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2526 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2527 handle->xrun[0] = false;
\r
2529 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2530 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2531 handle->xrun[1] = false;
\r
2533 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2534 stream_.bufferSize, streamTime, status, info->userData );
\r
2535 if ( cbReturnValue == 2 ) {
\r
2536 stream_.state = STREAM_STOPPING;
\r
2537 handle->drainCounter = 2;
\r
2539 pthread_create( &id, NULL, jackStopStream, info );
\r
2542 else if ( cbReturnValue == 1 ) {
\r
2543 handle->drainCounter = 1;
\r
2544 handle->internalDrain = true;
\r
2548 jack_default_audio_sample_t *jackbuffer;
\r
2549 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2552 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2554 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2555 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2556 memset( jackbuffer, 0, bufferBytes );
\r
2560 else if ( stream_.doConvertBuffer[0] ) {
\r
2562 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2569 else { // no buffer conversion
\r
2570 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2571 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2572 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2576 if ( handle->drainCounter ) {
\r
2577 handle->drainCounter++;
\r
2582 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2584 if ( stream_.doConvertBuffer[1] ) {
\r
2585 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2587 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2589 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2591 else { // no buffer conversion
\r
2592 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2593 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2594 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 RtApi::tickStreamTime();
\r
2603 //******************** End of __UNIX_JACK__ *********************//
\r
2606 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2608 // The ASIO API is designed around a callback scheme, so this
\r
2609 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2610 // Jack. The primary constraint with ASIO is that it only allows
\r
2611 // access to a single driver at a time. Thus, it is not possible to
\r
2612 // have more than one simultaneous RtAudio stream.
\r
2614 // This implementation also requires a number of external ASIO files
\r
2615 // and a few global variables. The ASIO callback scheme does not
\r
2616 // allow for the passing of user data, so we must create a global
\r
2617 // pointer to our callbackInfo structure.
\r
2619 // On unix systems, we make use of a pthread condition variable.
\r
2620 // Since there is no equivalent in Windows, I hacked something based
\r
2621 // on information found in
\r
2622 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2624 #include "asiosys.h"
\r
2626 #include "iasiothiscallresolver.h"
\r
2627 #include "asiodrivers.h"
\r
2630 static AsioDrivers drivers;
\r
2631 static ASIOCallbacks asioCallbacks;
\r
2632 static ASIODriverInfo driverInfo;
\r
2633 static CallbackInfo *asioCallbackInfo;
\r
2634 static bool asioXRun;
\r
2636 struct AsioHandle {
\r
2637 int drainCounter; // Tracks callback counts when draining
\r
2638 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2639 ASIOBufferInfo *bufferInfos;
\r
2643 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2646 // Function declarations (definitions at end of section)
\r
2647 static const char* getAsioErrorString( ASIOError result );
\r
2648 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2649 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2651 RtApiAsio :: RtApiAsio()
\r
2653 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2654 // CoInitialize beforehand, but it must be for appartment threading
\r
2655 // (in which case, CoInitilialize will return S_FALSE here).
\r
2656 coInitialized_ = false;
\r
2657 HRESULT hr = CoInitialize( NULL );
\r
2658 if ( FAILED(hr) ) {
\r
2659 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2660 error( RtAudioError::WARNING );
\r
2662 coInitialized_ = true;
\r
2664 drivers.removeCurrentDriver();
\r
2665 driverInfo.asioVersion = 2;
\r
2667 // See note in DirectSound implementation about GetDesktopWindow().
\r
2668 driverInfo.sysRef = GetForegroundWindow();
\r
2671 RtApiAsio :: ~RtApiAsio()
\r
2673 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2674 if ( coInitialized_ ) CoUninitialize();
\r
2677 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2679 return (unsigned int) drivers.asioGetNumDev();
\r
2682 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2684 RtAudio::DeviceInfo info;
\r
2685 info.probed = false;
\r
2688 unsigned int nDevices = getDeviceCount();
\r
2689 if ( nDevices == 0 ) {
\r
2690 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2691 error( RtAudioError::INVALID_USE );
\r
2695 if ( device >= nDevices ) {
\r
2696 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2697 error( RtAudioError::INVALID_USE );
\r
2701 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2702 if ( stream_.state != STREAM_CLOSED ) {
\r
2703 if ( device >= devices_.size() ) {
\r
2704 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2705 error( RtAudioError::WARNING );
\r
2708 return devices_[ device ];
\r
2711 char driverName[32];
\r
2712 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2713 if ( result != ASE_OK ) {
\r
2714 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2715 errorText_ = errorStream_.str();
\r
2716 error( RtAudioError::WARNING );
\r
2720 info.name = driverName;
\r
2722 if ( !drivers.loadDriver( driverName ) ) {
\r
2723 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2724 errorText_ = errorStream_.str();
\r
2725 error( RtAudioError::WARNING );
\r
2729 result = ASIOInit( &driverInfo );
\r
2730 if ( result != ASE_OK ) {
\r
2731 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2732 errorText_ = errorStream_.str();
\r
2733 error( RtAudioError::WARNING );
\r
2737 // Determine the device channel information.
\r
2738 long inputChannels, outputChannels;
\r
2739 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2740 if ( result != ASE_OK ) {
\r
2741 drivers.removeCurrentDriver();
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 info.outputChannels = outputChannels;
\r
2749 info.inputChannels = inputChannels;
\r
2750 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2751 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2753 // Determine the supported sample rates.
\r
2754 info.sampleRates.clear();
\r
2755 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2756 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2757 if ( result == ASE_OK )
\r
2758 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2761 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2762 ASIOChannelInfo channelInfo;
\r
2763 channelInfo.channel = 0;
\r
2764 channelInfo.isInput = true;
\r
2765 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2766 result = ASIOGetChannelInfo( &channelInfo );
\r
2767 if ( result != ASE_OK ) {
\r
2768 drivers.removeCurrentDriver();
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 info.nativeFormats = 0;
\r
2776 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2777 info.nativeFormats |= RTAUDIO_SINT16;
\r
2778 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT32;
\r
2780 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2784 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2785 info.nativeFormats |= RTAUDIO_SINT24;
\r
2787 if ( info.outputChannels > 0 )
\r
2788 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2789 if ( info.inputChannels > 0 )
\r
2790 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2792 info.probed = true;
\r
2793 drivers.removeCurrentDriver();
\r
2797 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2799 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2800 object->callbackEvent( index );
\r
2803 void RtApiAsio :: saveDeviceInfo( void )
\r
2807 unsigned int nDevices = getDeviceCount();
\r
2808 devices_.resize( nDevices );
\r
2809 for ( unsigned int i=0; i<nDevices; i++ )
\r
2810 devices_[i] = getDeviceInfo( i );
\r
2813 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2814 unsigned int firstChannel, unsigned int sampleRate,
\r
2815 RtAudioFormat format, unsigned int *bufferSize,
\r
2816 RtAudio::StreamOptions *options )
\r
2818 // For ASIO, a duplex stream MUST use the same driver.
\r
2819 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2820 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2824 char driverName[32];
\r
2825 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2826 if ( result != ASE_OK ) {
\r
2827 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2828 errorText_ = errorStream_.str();
\r
2832 // Only load the driver once for duplex stream.
\r
2833 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2834 // The getDeviceInfo() function will not work when a stream is open
\r
2835 // because ASIO does not allow multiple devices to run at the same
\r
2836 // time. Thus, we'll probe the system before opening a stream and
\r
2837 // save the results for use by getDeviceInfo().
\r
2838 this->saveDeviceInfo();
\r
2840 if ( !drivers.loadDriver( driverName ) ) {
\r
2841 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2842 errorText_ = errorStream_.str();
\r
2846 result = ASIOInit( &driverInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2854 // Check the device channel count.
\r
2855 long inputChannels, outputChannels;
\r
2856 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2857 if ( result != ASE_OK ) {
\r
2858 drivers.removeCurrentDriver();
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2864 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2865 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2866 drivers.removeCurrentDriver();
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2871 stream_.nDeviceChannels[mode] = channels;
\r
2872 stream_.nUserChannels[mode] = channels;
\r
2873 stream_.channelOffset[mode] = firstChannel;
\r
2875 // Verify the sample rate is supported.
\r
2876 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2877 if ( result != ASE_OK ) {
\r
2878 drivers.removeCurrentDriver();
\r
2879 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2880 errorText_ = errorStream_.str();
\r
2884 // Get the current sample rate
\r
2885 ASIOSampleRate currentRate;
\r
2886 result = ASIOGetSampleRate( ¤tRate );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 // Set the sample rate only if necessary
\r
2895 if ( currentRate != sampleRate ) {
\r
2896 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2897 if ( result != ASE_OK ) {
\r
2898 drivers.removeCurrentDriver();
\r
2899 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2900 errorText_ = errorStream_.str();
\r
2905 // Determine the driver data type.
\r
2906 ASIOChannelInfo channelInfo;
\r
2907 channelInfo.channel = 0;
\r
2908 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2909 else channelInfo.isInput = true;
\r
2910 result = ASIOGetChannelInfo( &channelInfo );
\r
2911 if ( result != ASE_OK ) {
\r
2912 drivers.removeCurrentDriver();
\r
2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2914 errorText_ = errorStream_.str();
\r
2918 // Assuming WINDOWS host is always little-endian.
\r
2919 stream_.doByteSwap[mode] = false;
\r
2920 stream_.userFormat = format;
\r
2921 stream_.deviceFormat[mode] = 0;
\r
2922 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2923 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2926 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2927 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2928 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2930 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2931 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2932 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2934 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2935 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2936 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2938 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2939 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2940 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2943 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2944 drivers.removeCurrentDriver();
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2946 errorText_ = errorStream_.str();
\r
2950 // Set the buffer size. For a duplex stream, this will end up
\r
2951 // setting the buffer size based on the input constraints, which
\r
2953 long minSize, maxSize, preferSize, granularity;
\r
2954 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2955 if ( result != ASE_OK ) {
\r
2956 drivers.removeCurrentDriver();
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2958 errorText_ = errorStream_.str();
\r
2962 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2963 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2964 else if ( granularity == -1 ) {
\r
2965 // Make sure bufferSize is a power of two.
\r
2966 int log2_of_min_size = 0;
\r
2967 int log2_of_max_size = 0;
\r
2969 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2970 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2971 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2974 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2975 int min_delta_num = log2_of_min_size;
\r
2977 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2978 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2979 if (current_delta < min_delta) {
\r
2980 min_delta = current_delta;
\r
2981 min_delta_num = i;
\r
2985 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2986 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2987 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2989 else if ( granularity != 0 ) {
\r
2990 // Set to an even multiple of granularity, rounding up.
\r
2991 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2994 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2995 drivers.removeCurrentDriver();
\r
2996 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3000 stream_.bufferSize = *bufferSize;
\r
3001 stream_.nBuffers = 2;
\r
3003 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3004 else stream_.userInterleaved = true;
\r
3006 // ASIO always uses non-interleaved buffers.
\r
3007 stream_.deviceInterleaved[mode] = false;
\r
3009 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3010 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3011 if ( handle == 0 ) {
\r
3013 handle = new AsioHandle;
\r
3015 catch ( std::bad_alloc& ) {
\r
3016 //if ( handle == NULL ) {
\r
3017 drivers.removeCurrentDriver();
\r
3018 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3021 handle->bufferInfos = 0;
\r
3023 // Create a manual-reset event.
\r
3024 handle->condition = CreateEvent( NULL, // no security
\r
3025 TRUE, // manual-reset
\r
3026 FALSE, // non-signaled initially
\r
3027 NULL ); // unnamed
\r
3028 stream_.apiHandle = (void *) handle;
\r
3031 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3032 // and output separately, we'll have to dispose of previously
\r
3033 // created output buffers for a duplex stream.
\r
3034 long inputLatency, outputLatency;
\r
3035 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3036 ASIODisposeBuffers();
\r
3037 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3040 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3041 bool buffersAllocated = false;
\r
3042 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3043 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3044 if ( handle->bufferInfos == NULL ) {
\r
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3046 errorText_ = errorStream_.str();
\r
3050 ASIOBufferInfo *infos;
\r
3051 infos = handle->bufferInfos;
\r
3052 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3053 infos->isInput = ASIOFalse;
\r
3054 infos->channelNum = i + stream_.channelOffset[0];
\r
3055 infos->buffers[0] = infos->buffers[1] = 0;
\r
3057 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3058 infos->isInput = ASIOTrue;
\r
3059 infos->channelNum = i + stream_.channelOffset[1];
\r
3060 infos->buffers[0] = infos->buffers[1] = 0;
\r
3063 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3064 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3065 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3066 asioCallbacks.asioMessage = &asioMessages;
\r
3067 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3068 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3069 if ( result != ASE_OK ) {
\r
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3071 errorText_ = errorStream_.str();
\r
3074 buffersAllocated = true;
\r
3076 // Set flags for buffer conversion.
\r
3077 stream_.doConvertBuffer[mode] = false;
\r
3078 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3079 stream_.doConvertBuffer[mode] = true;
\r
3080 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3081 stream_.nUserChannels[mode] > 1 )
\r
3082 stream_.doConvertBuffer[mode] = true;
\r
3084 // Allocate necessary internal buffers
\r
3085 unsigned long bufferBytes;
\r
3086 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3087 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3088 if ( stream_.userBuffer[mode] == NULL ) {
\r
3089 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3093 if ( stream_.doConvertBuffer[mode] ) {
\r
3095 bool makeBuffer = true;
\r
3096 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3097 if ( mode == INPUT ) {
\r
3098 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3099 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3100 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3104 if ( makeBuffer ) {
\r
3105 bufferBytes *= *bufferSize;
\r
3106 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3107 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3108 if ( stream_.deviceBuffer == NULL ) {
\r
3109 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3115 stream_.sampleRate = sampleRate;
\r
3116 stream_.device[mode] = device;
\r
3117 stream_.state = STREAM_STOPPED;
\r
3118 asioCallbackInfo = &stream_.callbackInfo;
\r
3119 stream_.callbackInfo.object = (void *) this;
\r
3120 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3121 // We had already set up an output stream.
\r
3122 stream_.mode = DUPLEX;
\r
3124 stream_.mode = mode;
\r
3126 // Determine device latencies
\r
3127 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3128 if ( result != ASE_OK ) {
\r
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3130 errorText_ = errorStream_.str();
\r
3131 error( RtAudioError::WARNING); // warn but don't fail
\r
3134 stream_.latency[0] = outputLatency;
\r
3135 stream_.latency[1] = inputLatency;
\r
3138 // Setup the buffer conversion information structure. We don't use
\r
3139 // buffers to do channel offsets, so we override that parameter
\r
3141 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3146 if ( buffersAllocated )
\r
3147 ASIODisposeBuffers();
\r
3148 drivers.removeCurrentDriver();
\r
3151 CloseHandle( handle->condition );
\r
3152 if ( handle->bufferInfos )
\r
3153 free( handle->bufferInfos );
\r
3155 stream_.apiHandle = 0;
\r
3158 for ( int i=0; i<2; i++ ) {
\r
3159 if ( stream_.userBuffer[i] ) {
\r
3160 free( stream_.userBuffer[i] );
\r
3161 stream_.userBuffer[i] = 0;
\r
3165 if ( stream_.deviceBuffer ) {
\r
3166 free( stream_.deviceBuffer );
\r
3167 stream_.deviceBuffer = 0;
\r
3173 void RtApiAsio :: closeStream()
\r
3175 if ( stream_.state == STREAM_CLOSED ) {
\r
3176 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 if ( stream_.state == STREAM_RUNNING ) {
\r
3182 stream_.state = STREAM_STOPPED;
\r
3185 ASIODisposeBuffers();
\r
3186 drivers.removeCurrentDriver();
\r
3188 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3190 CloseHandle( handle->condition );
\r
3191 if ( handle->bufferInfos )
\r
3192 free( handle->bufferInfos );
\r
3194 stream_.apiHandle = 0;
\r
3197 for ( int i=0; i<2; i++ ) {
\r
3198 if ( stream_.userBuffer[i] ) {
\r
3199 free( stream_.userBuffer[i] );
\r
3200 stream_.userBuffer[i] = 0;
\r
3204 if ( stream_.deviceBuffer ) {
\r
3205 free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = 0;
\r
3209 stream_.mode = UNINITIALIZED;
\r
3210 stream_.state = STREAM_CLOSED;
\r
// Reset to false on every startStream() (see below).  NOTE(review): kept
// as a file-scope non-static global to preserve upstream linkage — confirm
// no other translation unit references it before making it static.
bool stopThreadCalled = false;
\r
3215 void RtApiAsio :: startStream()
\r
3218 if ( stream_.state == STREAM_RUNNING ) {
\r
3219 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3220 error( RtAudioError::WARNING );
\r
3224 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3225 ASIOError result = ASIOStart();
\r
3226 if ( result != ASE_OK ) {
\r
3227 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3228 errorText_ = errorStream_.str();
\r
3232 handle->drainCounter = 0;
\r
3233 handle->internalDrain = false;
\r
3234 ResetEvent( handle->condition );
\r
3235 stream_.state = STREAM_RUNNING;
\r
3239 stopThreadCalled = false;
\r
3241 if ( result == ASE_OK ) return;
\r
3242 error( RtAudioError::SYSTEM_ERROR );
\r
3245 void RtApiAsio :: stopStream()
\r
3248 if ( stream_.state == STREAM_STOPPED ) {
\r
3249 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3250 error( RtAudioError::WARNING );
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3256 if ( handle->drainCounter == 0 ) {
\r
3257 handle->drainCounter = 2;
\r
3258 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3262 stream_.state = STREAM_STOPPED;
\r
3264 ASIOError result = ASIOStop();
\r
3265 if ( result != ASE_OK ) {
\r
3266 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3267 errorText_ = errorStream_.str();
\r
3270 if ( result == ASE_OK ) return;
\r
3271 error( RtAudioError::SYSTEM_ERROR );
\r
3274 void RtApiAsio :: abortStream()
\r
3277 if ( stream_.state == STREAM_STOPPED ) {
\r
3278 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3279 error( RtAudioError::WARNING );
\r
3283 // The following lines were commented-out because some behavior was
\r
3284 // noted where the device buffers need to be zeroed to avoid
\r
3285 // continuing sound, even when the device buffers are completely
\r
3286 // disposed. So now, calling abort is the same as calling stop.
\r
3287 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3288 // handle->drainCounter = 2;
\r
3292 // This function will be called by a spawned thread when the user
\r
3293 // callback function signals that the stream should be stopped or
\r
3294 // aborted. It is necessary to handle it this way because the
\r
3295 // callbackEvent() function must return before the ASIOStop()
\r
3296 // function will return.
\r
3297 static unsigned __stdcall asioStopStream( void *ptr )
\r
3299 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3300 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3302 object->stopStream();
\r
3303 _endthreadex( 0 );
\r
3307 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3309 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3310 if ( stream_.state == STREAM_CLOSED ) {
\r
3311 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3319 // Check if we were draining the stream and signal if finished.
\r
3320 if ( handle->drainCounter > 3 ) {
\r
3322 stream_.state = STREAM_STOPPING;
\r
3323 if ( handle->internalDrain == false )
\r
3324 SetEvent( handle->condition );
\r
3325 else { // spawn a thread to stop the stream
\r
3326 unsigned threadId;
\r
3327 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3328 &stream_.callbackInfo, 0, &threadId );
\r
3333 // Invoke user callback to get fresh output data UNLESS we are
\r
3334 // draining stream.
\r
3335 if ( handle->drainCounter == 0 ) {
\r
3336 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3337 double streamTime = getStreamTime();
\r
3338 RtAudioStreamStatus status = 0;
\r
3339 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3340 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3343 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3344 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3347 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3348 stream_.bufferSize, streamTime, status, info->userData );
\r
3349 if ( cbReturnValue == 2 ) {
\r
3350 stream_.state = STREAM_STOPPING;
\r
3351 handle->drainCounter = 2;
\r
3352 unsigned threadId;
\r
3353 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3354 &stream_.callbackInfo, 0, &threadId );
\r
3357 else if ( cbReturnValue == 1 ) {
\r
3358 handle->drainCounter = 1;
\r
3359 handle->internalDrain = true;
\r
3363 unsigned int nChannels, bufferBytes, i, j;
\r
3364 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3367 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3369 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3371 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3372 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3373 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3377 else if ( stream_.doConvertBuffer[0] ) {
\r
3379 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3380 if ( stream_.doByteSwap[0] )
\r
3381 byteSwapBuffer( stream_.deviceBuffer,
\r
3382 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3383 stream_.deviceFormat[0] );
\r
3385 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3386 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3387 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3388 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3394 if ( stream_.doByteSwap[0] )
\r
3395 byteSwapBuffer( stream_.userBuffer[0],
\r
3396 stream_.bufferSize * stream_.nUserChannels[0],
\r
3397 stream_.userFormat );
\r
3399 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3400 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3401 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3402 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3407 if ( handle->drainCounter ) {
\r
3408 handle->drainCounter++;
\r
3413 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3415 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3417 if (stream_.doConvertBuffer[1]) {
\r
3419 // Always interleave ASIO input data.
\r
3420 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3421 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3422 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3423 handle->bufferInfos[i].buffers[bufferIndex],
\r
3427 if ( stream_.doByteSwap[1] )
\r
3428 byteSwapBuffer( stream_.deviceBuffer,
\r
3429 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3430 stream_.deviceFormat[1] );
\r
3431 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3435 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3436 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3437 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3438 handle->bufferInfos[i].buffers[bufferIndex],
\r
3443 if ( stream_.doByteSwap[1] )
\r
3444 byteSwapBuffer( stream_.userBuffer[1],
\r
3445 stream_.bufferSize * stream_.nUserChannels[1],
\r
3446 stream_.userFormat );
\r
3451 // The following call was suggested by Malte Clasen. While the API
\r
3452 // documentation indicates it should not be required, some device
\r
3453 // drivers apparently do not function correctly without it.
\r
3454 ASIOOutputReady();
\r
3456 RtApi::tickStreamTime();
\r
3460 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3462 // The ASIO documentation says that this usually only happens during
\r
3463 // external sync. Audio processing is not stopped by the driver,
\r
3464 // actual sample rate might not have even changed, maybe only the
\r
3465 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3468 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3470 object->stopStream();
\r
3472 catch ( RtAudioError &exception ) {
\r
3473 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3477 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3480 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3484 switch( selector ) {
\r
3485 case kAsioSelectorSupported:
\r
3486 if ( value == kAsioResetRequest
\r
3487 || value == kAsioEngineVersion
\r
3488 || value == kAsioResyncRequest
\r
3489 || value == kAsioLatenciesChanged
\r
3490 // The following three were added for ASIO 2.0, you don't
\r
3491 // necessarily have to support them.
\r
3492 || value == kAsioSupportsTimeInfo
\r
3493 || value == kAsioSupportsTimeCode
\r
3494 || value == kAsioSupportsInputMonitor)
\r
3497 case kAsioResetRequest:
\r
3498 // Defer the task and perform the reset of the driver during the
\r
3499 // next "safe" situation. You cannot reset the driver right now,
\r
3500 // as this code is called from the driver. Reset the driver is
\r
3501 // done by completely destruct is. I.e. ASIOStop(),
\r
3502 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3504 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3507 case kAsioResyncRequest:
\r
3508 // This informs the application that the driver encountered some
\r
3509 // non-fatal data loss. It is used for synchronization purposes
\r
3510 // of different media. Added mainly to work around the Win16Mutex
\r
3511 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3512 // which could lose data because the Mutex was held too long by
\r
3513 // another thread. However a driver can issue it in other
\r
3514 // situations, too.
\r
3515 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3519 case kAsioLatenciesChanged:
\r
3520 // This will inform the host application that the drivers were
\r
3521 // latencies changed. Beware, it this does not mean that the
\r
3522 // buffer sizes have changed! You might need to update internal
\r
3524 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3527 case kAsioEngineVersion:
\r
3528 // Return the supported ASIO version of the host application. If
\r
3529 // a host application does not implement this selector, ASIO 1.0
\r
3530 // is assumed by the driver.
\r
3533 case kAsioSupportsTimeInfo:
\r
3534 // Informs the driver whether the
\r
3535 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3536 // For compatibility with ASIO 1.0 drivers the host application
\r
3537 // should always support the "old" bufferSwitch method, too.
\r
3540 case kAsioSupportsTimeCode:
\r
3541 // Informs the driver whether application is interested in time
\r
3542 // code info. If an application does not need to know about time
\r
3543 // code, the driver has less work to do.
\r
3550 static const char* getAsioErrorString( ASIOError result )
\r
3555 const char*message;
\r
3558 static const Messages m[] =
\r
3560 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3561 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3562 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3563 { ASE_InvalidMode, "Invalid mode." },
\r
3564 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3565 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3566 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3569 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3570 if ( m[i].value == result ) return m[i].message;
\r
3572 return "Unknown error.";
\r
3575 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3579 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3581 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3582 // - Introduces support for the Windows WASAPI API
\r
3583 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3584 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3585 // - Includes automatic internal conversion of sample rate, buffer size and channel count
\r
3590 #include <audioclient.h>
\r
3592 #include <mmdeviceapi.h>
\r
3593 #include <functiondiscoverykeys_devpkey.h>
\r
3595 //=============================================================================
\r
3597 #define SAFE_RELEASE( objectPtr )\
\r
3600 objectPtr->Release();\
\r
3601 objectPtr = NULL;\
\r
3604 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3606 //-----------------------------------------------------------------------------
\r
3608 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3609 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3610 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3611 // provide intermediate storage for read / write synchronization.
\r
3612 class WasapiBuffer
\r
3616 : buffer_( NULL ),
\r
3625 // sets the length of the internal ring buffer
\r
3626 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3629 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3631 bufferSize_ = bufferSize;
\r
3636 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3637 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3639 if ( !buffer || // incoming buffer is NULL
\r
3640 bufferSize == 0 || // incoming buffer has no data
\r
3641 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3646 unsigned int relOutIndex = outIndex_;
\r
3647 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3648 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3649 relOutIndex += bufferSize_;
\r
3652 // "in" index can end on the "out" index but cannot begin at it
\r
3653 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3654 return false; // not enough space between "in" index and "out" index
\r
3657 // copy buffer from external to internal
\r
3658 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3659 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3660 int fromInSize = bufferSize - fromZeroSize;
\r
3664 case RTAUDIO_SINT8:
\r
3665 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3666 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3668 case RTAUDIO_SINT16:
\r
3669 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3670 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3672 case RTAUDIO_SINT24:
\r
3673 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3674 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3676 case RTAUDIO_SINT32:
\r
3677 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3678 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3680 case RTAUDIO_FLOAT32:
\r
3681 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3682 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3684 case RTAUDIO_FLOAT64:
\r
3685 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3686 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3690 // update "in" index
\r
3691 inIndex_ += bufferSize;
\r
3692 inIndex_ %= bufferSize_;
\r
3697 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3698 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3700 if ( !buffer || // incoming buffer is NULL
\r
3701 bufferSize == 0 || // incoming buffer has no data
\r
3702 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3707 unsigned int relInIndex = inIndex_;
\r
3708 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3709 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3710 relInIndex += bufferSize_;
\r
3713 // "out" index can begin at and end on the "in" index
\r
3714 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3715 return false; // not enough space between "out" index and "in" index
\r
3718 // copy buffer from internal to external
\r
3719 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3720 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3721 int fromOutSize = bufferSize - fromZeroSize;
\r
3725 case RTAUDIO_SINT8:
\r
3726 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3727 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3729 case RTAUDIO_SINT16:
\r
3730 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3731 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3733 case RTAUDIO_SINT24:
\r
3734 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3735 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3737 case RTAUDIO_SINT32:
\r
3738 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3739 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3741 case RTAUDIO_FLOAT32:
\r
3742 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3743 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3745 case RTAUDIO_FLOAT64:
\r
3746 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3747 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3751 // update "out" index
\r
3752 outIndex_ += bufferSize;
\r
3753 outIndex_ %= bufferSize_;
\r
3760 unsigned int bufferSize_;
\r
3761 unsigned int inIndex_;
\r
3762 unsigned int outIndex_;
\r
3765 //-----------------------------------------------------------------------------
\r
3767 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3768 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3769 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3770 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3771 // one rate and its multiple.
\r
3772 void convertBufferWasapi( char* outBuffer,
\r
3773 const char* inBuffer,
\r
3774 const unsigned int& inChannelCount,
\r
3775 const unsigned int& outChannelCount,
\r
3776 const unsigned int& inSampleRate,
\r
3777 const unsigned int& outSampleRate,
\r
3778 const unsigned int& inSampleCount,
\r
3779 unsigned int& outSampleCount,
\r
3780 const RtAudioFormat& format )
\r
3782 // calculate the new outSampleCount and relative sampleStep
\r
3783 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3784 float sampleStep = 1.0f / sampleRatio;
\r
3785 float inSampleFraction = 0.0f;
\r
3786 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3788 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3790 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3791 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3793 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3797 case RTAUDIO_SINT8:
\r
3798 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3800 case RTAUDIO_SINT16:
\r
3801 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3803 case RTAUDIO_SINT24:
\r
3804 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3806 case RTAUDIO_SINT32:
\r
3807 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3809 case RTAUDIO_FLOAT32:
\r
3810 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3812 case RTAUDIO_FLOAT64:
\r
3813 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3817 // jump to next in sample
\r
3818 inSampleFraction += sampleStep;
\r
3822 //-----------------------------------------------------------------------------
\r
3824 // A structure to hold various information related to the WASAPI implementation.
\r
3825 struct WasapiHandle
\r
3827 IAudioClient* captureAudioClient;
\r
3828 IAudioClient* renderAudioClient;
\r
3829 IAudioCaptureClient* captureClient;
\r
3830 IAudioRenderClient* renderClient;
\r
3831 HANDLE captureEvent;
\r
3832 HANDLE renderEvent;
\r
3835 : captureAudioClient( NULL ),
\r
3836 renderAudioClient( NULL ),
\r
3837 captureClient( NULL ),
\r
3838 renderClient( NULL ),
\r
3839 captureEvent( NULL ),
\r
3840 renderEvent( NULL ) {}
\r
3843 //=============================================================================
\r
3845 RtApiWasapi::RtApiWasapi()
\r
3846 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3848 // WASAPI can run either apartment or multi-threaded
\r
3849 HRESULT hr = CoInitialize( NULL );
\r
3851 if ( !FAILED( hr ) )
\r
3852 coInitialized_ = true;
\r
3854 // Instantiate device enumerator
\r
3855 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3856 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3857 ( void** ) &deviceEnumerator_ );
\r
3859 if ( FAILED( hr ) ) {
\r
3860 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3861 error( RtAudioError::DRIVER_ERROR );
\r
3865 //-----------------------------------------------------------------------------
\r
3867 RtApiWasapi::~RtApiWasapi()
\r
3869 // if this object previously called CoInitialize()
\r
3870 if ( coInitialized_ ) {
\r
3874 if ( stream_.state != STREAM_CLOSED ) {
\r
3878 SAFE_RELEASE( deviceEnumerator_ );
\r
3881 //=============================================================================
\r
3883 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3885 unsigned int captureDeviceCount = 0;
\r
3886 unsigned int renderDeviceCount = 0;
\r
3888 IMMDeviceCollection* captureDevices = NULL;
\r
3889 IMMDeviceCollection* renderDevices = NULL;
\r
3891 // Count capture devices
\r
3892 errorText_.clear();
\r
3893 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3894 if ( FAILED( hr ) ) {
\r
3895 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3899 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3900 if ( FAILED( hr ) ) {
\r
3901 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3905 // Count render devices
\r
3906 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3907 if ( FAILED( hr ) ) {
\r
3908 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3912 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3913 if ( FAILED( hr ) ) {
\r
3914 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3919 // release all references
\r
3920 SAFE_RELEASE( captureDevices );
\r
3921 SAFE_RELEASE( renderDevices );
\r
3923 if ( errorText_.empty() )
\r
3924 return captureDeviceCount + renderDeviceCount;
\r
3926 error( RtAudioError::DRIVER_ERROR );
\r
3930 //-----------------------------------------------------------------------------
\r
3932 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3934 RtAudio::DeviceInfo info;
\r
3935 unsigned int captureDeviceCount = 0;
\r
3936 unsigned int renderDeviceCount = 0;
\r
3937 std::wstring deviceName;
\r
3938 std::string defaultDeviceName;
\r
3939 bool isCaptureDevice = false;
\r
3941 PROPVARIANT deviceNameProp;
\r
3942 PROPVARIANT defaultDeviceNameProp;
\r
3944 IMMDeviceCollection* captureDevices = NULL;
\r
3945 IMMDeviceCollection* renderDevices = NULL;
\r
3946 IMMDevice* devicePtr = NULL;
\r
3947 IMMDevice* defaultDevicePtr = NULL;
\r
3948 IAudioClient* audioClient = NULL;
\r
3949 IPropertyStore* devicePropStore = NULL;
\r
3950 IPropertyStore* defaultDevicePropStore = NULL;
\r
3952 WAVEFORMATEX* deviceFormat = NULL;
\r
3953 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3956 info.probed = false;
\r
3958 // Count capture devices
\r
3959 errorText_.clear();
\r
3960 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3961 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3962 if ( FAILED( hr ) ) {
\r
3963 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3967 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3968 if ( FAILED( hr ) ) {
\r
3969 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3973 // Count render devices
\r
3974 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3980 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3981 if ( FAILED( hr ) ) {
\r
3982 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3986 // validate device index
\r
3987 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3989 errorType = RtAudioError::INVALID_USE;
\r
3993 // determine whether index falls within capture or render devices
\r
3994 if ( device >= renderDeviceCount ) {
\r
3995 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3996 if ( FAILED( hr ) ) {
\r
3997 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4000 isCaptureDevice = true;
\r
4003 hr = renderDevices->Item( device, &devicePtr );
\r
4004 if ( FAILED( hr ) ) {
\r
4005 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4008 isCaptureDevice = false;
\r
4011 // get default device name
\r
4012 if ( isCaptureDevice ) {
\r
4013 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4014 if ( FAILED( hr ) ) {
\r
4015 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4020 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4021 if ( FAILED( hr ) ) {
\r
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4027 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4028 if ( FAILED( hr ) ) {
\r
4029 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4032 PropVariantInit( &defaultDeviceNameProp );
\r
4034 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4040 deviceName = defaultDeviceNameProp.pwszVal;
\r
4041 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4044 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4045 if ( FAILED( hr ) ) {
\r
4046 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4050 PropVariantInit( &deviceNameProp );
\r
4052 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4053 if ( FAILED( hr ) ) {
\r
4054 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4058 deviceName = deviceNameProp.pwszVal;
\r
4059 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4062 if ( isCaptureDevice ) {
\r
4063 info.isDefaultInput = info.name == defaultDeviceName;
\r
4064 info.isDefaultOutput = false;
\r
4067 info.isDefaultInput = false;
\r
4068 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4072 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4073 if ( FAILED( hr ) ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4078 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4084 if ( isCaptureDevice ) {
\r
4085 info.inputChannels = deviceFormat->nChannels;
\r
4086 info.outputChannels = 0;
\r
4087 info.duplexChannels = 0;
\r
4090 info.inputChannels = 0;
\r
4091 info.outputChannels = deviceFormat->nChannels;
\r
4092 info.duplexChannels = 0;
\r
4096 info.sampleRates.clear();
\r
4098 // allow support for all sample rates as we have a built-in sample rate converter
\r
4099 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4100 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4104 info.nativeFormats = 0;
\r
4106 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4107 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4108 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4110 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4111 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4113 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4114 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4117 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4118 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4119 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4121 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4122 info.nativeFormats |= RTAUDIO_SINT8;
\r
4124 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4125 info.nativeFormats |= RTAUDIO_SINT16;
\r
4127 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4128 info.nativeFormats |= RTAUDIO_SINT24;
\r
4130 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4131 info.nativeFormats |= RTAUDIO_SINT32;
\r
4136 info.probed = true;
\r
4139 // release all references
\r
4140 PropVariantClear( &deviceNameProp );
\r
4141 PropVariantClear( &defaultDeviceNameProp );
\r
4143 SAFE_RELEASE( captureDevices );
\r
4144 SAFE_RELEASE( renderDevices );
\r
4145 SAFE_RELEASE( devicePtr );
\r
4146 SAFE_RELEASE( defaultDevicePtr );
\r
4147 SAFE_RELEASE( audioClient );
\r
4148 SAFE_RELEASE( devicePropStore );
\r
4149 SAFE_RELEASE( defaultDevicePropStore );
\r
4151 CoTaskMemFree( deviceFormat );
\r
4152 CoTaskMemFree( closestMatchFormat );
\r
4154 if ( !errorText_.empty() )
\r
4155 error( errorType );
\r
4159 //-----------------------------------------------------------------------------
\r
4161 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4163 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4164 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4172 //-----------------------------------------------------------------------------
\r
4174 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4176 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4177 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4185 //-----------------------------------------------------------------------------
\r
4187 void RtApiWasapi::closeStream( void )
\r
4189 if ( stream_.state == STREAM_CLOSED ) {
\r
4190 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4191 error( RtAudioError::WARNING );
\r
4195 if ( stream_.state != STREAM_STOPPED )
\r
4198 // clean up stream memory
\r
4199 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4200 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4202 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4205 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4206 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4208 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4209 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4211 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4212 stream_.apiHandle = NULL;
\r
4214 for ( int i = 0; i < 2; i++ ) {
\r
4215 if ( stream_.userBuffer[i] ) {
\r
4216 free( stream_.userBuffer[i] );
\r
4217 stream_.userBuffer[i] = 0;
\r
4221 if ( stream_.deviceBuffer ) {
\r
4222 free( stream_.deviceBuffer );
\r
4223 stream_.deviceBuffer = 0;
\r
4226 // update stream state
\r
4227 stream_.state = STREAM_CLOSED;
\r
4230 //-----------------------------------------------------------------------------
\r
4232 void RtApiWasapi::startStream( void )
\r
4236 if ( stream_.state == STREAM_RUNNING ) {
\r
4237 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4238 error( RtAudioError::WARNING );
\r
4242 // update stream state
\r
4243 stream_.state = STREAM_RUNNING;
\r
4245 // create WASAPI stream thread
\r
4246 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4248 if ( !stream_.callbackInfo.thread ) {
\r
4249 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4250 error( RtAudioError::THREAD_ERROR );
\r
4253 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4254 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4258 //-----------------------------------------------------------------------------
\r
4260 void RtApiWasapi::stopStream( void )
\r
4264 if ( stream_.state == STREAM_STOPPED ) {
\r
4265 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4266 error( RtAudioError::WARNING );
\r
4270 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4271 stream_.state = STREAM_STOPPING;
\r
4273 // wait until stream thread is stopped
\r
4274 while( stream_.state != STREAM_STOPPED ) {
\r
4278 // Wait for the last buffer to play before stopping.
\r
4279 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4281 // stop capture client if applicable
\r
4282 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4283 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4284 if ( FAILED( hr ) ) {
\r
4285 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4286 error( RtAudioError::DRIVER_ERROR );
\r
4291 // stop render client if applicable
\r
4292 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4293 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4294 if ( FAILED( hr ) ) {
\r
4295 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4296 error( RtAudioError::DRIVER_ERROR );
\r
4301 // close thread handle
\r
4302 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4303 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4304 error( RtAudioError::THREAD_ERROR );
\r
4308 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4311 //-----------------------------------------------------------------------------
\r
4313 void RtApiWasapi::abortStream( void )
\r
4317 if ( stream_.state == STREAM_STOPPED ) {
\r
4318 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4319 error( RtAudioError::WARNING );
\r
4323 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4324 stream_.state = STREAM_STOPPING;
\r
4326 // wait until stream thread is stopped
\r
4327 while ( stream_.state != STREAM_STOPPED ) {
\r
4331 // stop capture client if applicable
\r
4332 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4333 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4334 if ( FAILED( hr ) ) {
\r
4335 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4336 error( RtAudioError::DRIVER_ERROR );
\r
4341 // stop render client if applicable
\r
4342 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4343 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4344 if ( FAILED( hr ) ) {
\r
4345 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4346 error( RtAudioError::DRIVER_ERROR );
\r
4351 // close thread handle
\r
4352 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4353 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4354 error( RtAudioError::THREAD_ERROR );
\r
4358 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4361 //-----------------------------------------------------------------------------
\r
4363 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4364 unsigned int firstChannel, unsigned int sampleRate,
\r
4365 RtAudioFormat format, unsigned int* bufferSize,
\r
4366 RtAudio::StreamOptions* options )
\r
4368 bool methodResult = FAILURE;
\r
4369 unsigned int captureDeviceCount = 0;
\r
4370 unsigned int renderDeviceCount = 0;
\r
4372 IMMDeviceCollection* captureDevices = NULL;
\r
4373 IMMDeviceCollection* renderDevices = NULL;
\r
4374 IMMDevice* devicePtr = NULL;
\r
4375 WAVEFORMATEX* deviceFormat = NULL;
\r
4376 unsigned int bufferBytes;
\r
4377 stream_.state = STREAM_STOPPED;
\r
4379 // create API Handle if not already created
\r
4380 if ( !stream_.apiHandle )
\r
4381 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4383 // Count capture devices
\r
4384 errorText_.clear();
\r
4385 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4386 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4387 if ( FAILED( hr ) ) {
\r
4388 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4392 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4393 if ( FAILED( hr ) ) {
\r
4394 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4398 // Count render devices
\r
4399 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4400 if ( FAILED( hr ) ) {
\r
4401 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4405 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4406 if ( FAILED( hr ) ) {
\r
4407 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4411 // validate device index
\r
4412 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4413 errorType = RtAudioError::INVALID_USE;
\r
4414 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4418 // determine whether index falls within capture or render devices
\r
4419 if ( device >= renderDeviceCount ) {
\r
4420 if ( mode != INPUT ) {
\r
4421 errorType = RtAudioError::INVALID_USE;
\r
4422 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4426 // retrieve captureAudioClient from devicePtr
\r
4427 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4429 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4430 if ( FAILED( hr ) ) {
\r
4431 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4435 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4436 NULL, ( void** ) &captureAudioClient );
\r
4437 if ( FAILED( hr ) ) {
\r
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4442 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4443 if ( FAILED( hr ) ) {
\r
4444 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4448 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4449 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4452 if ( mode != OUTPUT ) {
\r
4453 errorType = RtAudioError::INVALID_USE;
\r
4454 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4458 // retrieve renderAudioClient from devicePtr
\r
4459 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4461 hr = renderDevices->Item( device, &devicePtr );
\r
4462 if ( FAILED( hr ) ) {
\r
4463 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4467 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4468 NULL, ( void** ) &renderAudioClient );
\r
4469 if ( FAILED( hr ) ) {
\r
4470 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4474 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4475 if ( FAILED( hr ) ) {
\r
4476 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4480 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4481 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4484 // fill stream data
\r
4485 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4486 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4487 stream_.mode = DUPLEX;
\r
4490 stream_.mode = mode;
\r
4493 stream_.device[mode] = device;
\r
4494 stream_.doByteSwap[mode] = false;
\r
4495 stream_.sampleRate = sampleRate;
\r
4496 stream_.bufferSize = *bufferSize;
\r
4497 stream_.nBuffers = 1;
\r
4498 stream_.nUserChannels[mode] = channels;
\r
4499 stream_.channelOffset[mode] = firstChannel;
\r
4500 stream_.userFormat = format;
\r
4501 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4503 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4504 stream_.userInterleaved = false;
\r
4506 stream_.userInterleaved = true;
\r
4507 stream_.deviceInterleaved[mode] = true;
\r
4509 // Set flags for buffer conversion.
\r
4510 stream_.doConvertBuffer[mode] = false;
\r
4511 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4512 stream_.doConvertBuffer[mode] = true;
\r
4513 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4514 stream_.nUserChannels[mode] > 1 )
\r
4515 stream_.doConvertBuffer[mode] = true;
\r
4517 if ( stream_.doConvertBuffer[mode] )
\r
4518 setConvertInfo( mode, 0 );
\r
4520 // Allocate necessary internal buffers
\r
4521 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4523 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4524 if ( !stream_.userBuffer[mode] ) {
\r
4525 errorType = RtAudioError::MEMORY_ERROR;
\r
4526 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4530 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4531 stream_.callbackInfo.priority = 15;
\r
4533 stream_.callbackInfo.priority = 0;
\r
4535 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4536 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4538 methodResult = SUCCESS;
\r
4542 SAFE_RELEASE( captureDevices );
\r
4543 SAFE_RELEASE( renderDevices );
\r
4544 SAFE_RELEASE( devicePtr );
\r
4545 CoTaskMemFree( deviceFormat );
\r
4547 // if method failed, close the stream
\r
4548 if ( methodResult == FAILURE )
\r
4551 if ( !errorText_.empty() )
\r
4552 error( errorType );
\r
4553 return methodResult;
\r
4556 //=============================================================================
\r
4558 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4561 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4566 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4569 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4574 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4577 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4582 //-----------------------------------------------------------------------------
\r
4584 void RtApiWasapi::wasapiThread()
\r
4586 // as this is a new thread, we must CoInitialize it
\r
4587 CoInitialize( NULL );
\r
4591 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4592 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4593 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4594 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4595 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4596 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4598 WAVEFORMATEX* captureFormat = NULL;
\r
4599 WAVEFORMATEX* renderFormat = NULL;
\r
4600 float captureSrRatio = 0.0f;
\r
4601 float renderSrRatio = 0.0f;
\r
4602 WasapiBuffer captureBuffer;
\r
4603 WasapiBuffer renderBuffer;
\r
4605 // declare local stream variables
\r
4606 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4607 BYTE* streamBuffer = NULL;
\r
4608 unsigned long captureFlags = 0;
\r
4609 unsigned int bufferFrameCount = 0;
\r
4610 unsigned int numFramesPadding = 0;
\r
4611 unsigned int convBufferSize = 0;
\r
4612 bool callbackPushed = false;
\r
4613 bool callbackPulled = false;
\r
4614 bool callbackStopped = false;
\r
4615 int callbackResult = 0;
\r
4617 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4618 char* convBuffer = NULL;
\r
4619 unsigned int convBuffSize = 0;
\r
4620 unsigned int deviceBuffSize = 0;
\r
4622 errorText_.clear();
\r
4623 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4625 // Attempt to assign "Pro Audio" characteristic to thread
\r
4626 HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
\r
4628 DWORD taskIndex = 0;
\r
4629 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4630 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4631 FreeLibrary( AvrtDll );
\r
4634 // start capture stream if applicable
\r
4635 if ( captureAudioClient ) {
\r
4636 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4637 if ( FAILED( hr ) ) {
\r
4638 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4642 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4644 // initialize capture stream according to desire buffer size
\r
4645 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4646 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4648 if ( !captureClient ) {
\r
4649 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4650 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4651 desiredBufferPeriod,
\r
4652 desiredBufferPeriod,
\r
4655 if ( FAILED( hr ) ) {
\r
4656 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4660 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4661 ( void** ) &captureClient );
\r
4662 if ( FAILED( hr ) ) {
\r
4663 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4667 // configure captureEvent to trigger on every available capture buffer
\r
4668 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4669 if ( !captureEvent ) {
\r
4670 errorType = RtAudioError::SYSTEM_ERROR;
\r
4671 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4675 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4676 if ( FAILED( hr ) ) {
\r
4677 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4681 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4682 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4685 unsigned int inBufferSize = 0;
\r
4686 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4687 if ( FAILED( hr ) ) {
\r
4688 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4692 // scale outBufferSize according to stream->user sample rate ratio
\r
4693 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4694 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4696 // set captureBuffer size
\r
4697 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4699 // reset the capture stream
\r
4700 hr = captureAudioClient->Reset();
\r
4701 if ( FAILED( hr ) ) {
\r
4702 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4706 // start the capture stream
\r
4707 hr = captureAudioClient->Start();
\r
4708 if ( FAILED( hr ) ) {
\r
4709 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4714 // start render stream if applicable
\r
4715 if ( renderAudioClient ) {
\r
4716 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4717 if ( FAILED( hr ) ) {
\r
4718 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4722 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4724 // initialize render stream according to desire buffer size
\r
4725 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4726 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4728 if ( !renderClient ) {
\r
4729 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4730 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4731 desiredBufferPeriod,
\r
4732 desiredBufferPeriod,
\r
4735 if ( FAILED( hr ) ) {
\r
4736 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4740 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4741 ( void** ) &renderClient );
\r
4742 if ( FAILED( hr ) ) {
\r
4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4747 // configure renderEvent to trigger on every available render buffer
\r
4748 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4749 if ( !renderEvent ) {
\r
4750 errorType = RtAudioError::SYSTEM_ERROR;
\r
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4755 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4756 if ( FAILED( hr ) ) {
\r
4757 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4761 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4762 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4765 unsigned int outBufferSize = 0;
\r
4766 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4767 if ( FAILED( hr ) ) {
\r
4768 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4772 // scale inBufferSize according to user->stream sample rate ratio
\r
4773 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4774 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4776 // set renderBuffer size
\r
4777 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4779 // reset the render stream
\r
4780 hr = renderAudioClient->Reset();
\r
4781 if ( FAILED( hr ) ) {
\r
4782 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4786 // start the render stream
\r
4787 hr = renderAudioClient->Start();
\r
4788 if ( FAILED( hr ) ) {
\r
4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4794 if ( stream_.mode == INPUT ) {
\r
4795 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4796 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4798 else if ( stream_.mode == OUTPUT ) {
\r
4799 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4800 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4802 else if ( stream_.mode == DUPLEX ) {
\r
4803 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4804 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4805 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4806 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4809 convBuffer = ( char* ) malloc( convBuffSize );
\r
4810 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4811 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4812 errorType = RtAudioError::MEMORY_ERROR;
\r
4813 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4817 // stream process loop
\r
4818 while ( stream_.state != STREAM_STOPPING ) {
\r
4819 if ( !callbackPulled ) {
\r
4822 // 1. Pull callback buffer from inputBuffer
\r
4823 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4824 // Convert callback buffer to user format
\r
4826 if ( captureAudioClient ) {
\r
4827 // Pull callback buffer from inputBuffer
\r
4828 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4829 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4830 stream_.deviceFormat[INPUT] );
\r
4832 if ( callbackPulled ) {
\r
4833 // Convert callback buffer to user sample rate and channel count
\r
4834 convertBufferWasapi( stream_.deviceBuffer,
\r
4836 stream_.nDeviceChannels[INPUT],
\r
4837 stream_.nUserChannels[INPUT],
\r
4838 captureFormat->nSamplesPerSec,
\r
4839 stream_.sampleRate,
\r
4840 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4842 stream_.deviceFormat[INPUT] );
\r
4844 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4845 // Convert callback buffer to user format
\r
4846 convertBuffer( stream_.userBuffer[INPUT],
\r
4847 stream_.deviceBuffer,
\r
4848 stream_.convertInfo[INPUT] );
\r
4851 // no conversion, simple copy deviceBuffer to userBuffer
\r
4852 memcpy( stream_.userBuffer[INPUT],
\r
4853 stream_.deviceBuffer,
\r
4854 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4859 // if there is no capture stream, set callbackPulled flag
\r
4860 callbackPulled = true;
\r
4863 // Execute Callback
\r
4864 // ================
\r
4865 // 1. Execute user callback method
\r
4866 // 2. Handle return value from callback
\r
4868 // if callback has not requested the stream to stop
\r
4869 if ( callbackPulled && !callbackStopped ) {
\r
4870 // Execute user callback method
\r
4871 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4872 stream_.userBuffer[INPUT],
\r
4873 stream_.bufferSize,
\r
4875 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4876 stream_.callbackInfo.userData );
\r
4878 // Handle return value from callback
\r
4879 if ( callbackResult == 1 ) {
\r
4880 // instantiate a thread to stop this thread
\r
4881 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4882 if ( !threadHandle ) {
\r
4883 errorType = RtAudioError::THREAD_ERROR;
\r
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4887 else if ( !CloseHandle( threadHandle ) ) {
\r
4888 errorType = RtAudioError::THREAD_ERROR;
\r
4889 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4893 callbackStopped = true;
\r
4895 else if ( callbackResult == 2 ) {
\r
4896 // instantiate a thread to stop this thread
\r
4897 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4898 if ( !threadHandle ) {
\r
4899 errorType = RtAudioError::THREAD_ERROR;
\r
4900 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4903 else if ( !CloseHandle( threadHandle ) ) {
\r
4904 errorType = RtAudioError::THREAD_ERROR;
\r
4905 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4909 callbackStopped = true;
\r
4914 // Callback Output
\r
4915 // ===============
\r
4916 // 1. Convert callback buffer to stream format
\r
4917 // 2. Convert callback buffer to stream sample rate and channel count
\r
4918 // 3. Push callback buffer into outputBuffer
\r
4920 if ( renderAudioClient && callbackPulled ) {
\r
4921 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4922 // Convert callback buffer to stream format
\r
4923 convertBuffer( stream_.deviceBuffer,
\r
4924 stream_.userBuffer[OUTPUT],
\r
4925 stream_.convertInfo[OUTPUT] );
\r
4927 // Convert callback buffer to stream sample rate and channel count
\r
4928 convertBufferWasapi( convBuffer,
\r
4929 stream_.deviceBuffer,
\r
4930 stream_.nUserChannels[OUTPUT],
\r
4931 stream_.nDeviceChannels[OUTPUT],
\r
4932 stream_.sampleRate,
\r
4933 renderFormat->nSamplesPerSec,
\r
4934 stream_.bufferSize,
\r
4936 stream_.deviceFormat[OUTPUT] );
\r
4939 // Convert callback buffer to stream sample rate and channel count
\r
4940 convertBufferWasapi( convBuffer,
\r
4941 stream_.userBuffer[OUTPUT],
\r
4942 stream_.nUserChannels[OUTPUT],
\r
4943 stream_.nDeviceChannels[OUTPUT],
\r
4944 stream_.sampleRate,
\r
4945 renderFormat->nSamplesPerSec,
\r
4946 stream_.bufferSize,
\r
4948 stream_.deviceFormat[OUTPUT] );
\r
4951 // Push callback buffer into outputBuffer
\r
4952 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4953 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4954 stream_.deviceFormat[OUTPUT] );
\r
4959 // 1. Get capture buffer from stream
\r
4960 // 2. Push capture buffer into inputBuffer
\r
4961 // 3. If 2. was successful: Release capture buffer
\r
4963 if ( captureAudioClient ) {
\r
4964 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4965 if ( !callbackPulled ) {
\r
4966 WaitForSingleObject( captureEvent, INFINITE );
\r
4969 // Get capture buffer from stream
\r
4970 hr = captureClient->GetBuffer( &streamBuffer,
\r
4971 &bufferFrameCount,
\r
4972 &captureFlags, NULL, NULL );
\r
4973 if ( FAILED( hr ) ) {
\r
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4978 if ( bufferFrameCount != 0 ) {
\r
4979 // Push capture buffer into inputBuffer
\r
4980 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4981 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4982 stream_.deviceFormat[INPUT] ) )
\r
4984 // Release capture buffer
\r
4985 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4986 if ( FAILED( hr ) ) {
\r
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4993 // Inform WASAPI that capture was unsuccessful
\r
4994 hr = captureClient->ReleaseBuffer( 0 );
\r
4995 if ( FAILED( hr ) ) {
\r
4996 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5003 // Inform WASAPI that capture was unsuccessful
\r
5004 hr = captureClient->ReleaseBuffer( 0 );
\r
5005 if ( FAILED( hr ) ) {
\r
5006 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5014 // 1. Get render buffer from stream
\r
5015 // 2. Pull next buffer from outputBuffer
\r
5016 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5017 // Release render buffer
\r
5019 if ( renderAudioClient ) {
\r
5020 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5021 if ( callbackPulled && !callbackPushed ) {
\r
5022 WaitForSingleObject( renderEvent, INFINITE );
\r
5025 // Get render buffer from stream
\r
5026 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5027 if ( FAILED( hr ) ) {
\r
5028 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5032 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5033 if ( FAILED( hr ) ) {
\r
5034 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5038 bufferFrameCount -= numFramesPadding;
\r
5040 if ( bufferFrameCount != 0 ) {
\r
5041 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5042 if ( FAILED( hr ) ) {
\r
5043 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5047 // Pull next buffer from outputBuffer
\r
5048 // Fill render buffer with next buffer
\r
5049 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5050 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5051 stream_.deviceFormat[OUTPUT] ) )
\r
5053 // Release render buffer
\r
5054 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5055 if ( FAILED( hr ) ) {
\r
5056 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5062 // Inform WASAPI that render was unsuccessful
\r
5063 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5064 if ( FAILED( hr ) ) {
\r
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5072 // Inform WASAPI that render was unsuccessful
\r
5073 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5074 if ( FAILED( hr ) ) {
\r
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5081 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5082 if ( callbackPushed ) {
\r
5083 callbackPulled = false;
\r
5086 // tick stream time
\r
5087 RtApi::tickStreamTime();
\r
5092 CoTaskMemFree( captureFormat );
\r
5093 CoTaskMemFree( renderFormat );
\r
5095 //delete convBuffer;
\r
5096 free ( convBuffer );
\r
5100 // update stream state
\r
5101 stream_.state = STREAM_STOPPED;
\r
5103 if ( errorText_.empty() )
\r
5106 error( errorType );
\r
5109 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5113 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5115 // Modified by Robin Davies, October 2005
\r
5116 // - Improvements to DirectX pointer chasing.
\r
5117 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5118 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5119 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5120 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5122 #include <dsound.h>
\r
5123 #include <assert.h>
\r
5124 #include <algorithm>
\r
5126 #if defined(__MINGW32__)
\r
5127 // missing from latest mingw winapi
\r
5128 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5129 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5130 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5131 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5134 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5136 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5137 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5140 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5142 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5143 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5144 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5145 return pointer >= earlierPointer && pointer < laterPointer;
\r
5148 // A structure to hold various information related to the DirectSound
\r
5149 // API implementation.
\r
5151 unsigned int drainCounter; // Tracks callback counts when draining
\r
5152 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5156 UINT bufferPointer[2];
\r
5157 DWORD dsBufferSize[2];
\r
5158 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5162 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5165 // Declarations for utility functions, callbacks, and structures
\r
5166 // specific to the DirectSound implementation.
\r
5167 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5168 LPCTSTR description,
\r
5170 LPVOID lpContext );
\r
5172 static const char* getErrorString( int code );
\r
5174 static unsigned __stdcall callbackHandler( void *ptr );
\r
5183 : found(false) { validId[0] = false; validId[1] = false; }
\r
5186 struct DsProbeData {
\r
5188 std::vector<struct DsDevice>* dsDevices;
\r
5191 RtApiDs :: RtApiDs()
\r
5193 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5194 // accept whatever the mainline chose for a threading model.
\r
5195 coInitialized_ = false;
\r
5196 HRESULT hr = CoInitialize( NULL );
\r
5197 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5200 RtApiDs :: ~RtApiDs()
\r
5202 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5203 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5206 // The DirectSound default output is always the first device.
\r
5207 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5212 // The DirectSound default input is always the first input device,
\r
5213 // which is the first capture device enumerated.
\r
5214 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5219 unsigned int RtApiDs :: getDeviceCount( void )
\r
5221 // Set query flag for previously found devices to false, so that we
\r
5222 // can check for any devices that have disappeared.
\r
5223 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5224 dsDevices[i].found = false;
\r
5226 // Query DirectSound devices.
\r
5227 struct DsProbeData probeInfo;
\r
5228 probeInfo.isInput = false;
\r
5229 probeInfo.dsDevices = &dsDevices;
\r
5230 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5231 if ( FAILED( result ) ) {
\r
5232 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5233 errorText_ = errorStream_.str();
\r
5234 error( RtAudioError::WARNING );
\r
5237 // Query DirectSoundCapture devices.
\r
5238 probeInfo.isInput = true;
\r
5239 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5240 if ( FAILED( result ) ) {
\r
5241 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5242 errorText_ = errorStream_.str();
\r
5243 error( RtAudioError::WARNING );
\r
5246 // Clean out any devices that may have disappeared.
\r
5247 std::vector< int > indices;
\r
5248 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5249 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5250 //unsigned int nErased = 0;
\r
5251 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5252 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5253 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5255 return static_cast<unsigned int>(dsDevices.size());
\r
5258 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5260 RtAudio::DeviceInfo info;
\r
5261 info.probed = false;
\r
5263 if ( dsDevices.size() == 0 ) {
\r
5264 // Force a query of all devices
\r
5266 if ( dsDevices.size() == 0 ) {
\r
5267 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5268 error( RtAudioError::INVALID_USE );
\r
5273 if ( device >= dsDevices.size() ) {
\r
5274 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5275 error( RtAudioError::INVALID_USE );
\r
5280 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5282 LPDIRECTSOUND output;
\r
5284 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5285 if ( FAILED( result ) ) {
\r
5286 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5287 errorText_ = errorStream_.str();
\r
5288 error( RtAudioError::WARNING );
\r
5292 outCaps.dwSize = sizeof( outCaps );
\r
5293 result = output->GetCaps( &outCaps );
\r
5294 if ( FAILED( result ) ) {
\r
5295 output->Release();
\r
5296 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5297 errorText_ = errorStream_.str();
\r
5298 error( RtAudioError::WARNING );
\r
5302 // Get output channel information.
\r
5303 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5305 // Get sample rate information.
\r
5306 info.sampleRates.clear();
\r
5307 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5308 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5309 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5310 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5313 // Get format information.
\r
5314 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5315 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5317 output->Release();
\r
5319 if ( getDefaultOutputDevice() == device )
\r
5320 info.isDefaultOutput = true;
\r
5322 if ( dsDevices[ device ].validId[1] == false ) {
\r
5323 info.name = dsDevices[ device ].name;
\r
5324 info.probed = true;
\r
5330 LPDIRECTSOUNDCAPTURE input;
\r
5331 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5332 if ( FAILED( result ) ) {
\r
5333 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5334 errorText_ = errorStream_.str();
\r
5335 error( RtAudioError::WARNING );
\r
5340 inCaps.dwSize = sizeof( inCaps );
\r
5341 result = input->GetCaps( &inCaps );
\r
5342 if ( FAILED( result ) ) {
\r
5344 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5345 errorText_ = errorStream_.str();
\r
5346 error( RtAudioError::WARNING );
\r
5350 // Get input channel information.
\r
5351 info.inputChannels = inCaps.dwChannels;
\r
5353 // Get sample rate and format information.
\r
5354 std::vector<unsigned int> rates;
\r
5355 if ( inCaps.dwChannels >= 2 ) {
\r
5356 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5359 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5360 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5362 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5363 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5365 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5369 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5371 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5374 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5375 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5378 else if ( inCaps.dwChannels == 1 ) {
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5385 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5386 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5388 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5391 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5392 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5394 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5397 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5398 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5401 else info.inputChannels = 0; // technically, this would be an error
\r
5405 if ( info.inputChannels == 0 ) return info;
\r
5407 // Copy the supported rates to the info structure but avoid duplication.
\r
5409 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5411 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5412 if ( rates[i] == info.sampleRates[j] ) {
\r
5417 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5419 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5421 // If device opens for both playback and capture, we determine the channels.
\r
5422 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5423 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5425 if ( device == 0 ) info.isDefaultInput = true;
\r
5427 // Copy name and return.
\r
5428 info.name = dsDevices[ device ].name;
\r
5429 info.probed = true;
\r
5433 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5434 unsigned int firstChannel, unsigned int sampleRate,
\r
5435 RtAudioFormat format, unsigned int *bufferSize,
\r
5436 RtAudio::StreamOptions *options )
\r
5438 if ( channels + firstChannel > 2 ) {
\r
5439 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5443 size_t nDevices = dsDevices.size();
\r
5444 if ( nDevices == 0 ) {
\r
5445 // This should not happen because a check is made before this function is called.
\r
5446 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5450 if ( device >= nDevices ) {
\r
5451 // This should not happen because a check is made before this function is called.
\r
5452 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5456 if ( mode == OUTPUT ) {
\r
5457 if ( dsDevices[ device ].validId[0] == false ) {
\r
5458 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5459 errorText_ = errorStream_.str();
\r
5463 else { // mode == INPUT
\r
5464 if ( dsDevices[ device ].validId[1] == false ) {
\r
5465 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5466 errorText_ = errorStream_.str();
\r
5471 // According to a note in PortAudio, using GetDesktopWindow()
\r
5472 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5473 // that occur when the application's window is not the foreground
\r
5474 // window. Also, if the application window closes before the
\r
5475 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5476 // problems when using GetDesktopWindow() but it seems fine now
\r
5477 // (January 2010). I'll leave it commented here.
\r
5478 // HWND hWnd = GetForegroundWindow();
\r
5479 HWND hWnd = GetDesktopWindow();
\r
5481 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5482 // two. This is a judgement call and a value of two is probably too
\r
5483 // low for capture, but it should work for playback.
\r
5485 if ( options ) nBuffers = options->numberOfBuffers;
\r
5486 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5487 if ( nBuffers < 2 ) nBuffers = 3;
\r
5489 // Check the lower range of the user-specified buffer size and set
\r
5490 // (arbitrarily) to a lower bound of 32.
\r
5491 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5493 // Create the wave format structure. The data format setting will
\r
5494 // be determined later.
\r
5495 WAVEFORMATEX waveFormat;
\r
5496 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5497 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5498 waveFormat.nChannels = channels + firstChannel;
\r
5499 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5501 // Determine the device buffer size. By default, we'll use the value
\r
5502 // defined above (32K), but we will grow it to make allowances for
\r
5503 // very large software buffer sizes.
\r
5504 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5505 DWORD dsPointerLeadTime = 0;
\r
5507 void *ohandle = 0, *bhandle = 0;
\r
5509 if ( mode == OUTPUT ) {
\r
5511 LPDIRECTSOUND output;
\r
5512 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5513 if ( FAILED( result ) ) {
\r
5514 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5515 errorText_ = errorStream_.str();
\r
5520 outCaps.dwSize = sizeof( outCaps );
\r
5521 result = output->GetCaps( &outCaps );
\r
5522 if ( FAILED( result ) ) {
\r
5523 output->Release();
\r
5524 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5525 errorText_ = errorStream_.str();
\r
5529 // Check channel information.
\r
5530 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5531 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5532 errorText_ = errorStream_.str();
\r
5536 // Check format information. Use 16-bit format unless not
\r
5537 // supported or user requests 8-bit.
\r
5538 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5539 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5540 waveFormat.wBitsPerSample = 16;
\r
5541 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5544 waveFormat.wBitsPerSample = 8;
\r
5545 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5547 stream_.userFormat = format;
\r
5549 // Update wave format structure and buffer information.
\r
5550 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5551 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5552 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5554 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5555 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5556 dsBufferSize *= 2;
\r
5558 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5559 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5560 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5561 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5562 if ( FAILED( result ) ) {
\r
5563 output->Release();
\r
5564 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5565 errorText_ = errorStream_.str();
\r
5569 // Even though we will write to the secondary buffer, we need to
\r
5570 // access the primary buffer to set the correct output format
\r
5571 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5572 // buffer description.
\r
5573 DSBUFFERDESC bufferDescription;
\r
5574 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5575 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5576 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5578 // Obtain the primary buffer
\r
5579 LPDIRECTSOUNDBUFFER buffer;
\r
5580 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5581 if ( FAILED( result ) ) {
\r
5582 output->Release();
\r
5583 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5584 errorText_ = errorStream_.str();
\r
5588 // Set the primary DS buffer sound format.
\r
5589 result = buffer->SetFormat( &waveFormat );
\r
5590 if ( FAILED( result ) ) {
\r
5591 output->Release();
\r
5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5593 errorText_ = errorStream_.str();
\r
5597 // Setup the secondary DS buffer description.
\r
5598 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5599 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5600 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5601 DSBCAPS_GLOBALFOCUS |
\r
5602 DSBCAPS_GETCURRENTPOSITION2 |
\r
5603 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5604 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5605 bufferDescription.lpwfxFormat = &waveFormat;
\r
5607 // Try to create the secondary DS buffer. If that doesn't work,
\r
5608 // try to use software mixing. Otherwise, there's a problem.
\r
5609 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5610 if ( FAILED( result ) ) {
\r
5611 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5612 DSBCAPS_GLOBALFOCUS |
\r
5613 DSBCAPS_GETCURRENTPOSITION2 |
\r
5614 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5615 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5616 if ( FAILED( result ) ) {
\r
5617 output->Release();
\r
5618 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5619 errorText_ = errorStream_.str();
\r
5624 // Get the buffer size ... might be different from what we specified.
\r
5626 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5627 result = buffer->GetCaps( &dsbcaps );
\r
5628 if ( FAILED( result ) ) {
\r
5629 output->Release();
\r
5630 buffer->Release();
\r
5631 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5632 errorText_ = errorStream_.str();
\r
5636 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5638 // Lock the DS buffer
\r
5641 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5642 if ( FAILED( result ) ) {
\r
5643 output->Release();
\r
5644 buffer->Release();
\r
5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5646 errorText_ = errorStream_.str();
\r
5650 // Zero the DS buffer
\r
5651 ZeroMemory( audioPtr, dataLen );
\r
5653 // Unlock the DS buffer
\r
5654 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5655 if ( FAILED( result ) ) {
\r
5656 output->Release();
\r
5657 buffer->Release();
\r
5658 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5659 errorText_ = errorStream_.str();
\r
5663 ohandle = (void *) output;
\r
5664 bhandle = (void *) buffer;
\r
5667 if ( mode == INPUT ) {
\r
5669 LPDIRECTSOUNDCAPTURE input;
\r
5670 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5671 if ( FAILED( result ) ) {
\r
5672 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5673 errorText_ = errorStream_.str();
\r
5678 inCaps.dwSize = sizeof( inCaps );
\r
5679 result = input->GetCaps( &inCaps );
\r
5680 if ( FAILED( result ) ) {
\r
5682 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5683 errorText_ = errorStream_.str();
\r
5687 // Check channel information.
\r
5688 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5689 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5693 // Check format information. Use 16-bit format unless user
\r
5694 // requests 8-bit.
\r
5695 DWORD deviceFormats;
\r
5696 if ( channels + firstChannel == 2 ) {
\r
5697 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5698 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5699 waveFormat.wBitsPerSample = 8;
\r
5700 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5702 else { // assume 16-bit is supported
\r
5703 waveFormat.wBitsPerSample = 16;
\r
5704 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5707 else { // channel == 1
\r
5708 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5709 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5710 waveFormat.wBitsPerSample = 8;
\r
5711 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5713 else { // assume 16-bit is supported
\r
5714 waveFormat.wBitsPerSample = 16;
\r
5715 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5718 stream_.userFormat = format;
\r
5720 // Update wave format structure and buffer information.
\r
5721 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5722 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5723 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5725 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5726 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5727 dsBufferSize *= 2;
\r
5729 // Setup the secondary DS buffer description.
\r
5730 DSCBUFFERDESC bufferDescription;
\r
5731 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5732 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5733 bufferDescription.dwFlags = 0;
\r
5734 bufferDescription.dwReserved = 0;
\r
5735 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5736 bufferDescription.lpwfxFormat = &waveFormat;
\r
5738 // Create the capture buffer.
\r
5739 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5740 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5741 if ( FAILED( result ) ) {
\r
5743 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5744 errorText_ = errorStream_.str();
\r
5748 // Get the buffer size ... might be different from what we specified.
\r
5749 DSCBCAPS dscbcaps;
\r
5750 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5751 result = buffer->GetCaps( &dscbcaps );
\r
5752 if ( FAILED( result ) ) {
\r
5754 buffer->Release();
\r
5755 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5756 errorText_ = errorStream_.str();
\r
5760 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5762 // NOTE: We could have a problem here if this is a duplex stream
\r
5763 // and the play and capture hardware buffer sizes are different
\r
5764 // (I'm actually not sure if that is a problem or not).
\r
5765 // Currently, we are not verifying that.
\r
5767 // Lock the capture buffer
\r
5770 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5771 if ( FAILED( result ) ) {
\r
5773 buffer->Release();
\r
5774 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5775 errorText_ = errorStream_.str();
\r
5779 // Zero the buffer
\r
5780 ZeroMemory( audioPtr, dataLen );
\r
5782 // Unlock the buffer
\r
5783 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5784 if ( FAILED( result ) ) {
\r
5786 buffer->Release();
\r
5787 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5788 errorText_ = errorStream_.str();
\r
5792 ohandle = (void *) input;
\r
5793 bhandle = (void *) buffer;
\r
5796 // Set various stream parameters
\r
5797 DsHandle *handle = 0;
\r
5798 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5799 stream_.nUserChannels[mode] = channels;
\r
5800 stream_.bufferSize = *bufferSize;
\r
5801 stream_.channelOffset[mode] = firstChannel;
\r
5802 stream_.deviceInterleaved[mode] = true;
\r
5803 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5804 else stream_.userInterleaved = true;
\r
5806 // Set flag for buffer conversion
\r
5807 stream_.doConvertBuffer[mode] = false;
\r
5808 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5809 stream_.doConvertBuffer[mode] = true;
\r
5810 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5811 stream_.doConvertBuffer[mode] = true;
\r
5812 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5813 stream_.nUserChannels[mode] > 1 )
\r
5814 stream_.doConvertBuffer[mode] = true;
\r
5816 // Allocate necessary internal buffers
\r
5817 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5818 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5819 if ( stream_.userBuffer[mode] == NULL ) {
\r
5820 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5824 if ( stream_.doConvertBuffer[mode] ) {
\r
5826 bool makeBuffer = true;
\r
5827 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5828 if ( mode == INPUT ) {
\r
5829 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5830 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5831 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5835 if ( makeBuffer ) {
\r
5836 bufferBytes *= *bufferSize;
\r
5837 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5838 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5839 if ( stream_.deviceBuffer == NULL ) {
\r
5840 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5846 // Allocate our DsHandle structures for the stream.
\r
5847 if ( stream_.apiHandle == 0 ) {
\r
5849 handle = new DsHandle;
\r
5851 catch ( std::bad_alloc& ) {
\r
5852 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5856 // Create a manual-reset event.
\r
5857 handle->condition = CreateEvent( NULL, // no security
\r
5858 TRUE, // manual-reset
\r
5859 FALSE, // non-signaled initially
\r
5860 NULL ); // unnamed
\r
5861 stream_.apiHandle = (void *) handle;
\r
5864 handle = (DsHandle *) stream_.apiHandle;
\r
5865 handle->id[mode] = ohandle;
\r
5866 handle->buffer[mode] = bhandle;
\r
5867 handle->dsBufferSize[mode] = dsBufferSize;
\r
5868 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5870 stream_.device[mode] = device;
\r
5871 stream_.state = STREAM_STOPPED;
\r
5872 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5873 // We had already set up an output stream.
\r
5874 stream_.mode = DUPLEX;
\r
5876 stream_.mode = mode;
\r
5877 stream_.nBuffers = nBuffers;
\r
5878 stream_.sampleRate = sampleRate;
\r
5880 // Setup the buffer conversion information structure.
\r
5881 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5883 // Setup the callback thread.
\r
5884 if ( stream_.callbackInfo.isRunning == false ) {
\r
5885 unsigned threadId;
\r
5886 stream_.callbackInfo.isRunning = true;
\r
5887 stream_.callbackInfo.object = (void *) this;
\r
5888 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5889 &stream_.callbackInfo, 0, &threadId );
\r
5890 if ( stream_.callbackInfo.thread == 0 ) {
\r
5891 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5895 // Boost DS thread priority
\r
5896 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5902 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5903 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5904 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5905 if ( buffer ) buffer->Release();
\r
5906 object->Release();
\r
5908 if ( handle->buffer[1] ) {
\r
5909 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5910 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5911 if ( buffer ) buffer->Release();
\r
5912 object->Release();
\r
5914 CloseHandle( handle->condition );
\r
5916 stream_.apiHandle = 0;
\r
5919 for ( int i=0; i<2; i++ ) {
\r
5920 if ( stream_.userBuffer[i] ) {
\r
5921 free( stream_.userBuffer[i] );
\r
5922 stream_.userBuffer[i] = 0;
\r
5926 if ( stream_.deviceBuffer ) {
\r
5927 free( stream_.deviceBuffer );
\r
5928 stream_.deviceBuffer = 0;
\r
5931 stream_.state = STREAM_CLOSED;
\r
5935 void RtApiDs :: closeStream()
\r
5937 if ( stream_.state == STREAM_CLOSED ) {
\r
5938 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5939 error( RtAudioError::WARNING );
\r
5943 // Stop the callback thread.
\r
5944 stream_.callbackInfo.isRunning = false;
\r
5945 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5946 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5948 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5950 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5951 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5952 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5955 buffer->Release();
\r
5957 object->Release();
\r
5959 if ( handle->buffer[1] ) {
\r
5960 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5961 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5964 buffer->Release();
\r
5966 object->Release();
\r
5968 CloseHandle( handle->condition );
\r
5970 stream_.apiHandle = 0;
\r
5973 for ( int i=0; i<2; i++ ) {
\r
5974 if ( stream_.userBuffer[i] ) {
\r
5975 free( stream_.userBuffer[i] );
\r
5976 stream_.userBuffer[i] = 0;
\r
5980 if ( stream_.deviceBuffer ) {
\r
5981 free( stream_.deviceBuffer );
\r
5982 stream_.deviceBuffer = 0;
\r
5985 stream_.mode = UNINITIALIZED;
\r
5986 stream_.state = STREAM_CLOSED;
\r
5989 void RtApiDs :: startStream()
\r
5992 if ( stream_.state == STREAM_RUNNING ) {
\r
5993 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5994 error( RtAudioError::WARNING );
\r
5998 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6000 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6001 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6002 // this is already in effect.
\r
6003 timeBeginPeriod( 1 );
\r
6005 buffersRolling = false;
\r
6006 duplexPrerollBytes = 0;
\r
6008 if ( stream_.mode == DUPLEX ) {
\r
6009 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6010 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6013 HRESULT result = 0;
\r
6014 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6016 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6017 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6018 if ( FAILED( result ) ) {
\r
6019 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6020 errorText_ = errorStream_.str();
\r
6025 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6027 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6028 result = buffer->Start( DSCBSTART_LOOPING );
\r
6029 if ( FAILED( result ) ) {
\r
6030 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6031 errorText_ = errorStream_.str();
\r
6036 handle->drainCounter = 0;
\r
6037 handle->internalDrain = false;
\r
6038 ResetEvent( handle->condition );
\r
6039 stream_.state = STREAM_RUNNING;
\r
6042 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6045 void RtApiDs :: stopStream()
\r
6048 if ( stream_.state == STREAM_STOPPED ) {
\r
6049 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6050 error( RtAudioError::WARNING );
\r
6054 HRESULT result = 0;
\r
6057 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6058 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6059 if ( handle->drainCounter == 0 ) {
\r
6060 handle->drainCounter = 2;
\r
6061 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6064 stream_.state = STREAM_STOPPED;
\r
6066 // Stop the buffer and clear memory
\r
6067 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6068 result = buffer->Stop();
\r
6069 if ( FAILED( result ) ) {
\r
6070 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6071 errorText_ = errorStream_.str();
\r
6075 // Lock the buffer and clear it so that if we start to play again,
\r
6076 // we won't have old data playing.
\r
6077 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6078 if ( FAILED( result ) ) {
\r
6079 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6080 errorText_ = errorStream_.str();
\r
6084 // Zero the DS buffer
\r
6085 ZeroMemory( audioPtr, dataLen );
\r
6087 // Unlock the DS buffer
\r
6088 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6089 if ( FAILED( result ) ) {
\r
6090 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6091 errorText_ = errorStream_.str();
\r
6095 // If we start playing again, we must begin at beginning of buffer.
\r
6096 handle->bufferPointer[0] = 0;
\r
6099 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6100 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6104 stream_.state = STREAM_STOPPED;
\r
6106 result = buffer->Stop();
\r
6107 if ( FAILED( result ) ) {
\r
6108 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6109 errorText_ = errorStream_.str();
\r
6113 // Lock the buffer and clear it so that if we start to play again,
\r
6114 // we won't have old data playing.
\r
6115 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6116 if ( FAILED( result ) ) {
\r
6117 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6118 errorText_ = errorStream_.str();
\r
6122 // Zero the DS buffer
\r
6123 ZeroMemory( audioPtr, dataLen );
\r
6125 // Unlock the DS buffer
\r
6126 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6127 if ( FAILED( result ) ) {
\r
6128 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6129 errorText_ = errorStream_.str();
\r
6133 // If we start recording again, we must begin at beginning of buffer.
\r
6134 handle->bufferPointer[1] = 0;
\r
6138 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6139 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6142 void RtApiDs :: abortStream()
\r
6145 if ( stream_.state == STREAM_STOPPED ) {
\r
6146 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6147 error( RtAudioError::WARNING );
\r
6151 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6152 handle->drainCounter = 2;
\r
6157 void RtApiDs :: callbackEvent()
\r
6159 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6160 Sleep( 50 ); // sleep 50 milliseconds
\r
6164 if ( stream_.state == STREAM_CLOSED ) {
\r
6165 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6166 error( RtAudioError::WARNING );
\r
6170 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6171 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6173 // Check if we were draining the stream and signal is finished.
\r
6174 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6176 stream_.state = STREAM_STOPPING;
\r
6177 if ( handle->internalDrain == false )
\r
6178 SetEvent( handle->condition );
\r
6184 // Invoke user callback to get fresh output data UNLESS we are
\r
6185 // draining stream.
\r
6186 if ( handle->drainCounter == 0 ) {
\r
6187 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6188 double streamTime = getStreamTime();
\r
6189 RtAudioStreamStatus status = 0;
\r
6190 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6191 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6192 handle->xrun[0] = false;
\r
6194 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6195 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6196 handle->xrun[1] = false;
\r
6198 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6199 stream_.bufferSize, streamTime, status, info->userData );
\r
6200 if ( cbReturnValue == 2 ) {
\r
6201 stream_.state = STREAM_STOPPING;
\r
6202 handle->drainCounter = 2;
\r
6206 else if ( cbReturnValue == 1 ) {
\r
6207 handle->drainCounter = 1;
\r
6208 handle->internalDrain = true;
\r
6213 DWORD currentWritePointer, safeWritePointer;
\r
6214 DWORD currentReadPointer, safeReadPointer;
\r
6215 UINT nextWritePointer;
\r
6217 LPVOID buffer1 = NULL;
\r
6218 LPVOID buffer2 = NULL;
\r
6219 DWORD bufferSize1 = 0;
\r
6220 DWORD bufferSize2 = 0;
\r
6225 if ( buffersRolling == false ) {
\r
6226 if ( stream_.mode == DUPLEX ) {
\r
6227 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6229 // It takes a while for the devices to get rolling. As a result,
\r
6230 // there's no guarantee that the capture and write device pointers
\r
6231 // will move in lockstep. Wait here for both devices to start
\r
6232 // rolling, and then set our buffer pointers accordingly.
\r
6233 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6234 // bytes later than the write buffer.
\r
6236 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6237 // take place between the two GetCurrentPosition calls... but I'm
\r
6238 // really not sure how to solve the problem. Temporarily boost to
\r
6239 // Realtime priority, maybe; but I'm not sure what priority the
\r
6240 // DirectSound service threads run at. We *should* be roughly
\r
6241 // within a ms or so of correct.
\r
6243 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6244 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6246 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6248 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6249 if ( FAILED( result ) ) {
\r
6250 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6251 errorText_ = errorStream_.str();
\r
6252 error( RtAudioError::SYSTEM_ERROR );
\r
6255 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6256 if ( FAILED( result ) ) {
\r
6257 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6258 errorText_ = errorStream_.str();
\r
6259 error( RtAudioError::SYSTEM_ERROR );
\r
6263 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6264 if ( FAILED( result ) ) {
\r
6265 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6266 errorText_ = errorStream_.str();
\r
6267 error( RtAudioError::SYSTEM_ERROR );
\r
6270 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6271 if ( FAILED( result ) ) {
\r
6272 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6273 errorText_ = errorStream_.str();
\r
6274 error( RtAudioError::SYSTEM_ERROR );
\r
6277 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6281 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6283 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6284 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6285 handle->bufferPointer[1] = safeReadPointer;
\r
6287 else if ( stream_.mode == OUTPUT ) {
\r
6289 // Set the proper nextWritePosition after initial startup.
\r
6290 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6291 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6292 if ( FAILED( result ) ) {
\r
6293 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6294 errorText_ = errorStream_.str();
\r
6295 error( RtAudioError::SYSTEM_ERROR );
\r
6298 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6299 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6302 buffersRolling = true;
\r
6305 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6307 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6309 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6310 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6311 bufferBytes *= formatBytes( stream_.userFormat );
\r
6312 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6315 // Setup parameters and do buffer conversion if necessary.
\r
6316 if ( stream_.doConvertBuffer[0] ) {
\r
6317 buffer = stream_.deviceBuffer;
\r
6318 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6319 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6320 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6323 buffer = stream_.userBuffer[0];
\r
6324 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6325 bufferBytes *= formatBytes( stream_.userFormat );
\r
6328 // No byte swapping necessary in DirectSound implementation.
\r
6330 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6331 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6333 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6334 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6336 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6337 nextWritePointer = handle->bufferPointer[0];
\r
6339 DWORD endWrite, leadPointer;
\r
6341 // Find out where the read and "safe write" pointers are.
\r
6342 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6343 if ( FAILED( result ) ) {
\r
6344 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6345 errorText_ = errorStream_.str();
\r
6346 error( RtAudioError::SYSTEM_ERROR );
\r
6350 // We will copy our output buffer into the region between
\r
6351 // safeWritePointer and leadPointer. If leadPointer is not
\r
6352 // beyond the next endWrite position, wait until it is.
\r
6353 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6354 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6355 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6356 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6357 endWrite = nextWritePointer + bufferBytes;
\r
6359 // Check whether the entire write region is behind the play pointer.
\r
6360 if ( leadPointer >= endWrite ) break;
\r
6362 // If we are here, then we must wait until the leadPointer advances
\r
6363 // beyond the end of our next write region. We use the
\r
6364 // Sleep() function to suspend operation until that happens.
\r
6365 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6366 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6367 if ( millis < 1.0 ) millis = 1.0;
\r
6368 Sleep( (DWORD) millis );
\r
6371 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6372 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6373 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6374 handle->xrun[0] = true;
\r
6375 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6376 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6377 handle->bufferPointer[0] = nextWritePointer;
\r
6378 endWrite = nextWritePointer + bufferBytes;
\r
6381 // Lock free space in the buffer
\r
6382 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6383 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6384 if ( FAILED( result ) ) {
\r
6385 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6386 errorText_ = errorStream_.str();
\r
6387 error( RtAudioError::SYSTEM_ERROR );
\r
6391 // Copy our buffer into the DS buffer
\r
6392 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6393 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6395 // Update our buffer offset and unlock sound buffer
\r
6396 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6397 if ( FAILED( result ) ) {
\r
6398 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6399 errorText_ = errorStream_.str();
\r
6400 error( RtAudioError::SYSTEM_ERROR );
\r
6403 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6404 handle->bufferPointer[0] = nextWritePointer;
\r
6406 if ( handle->drainCounter ) {
\r
6407 handle->drainCounter++;
\r
6412 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6414 // Setup parameters.
\r
6415 if ( stream_.doConvertBuffer[1] ) {
\r
6416 buffer = stream_.deviceBuffer;
\r
6417 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6418 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6421 buffer = stream_.userBuffer[1];
\r
6422 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6423 bufferBytes *= formatBytes( stream_.userFormat );
\r
6426 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6427 long nextReadPointer = handle->bufferPointer[1];
\r
6428 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6430 // Find out where the write and "safe read" pointers are.
\r
6431 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6432 if ( FAILED( result ) ) {
\r
6433 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6434 errorText_ = errorStream_.str();
\r
6435 error( RtAudioError::SYSTEM_ERROR );
\r
6439 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6440 DWORD endRead = nextReadPointer + bufferBytes;
\r
6442 // Handling depends on whether we are INPUT or DUPLEX.
\r
6443 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6444 // then a wait here will drag the write pointers into the forbidden zone.
\r
6446 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6447 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6448 // practical way to sync up the read and write pointers reliably, given the
\r
6449 // the very complex relationship between phase and increment of the read and write
\r
6452 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6453 // provide a pre-roll period of 0.5 seconds in which we return
\r
6454 // zeros from the read buffer while the pointers sync up.
\r
6456 if ( stream_.mode == DUPLEX ) {
\r
6457 if ( safeReadPointer < endRead ) {
\r
6458 if ( duplexPrerollBytes <= 0 ) {
\r
6459 // Pre-roll time over. Be more aggressive.
\r
6460 int adjustment = endRead-safeReadPointer;
\r
6462 handle->xrun[1] = true;
\r
6464 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6465 // and perform fine adjustments later.
\r
6466 // - small adjustments: back off by twice as much.
\r
6467 if ( adjustment >= 2*bufferBytes )
\r
6468 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6470 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6472 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6476 // In pre-roll time. Just do it.
\r
6477 nextReadPointer = safeReadPointer - bufferBytes;
\r
6478 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6480 endRead = nextReadPointer + bufferBytes;
\r
6483 else { // mode == INPUT
\r
6484 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6485 // See comments for playback.
\r
6486 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6487 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6488 if ( millis < 1.0 ) millis = 1.0;
\r
6489 Sleep( (DWORD) millis );
\r
6491 // Wake up and find out where we are now.
\r
6492 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6493 if ( FAILED( result ) ) {
\r
6494 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6495 errorText_ = errorStream_.str();
\r
6496 error( RtAudioError::SYSTEM_ERROR );
\r
6500 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6504 // Lock free space in the buffer
\r
6505 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6506 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6507 if ( FAILED( result ) ) {
\r
6508 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6509 errorText_ = errorStream_.str();
\r
6510 error( RtAudioError::SYSTEM_ERROR );
\r
6514 if ( duplexPrerollBytes <= 0 ) {
\r
6515 // Copy our buffer into the DS buffer
\r
6516 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6517 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6520 memset( buffer, 0, bufferSize1 );
\r
6521 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6522 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6525 // Update our buffer offset and unlock sound buffer
\r
6526 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6527 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6528 if ( FAILED( result ) ) {
\r
6529 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6530 errorText_ = errorStream_.str();
\r
6531 error( RtAudioError::SYSTEM_ERROR );
\r
6534 handle->bufferPointer[1] = nextReadPointer;
\r
6536 // No byte swapping necessary in DirectSound implementation.
\r
6538 // If necessary, convert 8-bit data from unsigned to signed.
\r
6539 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6540 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6542 // Do buffer conversion if necessary.
\r
6543 if ( stream_.doConvertBuffer[1] )
\r
6544 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6548 RtApi::tickStreamTime();
\r
6551 // Definitions for utility functions and callbacks
\r
6552 // specific to the DirectSound implementation.
\r
// Entry point for the DirectSound callback thread.  Repeatedly
// services the audio stream via callbackEvent() until the shared
// isRunning flag in the CallbackInfo is cleared by another thread.
6554 static unsigned __stdcall callbackHandler( void *ptr )

6556 CallbackInfo *info = (CallbackInfo *) ptr;
6557 RtApiDs *object = (RtApiDs *) info->object;
// Keep a pointer to the flag so each loop iteration re-reads the
// current shared value rather than a stale copy.
6558 bool* isRunning = &info->isRunning;

6560 while ( *isRunning == true ) {
6561 object->callbackEvent();

// Explicitly terminate a thread started with _beginthreadex().
6564 _endthreadex( 0 );
\r
6568 #include "tchar.h"
\r
// Convert a Windows TCHAR string (wide characters in UNICODE builds,
// narrow characters otherwise) to a UTF-8 encoded std::string.
6570 static std::string convertTChar( LPCTSTR name )

6572 #if defined( UNICODE ) || defined( _UNICODE )
// First call with a NULL output buffer returns the required byte
// count, which includes the terminating NUL -- hence length-1 below.
6573 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
6574 std::string s( length-1, '\0' );
6575 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
// Non-Unicode build: the narrow string can be copied directly.
6577 std::string s( name );
\r
// Enumeration callback for DirectSound(Capture)Enumerate.  Probes the
// reported device for usable capabilities and records its name and
// GUID in the caller-supplied DsDevice list.  Returning TRUE tells
// Windows to continue enumerating.
6583 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6584 LPCTSTR description,
6585 LPCTSTR /*module*/,
6586 LPVOID lpContext )

// Unpack the probe context threaded through the enumeration call.
6588 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6589 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;

6592 bool validDevice = false;
6593 if ( probeInfo.isInput == true ) {

6595 LPDIRECTSOUNDCAPTURE object;

// Open the capture device; skip it (but keep enumerating) on failure.
6597 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6598 if ( hr != DS_OK ) return TRUE;

6600 caps.dwSize = sizeof(caps);
6601 hr = object->GetCaps( &caps );
6602 if ( hr == DS_OK ) {
// A capture device is usable only if it reports channels and formats.
6603 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6604 validDevice = true;

6606 object->Release();

6610 LPDIRECTSOUND object;
6611 hr = DirectSoundCreate( lpguid, &object, NULL );
6612 if ( hr != DS_OK ) return TRUE;

6614 caps.dwSize = sizeof(caps);
6615 hr = object->GetCaps( &caps );
6616 if ( hr == DS_OK ) {
// A playback device must support a mono or stereo primary buffer.
6617 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6618 validDevice = true;

6620 object->Release();

6623 // If good device, then save its name and guid.
6624 std::string name = convertTChar( description );
6625 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID identifies the system default device.
6626 if ( lpguid == NULL )
6627 name = "Default Device";
6628 if ( validDevice ) {
// If this device name was already recorded (e.g. for the other
// direction), just update the existing entry's GUID for this
// direction: id[1]/validId[1] for input, id[0]/validId[0] for output.
6629 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6630 if ( dsDevices[i].name == name ) {
6631 dsDevices[i].found = true;
6632 if ( probeInfo.isInput ) {
6633 dsDevices[i].id[1] = lpguid;
6634 dsDevices[i].validId[1] = true;

6637 dsDevices[i].id[0] = lpguid;
6638 dsDevices[i].validId[0] = true;

// Otherwise record a brand-new device entry.
6645 device.name = name;
6646 device.found = true;
6647 if ( probeInfo.isInput ) {
6648 device.id[1] = lpguid;
6649 device.validId[1] = true;

6652 device.id[0] = lpguid;
6653 device.validId[0] = true;

6655 dsDevices.push_back( device );
\r
// Translate a DirectSound HRESULT error code into a short
// human-readable string for use in error messages.
6661 static const char* getErrorString( int code )

6665 case DSERR_ALLOCATED:
6666 return "Already allocated";

6668 case DSERR_CONTROLUNAVAIL:
6669 return "Control unavailable";

6671 case DSERR_INVALIDPARAM:
6672 return "Invalid parameter";

6674 case DSERR_INVALIDCALL:
6675 return "Invalid call";

6677 case DSERR_GENERIC:
6678 return "Generic error";

6680 case DSERR_PRIOLEVELNEEDED:
6681 return "Priority level needed";

6683 case DSERR_OUTOFMEMORY:
6684 return "Out of memory";

6686 case DSERR_BADFORMAT:
6687 return "The sample rate or the channel format is not supported";

6689 case DSERR_UNSUPPORTED:
6690 return "Not supported";

6692 case DSERR_NODRIVER:
6693 return "No driver";

6695 case DSERR_ALREADYINITIALIZED:
6696 return "Already initialized";

6698 case DSERR_NOAGGREGATION:
6699 return "No aggregation";

6701 case DSERR_BUFFERLOST:
6702 return "Buffer lost";

6704 case DSERR_OTHERAPPHASPRIO:
6705 return "Another application already has priority";

6707 case DSERR_UNINITIALIZED:
6708 return "Uninitialized";

// Fallback for any code not handled above.
6711 return "DirectSound unknown error";
\r
6714 //******************** End of __WINDOWS_DS__ *********************//
\r
6718 #if defined(__LINUX_ALSA__)
\r
6720 #include <alsa/asoundlib.h>
\r
6721 #include <unistd.h>
\r
6723 // A structure to hold various information related to the ALSA API
\r
6724 // implementation.
\r
// Per-stream state for the ALSA implementation.
6725 struct AlsaHandle {
// PCM handles for the two stream directions.  NOTE(review): assumed
// index 0 = playback, 1 = capture, matching the output/input pairing
// used elsewhere in this file -- confirm against probeDeviceOpen.
6726 snd_pcm_t *handles[2];
// True when the two stream directions are synchronized.
6727 bool synchronized;

// Condition variable used to signal changes of the runnable flag.
6729 pthread_cond_t runnable_cv;

// Constructor initializer: start unsynchronized, not runnable, and
// with no xruns recorded for either direction.
6733 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6736 static void *alsaCallbackHandler( void * ptr );
\r
// Default constructor: no work needed; all ALSA resources are
// acquired lazily when a stream is opened.
6738 RtApiAlsa :: RtApiAlsa()

6740 // Nothing to do here.
\r
// Destructor: ensure any open stream is stopped and its resources
// released before the API object goes away.
6743 RtApiAlsa :: ~RtApiAlsa()

6745 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count the available ALSA PCM devices: every PCM device of every
// sound card, plus one extra for the "default" device if it opens.
6748 unsigned int RtApiAlsa :: getDeviceCount( void )

6750 unsigned nDevices = 0;
6751 int result, subdevice, card;

6753 snd_ctl_t *handle;

6755 // Count cards and devices

// Walk the sound cards; snd_card_next() advances 'card' and sets it
// negative when no more cards exist.
6757 snd_card_next( &card );
6758 while ( card >= 0 ) {
6759 sprintf( name, "hw:%d", card );
6760 result = snd_ctl_open( &handle, name, 0 );
6761 if ( result < 0 ) {
6762 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6763 errorText_ = errorStream_.str();
// Non-fatal: report a warning and carry on with the next card.
6764 error( RtAudioError::WARNING );

// Enumerate this card's PCM devices through the control interface.
6769 result = snd_ctl_pcm_next_device( handle, &subdevice );
6770 if ( result < 0 ) {
6771 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6772 errorText_ = errorStream_.str();
6773 error( RtAudioError::WARNING );

// A negative subdevice means no more PCM devices on this card.
6776 if ( subdevice < 0 )

6781 snd_ctl_close( handle );
6782 snd_card_next( &card );

// Also count the "default" device when it can be opened.
6785 result = snd_ctl_open( &handle, "default", 0 );
6786 if (result == 0) {

6788 snd_ctl_close( handle );
\r
6794 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6796 RtAudio::DeviceInfo info;
\r
6797 info.probed = false;
\r
6799 unsigned nDevices = 0;
\r
6800 int result, subdevice, card;
\r
6802 snd_ctl_t *chandle;
\r
6804 // Count cards and devices
\r
6806 snd_card_next( &card );
\r
6807 while ( card >= 0 ) {
\r
6808 sprintf( name, "hw:%d", card );
\r
6809 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6810 if ( result < 0 ) {
\r
6811 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6812 errorText_ = errorStream_.str();
\r
6813 error( RtAudioError::WARNING );
\r
6818 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6819 if ( result < 0 ) {
\r
6820 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6821 errorText_ = errorStream_.str();
\r
6822 error( RtAudioError::WARNING );
\r
6825 if ( subdevice < 0 ) break;
\r
6826 if ( nDevices == device ) {
\r
6827 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6833 snd_ctl_close( chandle );
\r
6834 snd_card_next( &card );
\r
6837 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6838 if ( result == 0 ) {
\r
6839 if ( nDevices == device ) {
\r
6840 strcpy( name, "default" );
\r
6846 if ( nDevices == 0 ) {
\r
6847 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6848 error( RtAudioError::INVALID_USE );
\r
6852 if ( device >= nDevices ) {
\r
6853 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6854 error( RtAudioError::INVALID_USE );
\r
6860 // If a stream is already open, we cannot probe the stream devices.
\r
6861 // Thus, use the saved results.
\r
6862 if ( stream_.state != STREAM_CLOSED &&
\r
6863 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6864 snd_ctl_close( chandle );
\r
6865 if ( device >= devices_.size() ) {
\r
6866 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6867 error( RtAudioError::WARNING );
\r
6870 return devices_[ device ];
\r
6873 int openMode = SND_PCM_ASYNC;
\r
6874 snd_pcm_stream_t stream;
\r
6875 snd_pcm_info_t *pcminfo;
\r
6876 snd_pcm_info_alloca( &pcminfo );
\r
6877 snd_pcm_t *phandle;
\r
6878 snd_pcm_hw_params_t *params;
\r
6879 snd_pcm_hw_params_alloca( ¶ms );
\r
6881 // First try for playback unless default device (which has subdev -1)
\r
6882 stream = SND_PCM_STREAM_PLAYBACK;
\r
6883 snd_pcm_info_set_stream( pcminfo, stream );
\r
6884 if ( subdevice != -1 ) {
\r
6885 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6886 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6888 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6889 if ( result < 0 ) {
\r
6890 // Device probably doesn't support playback.
\r
6891 goto captureProbe;
\r
6895 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6896 if ( result < 0 ) {
\r
6897 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6898 errorText_ = errorStream_.str();
\r
6899 error( RtAudioError::WARNING );
\r
6900 goto captureProbe;
\r
6903 // The device is open ... fill the parameter structure.
\r
6904 result = snd_pcm_hw_params_any( phandle, params );
\r
6905 if ( result < 0 ) {
\r
6906 snd_pcm_close( phandle );
\r
6907 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6908 errorText_ = errorStream_.str();
\r
6909 error( RtAudioError::WARNING );
\r
6910 goto captureProbe;
\r
6913 // Get output channel information.
\r
6914 unsigned int value;
\r
6915 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6916 if ( result < 0 ) {
\r
6917 snd_pcm_close( phandle );
\r
6918 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6919 errorText_ = errorStream_.str();
\r
6920 error( RtAudioError::WARNING );
\r
6921 goto captureProbe;
\r
6923 info.outputChannels = value;
\r
6924 snd_pcm_close( phandle );
\r
6927 stream = SND_PCM_STREAM_CAPTURE;
\r
6928 snd_pcm_info_set_stream( pcminfo, stream );
\r
6930 // Now try for capture unless default device (with subdev = -1)
\r
6931 if ( subdevice != -1 ) {
\r
6932 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6933 snd_ctl_close( chandle );
\r
6934 if ( result < 0 ) {
\r
6935 // Device probably doesn't support capture.
\r
6936 if ( info.outputChannels == 0 ) return info;
\r
6937 goto probeParameters;
\r
6941 snd_ctl_close( chandle );
\r
6943 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6944 if ( result < 0 ) {
\r
6945 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6946 errorText_ = errorStream_.str();
\r
6947 error( RtAudioError::WARNING );
\r
6948 if ( info.outputChannels == 0 ) return info;
\r
6949 goto probeParameters;
\r
6952 // The device is open ... fill the parameter structure.
\r
6953 result = snd_pcm_hw_params_any( phandle, params );
\r
6954 if ( result < 0 ) {
\r
6955 snd_pcm_close( phandle );
\r
6956 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6957 errorText_ = errorStream_.str();
\r
6958 error( RtAudioError::WARNING );
\r
6959 if ( info.outputChannels == 0 ) return info;
\r
6960 goto probeParameters;
\r
6963 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6964 if ( result < 0 ) {
\r
6965 snd_pcm_close( phandle );
\r
6966 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6967 errorText_ = errorStream_.str();
\r
6968 error( RtAudioError::WARNING );
\r
6969 if ( info.outputChannels == 0 ) return info;
\r
6970 goto probeParameters;
\r
6972 info.inputChannels = value;
\r
6973 snd_pcm_close( phandle );
\r
6975 // If device opens for both playback and capture, we determine the channels.
\r
6976 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6977 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6979 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6980 if ( device == 0 && info.outputChannels > 0 )
\r
6981 info.isDefaultOutput = true;
\r
6982 if ( device == 0 && info.inputChannels > 0 )
\r
6983 info.isDefaultInput = true;
\r
6986 // At this point, we just need to figure out the supported data
\r
6987 // formats and sample rates. We'll proceed by opening the device in
\r
6988 // the direction with the maximum number of channels, or playback if
\r
6989 // they are equal. This might limit our sample rate options, but so
\r
6992 if ( info.outputChannels >= info.inputChannels )
\r
6993 stream = SND_PCM_STREAM_PLAYBACK;
\r
6995 stream = SND_PCM_STREAM_CAPTURE;
\r
6996 snd_pcm_info_set_stream( pcminfo, stream );
\r
6998 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6999 if ( result < 0 ) {
\r
7000 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7001 errorText_ = errorStream_.str();
\r
7002 error( RtAudioError::WARNING );
\r
7006 // The device is open ... fill the parameter structure.
\r
7007 result = snd_pcm_hw_params_any( phandle, params );
\r
7008 if ( result < 0 ) {
\r
7009 snd_pcm_close( phandle );
\r
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7011 errorText_ = errorStream_.str();
\r
7012 error( RtAudioError::WARNING );
\r
7016 // Test our discrete set of sample rate values.
\r
7017 info.sampleRates.clear();
\r
7018 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7019 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7020 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7022 if ( info.sampleRates.size() == 0 ) {
\r
7023 snd_pcm_close( phandle );
\r
7024 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7025 errorText_ = errorStream_.str();
\r
7026 error( RtAudioError::WARNING );
\r
7030 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7031 snd_pcm_format_t format;
\r
7032 info.nativeFormats = 0;
\r
7033 format = SND_PCM_FORMAT_S8;
\r
7034 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7035 info.nativeFormats |= RTAUDIO_SINT8;
\r
7036 format = SND_PCM_FORMAT_S16;
\r
7037 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7038 info.nativeFormats |= RTAUDIO_SINT16;
\r
7039 format = SND_PCM_FORMAT_S24;
\r
7040 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7041 info.nativeFormats |= RTAUDIO_SINT24;
\r
7042 format = SND_PCM_FORMAT_S32;
\r
7043 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7044 info.nativeFormats |= RTAUDIO_SINT32;
\r
7045 format = SND_PCM_FORMAT_FLOAT;
\r
7046 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7047 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7048 format = SND_PCM_FORMAT_FLOAT64;
\r
7049 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7050 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7052 // Check that we have at least one supported format
\r
7053 if ( info.nativeFormats == 0 ) {
\r
7054 snd_pcm_close( phandle );
\r
7055 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7056 errorText_ = errorStream_.str();
\r
7057 error( RtAudioError::WARNING );
\r
7061 // Get the device name
\r
7063 result = snd_card_get_name( card, &cardname );
\r
7064 if ( result >= 0 ) {
\r
7065 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7070 // That's all ... close the device and return
\r
7071 snd_pcm_close( phandle );
\r
7072 info.probed = true;
\r
// Probe every device now and cache the results in devices_, so that
// getDeviceInfo() can serve saved data for devices that belong to an
// already-open stream (which cannot be re-probed).
7076 void RtApiAlsa :: saveDeviceInfo( void )

7080 unsigned int nDevices = getDeviceCount();
7081 devices_.resize( nDevices );
7082 for ( unsigned int i=0; i<nDevices; i++ )
7083 devices_[i] = getDeviceInfo( i );
\r
7086 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7087 unsigned int firstChannel, unsigned int sampleRate,
\r
7088 RtAudioFormat format, unsigned int *bufferSize,
\r
7089 RtAudio::StreamOptions *options )
\r
7092 #if defined(__RTAUDIO_DEBUG__)
\r
7093 snd_output_t *out;
\r
7094 snd_output_stdio_attach(&out, stderr, 0);
\r
7097 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7099 unsigned nDevices = 0;
\r
7100 int result, subdevice, card;
\r
7102 snd_ctl_t *chandle;
\r
7104 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7105 snprintf(name, sizeof(name), "%s", "default");
\r
7107 // Count cards and devices
\r
7109 snd_card_next( &card );
\r
7110 while ( card >= 0 ) {
\r
7111 sprintf( name, "hw:%d", card );
\r
7112 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7113 if ( result < 0 ) {
\r
7114 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7115 errorText_ = errorStream_.str();
\r
7120 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7121 if ( result < 0 ) break;
\r
7122 if ( subdevice < 0 ) break;
\r
7123 if ( nDevices == device ) {
\r
7124 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7125 snd_ctl_close( chandle );
\r
7130 snd_ctl_close( chandle );
\r
7131 snd_card_next( &card );
\r
7134 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7135 if ( result == 0 ) {
\r
7136 if ( nDevices == device ) {
\r
7137 strcpy( name, "default" );
\r
7143 if ( nDevices == 0 ) {
\r
7144 // This should not happen because a check is made before this function is called.
\r
7145 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7149 if ( device >= nDevices ) {
\r
7150 // This should not happen because a check is made before this function is called.
\r
7151 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7158 // The getDeviceInfo() function will not work for a device that is
\r
7159 // already open. Thus, we'll probe the system before opening a
\r
7160 // stream and save the results for use by getDeviceInfo().
\r
7161 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7162 this->saveDeviceInfo();
\r
7164 snd_pcm_stream_t stream;
\r
7165 if ( mode == OUTPUT )
\r
7166 stream = SND_PCM_STREAM_PLAYBACK;
\r
7168 stream = SND_PCM_STREAM_CAPTURE;
\r
7170 snd_pcm_t *phandle;
\r
7171 int openMode = SND_PCM_ASYNC;
\r
7172 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7173 if ( result < 0 ) {
\r
7174 if ( mode == OUTPUT )
\r
7175 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7177 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7178 errorText_ = errorStream_.str();
\r
7182 // Fill the parameter structure.
\r
7183 snd_pcm_hw_params_t *hw_params;
\r
7184 snd_pcm_hw_params_alloca( &hw_params );
\r
7185 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7186 if ( result < 0 ) {
\r
7187 snd_pcm_close( phandle );
\r
7188 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7189 errorText_ = errorStream_.str();
\r
7193 #if defined(__RTAUDIO_DEBUG__)
\r
7194 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7195 snd_pcm_hw_params_dump( hw_params, out );
\r
7198 // Set access ... check user preference.
\r
7199 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7200 stream_.userInterleaved = false;
\r
7201 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7202 if ( result < 0 ) {
\r
7203 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7204 stream_.deviceInterleaved[mode] = true;
\r
7207 stream_.deviceInterleaved[mode] = false;
\r
7210 stream_.userInterleaved = true;
\r
7211 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7212 if ( result < 0 ) {
\r
7213 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7214 stream_.deviceInterleaved[mode] = false;
\r
7217 stream_.deviceInterleaved[mode] = true;
\r
7220 if ( result < 0 ) {
\r
7221 snd_pcm_close( phandle );
\r
7222 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7223 errorText_ = errorStream_.str();
\r
7227 // Determine how to set the device format.
\r
7228 stream_.userFormat = format;
\r
7229 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7231 if ( format == RTAUDIO_SINT8 )
\r
7232 deviceFormat = SND_PCM_FORMAT_S8;
\r
7233 else if ( format == RTAUDIO_SINT16 )
\r
7234 deviceFormat = SND_PCM_FORMAT_S16;
\r
7235 else if ( format == RTAUDIO_SINT24 )
\r
7236 deviceFormat = SND_PCM_FORMAT_S24;
\r
7237 else if ( format == RTAUDIO_SINT32 )
\r
7238 deviceFormat = SND_PCM_FORMAT_S32;
\r
7239 else if ( format == RTAUDIO_FLOAT32 )
\r
7240 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7241 else if ( format == RTAUDIO_FLOAT64 )
\r
7242 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7244 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7245 stream_.deviceFormat[mode] = format;
\r
7249 // The user requested format is not natively supported by the device.
\r
7250 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7251 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7252 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7256 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7257 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7258 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7262 deviceFormat = SND_PCM_FORMAT_S32;
\r
7263 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7264 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7268 deviceFormat = SND_PCM_FORMAT_S24;
\r
7269 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7270 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7274 deviceFormat = SND_PCM_FORMAT_S16;
\r
7275 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7276 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7280 deviceFormat = SND_PCM_FORMAT_S8;
\r
7281 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7282 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7286 // If we get here, no supported format was found.
\r
7287 snd_pcm_close( phandle );
\r
7288 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7289 errorText_ = errorStream_.str();
\r
7293 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7294 if ( result < 0 ) {
\r
7295 snd_pcm_close( phandle );
\r
7296 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7297 errorText_ = errorStream_.str();
\r
7301 // Determine whether byte-swaping is necessary.
\r
7302 stream_.doByteSwap[mode] = false;
\r
7303 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7304 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7305 if ( result == 0 )
\r
7306 stream_.doByteSwap[mode] = true;
\r
7307 else if (result < 0) {
\r
7308 snd_pcm_close( phandle );
\r
7309 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7310 errorText_ = errorStream_.str();
\r
7315 // Set the sample rate.
\r
7316 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7317 if ( result < 0 ) {
\r
7318 snd_pcm_close( phandle );
\r
7319 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7320 errorText_ = errorStream_.str();
\r
7324 // Determine the number of channels for this device. We support a possible
\r
7325 // minimum device channel number > than the value requested by the user.
\r
7326 stream_.nUserChannels[mode] = channels;
\r
7327 unsigned int value;
\r
7328 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7329 unsigned int deviceChannels = value;
\r
7330 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7331 snd_pcm_close( phandle );
\r
7332 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7333 errorText_ = errorStream_.str();
\r
7337 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7338 if ( result < 0 ) {
\r
7339 snd_pcm_close( phandle );
\r
7340 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7341 errorText_ = errorStream_.str();
\r
7344 deviceChannels = value;
\r
7345 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7346 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7348 // Set the device channels.
\r
7349 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7350 if ( result < 0 ) {
\r
7351 snd_pcm_close( phandle );
\r
7352 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7353 errorText_ = errorStream_.str();
\r
7357 // Set the buffer (or period) size.
\r
7359 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7360 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7361 if ( result < 0 ) {
\r
7362 snd_pcm_close( phandle );
\r
7363 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7364 errorText_ = errorStream_.str();
\r
7367 *bufferSize = periodSize;
\r
7369 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7370 unsigned int periods = 0;
\r
7371 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7372 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7373 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7374 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7375 if ( result < 0 ) {
\r
7376 snd_pcm_close( phandle );
\r
7377 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7378 errorText_ = errorStream_.str();
\r
7382 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7383 // MUST be the same in both directions!
\r
7384 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7385 snd_pcm_close( phandle );
\r
7386 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7387 errorText_ = errorStream_.str();
\r
7391 stream_.bufferSize = *bufferSize;
\r
7393 // Install the hardware configuration
\r
7394 result = snd_pcm_hw_params( phandle, hw_params );
\r
7395 if ( result < 0 ) {
\r
7396 snd_pcm_close( phandle );
\r
7397 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7398 errorText_ = errorStream_.str();
\r
7402 #if defined(__RTAUDIO_DEBUG__)
\r
7403 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7404 snd_pcm_hw_params_dump( hw_params, out );
\r
7407 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7408 snd_pcm_sw_params_t *sw_params = NULL;
\r
7409 snd_pcm_sw_params_alloca( &sw_params );
\r
7410 snd_pcm_sw_params_current( phandle, sw_params );
\r
7411 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7412 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7413 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7415 // The following two settings were suggested by Theo Veenker
\r
7416 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7417 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7419 // here are two options for a fix
\r
7420 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7421 snd_pcm_uframes_t val;
\r
7422 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7423 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7425 result = snd_pcm_sw_params( phandle, sw_params );
\r
7426 if ( result < 0 ) {
\r
7427 snd_pcm_close( phandle );
\r
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7429 errorText_ = errorStream_.str();
\r
7433 #if defined(__RTAUDIO_DEBUG__)
\r
7434 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7435 snd_pcm_sw_params_dump( sw_params, out );
\r
7438 // Set flags for buffer conversion
\r
7439 stream_.doConvertBuffer[mode] = false;
\r
7440 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7441 stream_.doConvertBuffer[mode] = true;
\r
7442 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7443 stream_.doConvertBuffer[mode] = true;
\r
7444 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7445 stream_.nUserChannels[mode] > 1 )
\r
7446 stream_.doConvertBuffer[mode] = true;
\r
7448 // Allocate the ApiHandle if necessary and then save.
\r
7449 AlsaHandle *apiInfo = 0;
\r
7450 if ( stream_.apiHandle == 0 ) {
\r
7452 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7454 catch ( std::bad_alloc& ) {
\r
7455 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7459 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7460 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7464 stream_.apiHandle = (void *) apiInfo;
\r
7465 apiInfo->handles[0] = 0;
\r
7466 apiInfo->handles[1] = 0;
\r
7469 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7471 apiInfo->handles[mode] = phandle;
\r
7474 // Allocate necessary internal buffers.
\r
7475 unsigned long bufferBytes;
\r
7476 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7477 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7478 if ( stream_.userBuffer[mode] == NULL ) {
\r
7479 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7483 if ( stream_.doConvertBuffer[mode] ) {
\r
7485 bool makeBuffer = true;
\r
7486 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7487 if ( mode == INPUT ) {
\r
7488 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7489 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7490 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7494 if ( makeBuffer ) {
\r
7495 bufferBytes *= *bufferSize;
\r
7496 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7497 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7498 if ( stream_.deviceBuffer == NULL ) {
\r
7499 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7505 stream_.sampleRate = sampleRate;
\r
7506 stream_.nBuffers = periods;
\r
7507 stream_.device[mode] = device;
\r
7508 stream_.state = STREAM_STOPPED;
\r
7510 // Setup the buffer conversion information structure.
\r
7511 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7513 // Setup thread if necessary.
\r
7514 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7515 // We had already set up an output stream.
\r
7516 stream_.mode = DUPLEX;
\r
7517 // Link the streams if possible.
\r
7518 apiInfo->synchronized = false;
\r
7519 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7520 apiInfo->synchronized = true;
\r
7522 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7523 error( RtAudioError::WARNING );
\r
7527 stream_.mode = mode;
\r
7529 // Setup callback thread.
\r
7530 stream_.callbackInfo.object = (void *) this;
\r
7532 // Set the thread attributes for joinable and realtime scheduling
\r
7533 // priority (optional). The higher priority will only take affect
\r
7534 // if the program is run as root or suid. Note, under Linux
\r
7535 // processes with CAP_SYS_NICE privilege, a user can change
\r
7536 // scheduling policy and priority (thus need not be root). See
\r
7537 // POSIX "capabilities".
\r
7538 pthread_attr_t attr;
\r
7539 pthread_attr_init( &attr );
\r
7540 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7542 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7543 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7544 // We previously attempted to increase the audio callback priority
\r
7545 // to SCHED_RR here via the attributes. However, while no errors
\r
7546 // were reported in doing so, it did not work. So, now this is
\r
7547 // done in the alsaCallbackHandler function.
\r
7548 stream_.callbackInfo.doRealtime = true;
\r
7549 int priority = options->priority;
\r
7550 int min = sched_get_priority_min( SCHED_RR );
\r
7551 int max = sched_get_priority_max( SCHED_RR );
\r
7552 if ( priority < min ) priority = min;
\r
7553 else if ( priority > max ) priority = max;
\r
7554 stream_.callbackInfo.priority = priority;
\r
7558 stream_.callbackInfo.isRunning = true;
\r
7559 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7560 pthread_attr_destroy( &attr );
\r
7562 stream_.callbackInfo.isRunning = false;
\r
7563 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7572 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7573 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7574 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7576 stream_.apiHandle = 0;
\r
7579 if ( phandle) snd_pcm_close( phandle );
\r
7581 for ( int i=0; i<2; i++ ) {
\r
7582 if ( stream_.userBuffer[i] ) {
\r
7583 free( stream_.userBuffer[i] );
\r
7584 stream_.userBuffer[i] = 0;
\r
7588 if ( stream_.deviceBuffer ) {
\r
7589 free( stream_.deviceBuffer );
\r
7590 stream_.deviceBuffer = 0;
\r
7593 stream_.state = STREAM_CLOSED;
\r
7597 void RtApiAlsa :: closeStream()
\r
7599 if ( stream_.state == STREAM_CLOSED ) {
\r
7600 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7601 error( RtAudioError::WARNING );
\r
7605 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7606 stream_.callbackInfo.isRunning = false;
\r
7607 MUTEX_LOCK( &stream_.mutex );
\r
7608 if ( stream_.state == STREAM_STOPPED ) {
\r
7609 apiInfo->runnable = true;
\r
7610 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7612 MUTEX_UNLOCK( &stream_.mutex );
\r
7613 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7615 if ( stream_.state == STREAM_RUNNING ) {
\r
7616 stream_.state = STREAM_STOPPED;
\r
7617 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7618 snd_pcm_drop( apiInfo->handles[0] );
\r
7619 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7620 snd_pcm_drop( apiInfo->handles[1] );
\r
7624 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7625 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7626 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7628 stream_.apiHandle = 0;
\r
7631 for ( int i=0; i<2; i++ ) {
\r
7632 if ( stream_.userBuffer[i] ) {
\r
7633 free( stream_.userBuffer[i] );
\r
7634 stream_.userBuffer[i] = 0;
\r
7638 if ( stream_.deviceBuffer ) {
\r
7639 free( stream_.deviceBuffer );
\r
7640 stream_.deviceBuffer = 0;
\r
7643 stream_.mode = UNINITIALIZED;
\r
7644 stream_.state = STREAM_CLOSED;
\r
7647 void RtApiAlsa :: startStream()
\r
7649 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7652 if ( stream_.state == STREAM_RUNNING ) {
\r
7653 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7654 error( RtAudioError::WARNING );
\r
7658 MUTEX_LOCK( &stream_.mutex );
\r
7661 snd_pcm_state_t state;
\r
7662 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7663 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7664 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7665 state = snd_pcm_state( handle[0] );
\r
7666 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7667 result = snd_pcm_prepare( handle[0] );
\r
7668 if ( result < 0 ) {
\r
7669 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7670 errorText_ = errorStream_.str();
\r
7676 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7677 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7678 state = snd_pcm_state( handle[1] );
\r
7679 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7680 result = snd_pcm_prepare( handle[1] );
\r
7681 if ( result < 0 ) {
\r
7682 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7683 errorText_ = errorStream_.str();
\r
7689 stream_.state = STREAM_RUNNING;
\r
7692 apiInfo->runnable = true;
\r
7693 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7694 MUTEX_UNLOCK( &stream_.mutex );
\r
7696 if ( result >= 0 ) return;
\r
7697 error( RtAudioError::SYSTEM_ERROR );
\r
7700 void RtApiAlsa :: stopStream()
\r
7703 if ( stream_.state == STREAM_STOPPED ) {
\r
7704 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7705 error( RtAudioError::WARNING );
\r
7709 stream_.state = STREAM_STOPPED;
\r
7710 MUTEX_LOCK( &stream_.mutex );
\r
7713 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7714 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7715 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7716 if ( apiInfo->synchronized )
\r
7717 result = snd_pcm_drop( handle[0] );
\r
7719 result = snd_pcm_drain( handle[0] );
\r
7720 if ( result < 0 ) {
\r
7721 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7722 errorText_ = errorStream_.str();
\r
7727 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7728 result = snd_pcm_drop( handle[1] );
\r
7729 if ( result < 0 ) {
\r
7730 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7731 errorText_ = errorStream_.str();
\r
7737 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7738 MUTEX_UNLOCK( &stream_.mutex );
\r
7740 if ( result >= 0 ) return;
\r
7741 error( RtAudioError::SYSTEM_ERROR );
\r
7744 void RtApiAlsa :: abortStream()
\r
7747 if ( stream_.state == STREAM_STOPPED ) {
\r
7748 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7749 error( RtAudioError::WARNING );
\r
7753 stream_.state = STREAM_STOPPED;
\r
7754 MUTEX_LOCK( &stream_.mutex );
\r
7757 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7758 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7759 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7760 result = snd_pcm_drop( handle[0] );
\r
7761 if ( result < 0 ) {
\r
7762 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7763 errorText_ = errorStream_.str();
\r
7768 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7769 result = snd_pcm_drop( handle[1] );
\r
7770 if ( result < 0 ) {
\r
7771 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7772 errorText_ = errorStream_.str();
\r
7778 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7779 MUTEX_UNLOCK( &stream_.mutex );
\r
7781 if ( result >= 0 ) return;
\r
7782 error( RtAudioError::SYSTEM_ERROR );
\r
7785 void RtApiAlsa :: callbackEvent()
\r
7787 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7788 if ( stream_.state == STREAM_STOPPED ) {
\r
7789 MUTEX_LOCK( &stream_.mutex );
\r
7790 while ( !apiInfo->runnable )
\r
7791 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7793 if ( stream_.state != STREAM_RUNNING ) {
\r
7794 MUTEX_UNLOCK( &stream_.mutex );
\r
7797 MUTEX_UNLOCK( &stream_.mutex );
\r
7800 if ( stream_.state == STREAM_CLOSED ) {
\r
7801 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7802 error( RtAudioError::WARNING );
\r
7806 int doStopStream = 0;
\r
7807 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7808 double streamTime = getStreamTime();
\r
7809 RtAudioStreamStatus status = 0;
\r
7810 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7811 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7812 apiInfo->xrun[0] = false;
\r
7814 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7815 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7816 apiInfo->xrun[1] = false;
\r
7818 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7819 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7821 if ( doStopStream == 2 ) {
\r
7826 MUTEX_LOCK( &stream_.mutex );
\r
7828 // The state might change while waiting on a mutex.
\r
7829 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7834 snd_pcm_t **handle;
\r
7835 snd_pcm_sframes_t frames;
\r
7836 RtAudioFormat format;
\r
7837 handle = (snd_pcm_t **) apiInfo->handles;
\r
7839 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7841 // Setup parameters.
\r
7842 if ( stream_.doConvertBuffer[1] ) {
\r
7843 buffer = stream_.deviceBuffer;
\r
7844 channels = stream_.nDeviceChannels[1];
\r
7845 format = stream_.deviceFormat[1];
\r
7848 buffer = stream_.userBuffer[1];
\r
7849 channels = stream_.nUserChannels[1];
\r
7850 format = stream_.userFormat;
\r
7853 // Read samples from device in interleaved/non-interleaved format.
\r
7854 if ( stream_.deviceInterleaved[1] )
\r
7855 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7857 void *bufs[channels];
\r
7858 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7859 for ( int i=0; i<channels; i++ )
\r
7860 bufs[i] = (void *) (buffer + (i * offset));
\r
7861 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7864 if ( result < (int) stream_.bufferSize ) {
\r
7865 // Either an error or overrun occured.
\r
7866 if ( result == -EPIPE ) {
\r
7867 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7868 if ( state == SND_PCM_STATE_XRUN ) {
\r
7869 apiInfo->xrun[1] = true;
\r
7870 result = snd_pcm_prepare( handle[1] );
\r
7871 if ( result < 0 ) {
\r
7872 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7873 errorText_ = errorStream_.str();
\r
7877 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7878 errorText_ = errorStream_.str();
\r
7882 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7883 errorText_ = errorStream_.str();
\r
7885 error( RtAudioError::WARNING );
\r
7889 // Do byte swapping if necessary.
\r
7890 if ( stream_.doByteSwap[1] )
\r
7891 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7893 // Do buffer conversion if necessary.
\r
7894 if ( stream_.doConvertBuffer[1] )
\r
7895 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7897 // Check stream latency
\r
7898 result = snd_pcm_delay( handle[1], &frames );
\r
7899 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7904 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7906 // Setup parameters and do buffer conversion if necessary.
\r
7907 if ( stream_.doConvertBuffer[0] ) {
\r
7908 buffer = stream_.deviceBuffer;
\r
7909 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7910 channels = stream_.nDeviceChannels[0];
\r
7911 format = stream_.deviceFormat[0];
\r
7914 buffer = stream_.userBuffer[0];
\r
7915 channels = stream_.nUserChannels[0];
\r
7916 format = stream_.userFormat;
\r
7919 // Do byte swapping if necessary.
\r
7920 if ( stream_.doByteSwap[0] )
\r
7921 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7923 // Write samples to device in interleaved/non-interleaved format.
\r
7924 if ( stream_.deviceInterleaved[0] )
\r
7925 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7927 void *bufs[channels];
\r
7928 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7929 for ( int i=0; i<channels; i++ )
\r
7930 bufs[i] = (void *) (buffer + (i * offset));
\r
7931 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7934 if ( result < (int) stream_.bufferSize ) {
\r
7935 // Either an error or underrun occured.
\r
7936 if ( result == -EPIPE ) {
\r
7937 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7938 if ( state == SND_PCM_STATE_XRUN ) {
\r
7939 apiInfo->xrun[0] = true;
\r
7940 result = snd_pcm_prepare( handle[0] );
\r
7941 if ( result < 0 ) {
\r
7942 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7943 errorText_ = errorStream_.str();
\r
7947 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7948 errorText_ = errorStream_.str();
\r
7952 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7953 errorText_ = errorStream_.str();
\r
7955 error( RtAudioError::WARNING );
\r
7959 // Check stream latency
\r
7960 result = snd_pcm_delay( handle[0], &frames );
\r
7961 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7965 MUTEX_UNLOCK( &stream_.mutex );
\r
7967 RtApi::tickStreamTime();
\r
7968 if ( doStopStream == 1 ) this->stopStream();
\r
7971 static void *alsaCallbackHandler( void *ptr )
\r
7973 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7974 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7975 bool *isRunning = &info->isRunning;
\r
7977 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7978 if ( &info->doRealtime ) {
\r
7979 pthread_t tID = pthread_self(); // ID of this thread
\r
7980 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7981 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7985 while ( *isRunning == true ) {
\r
7986 pthread_testcancel();
\r
7987 object->callbackEvent();
\r
7990 pthread_exit( NULL );
\r
7993 //******************** End of __LINUX_ALSA__ *********************//
\r
7996 #if defined(__LINUX_PULSE__)
\r
7998 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
7999 // and Tristan Matthews.
\r
8001 #include <pulse/error.h>
\r
8002 #include <pulse/simple.h>
\r
8005 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8006 44100, 48000, 96000, 0};
\r
8008 struct rtaudio_pa_format_mapping_t {
\r
8009 RtAudioFormat rtaudio_format;
\r
8010 pa_sample_format_t pa_format;
\r
8013 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8014 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8015 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8016 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8017 {0, PA_SAMPLE_INVALID}};
\r
8019 struct PulseAudioHandle {
\r
8020 pa_simple *s_play;
\r
8023 pthread_cond_t runnable_cv;
\r
8025 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8028 RtApiPulse::~RtApiPulse()
\r
8030 if ( stream_.state != STREAM_CLOSED )
\r
8034 unsigned int RtApiPulse::getDeviceCount( void )
\r
8039 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8041 RtAudio::DeviceInfo info;
\r
8042 info.probed = true;
\r
8043 info.name = "PulseAudio";
\r
8044 info.outputChannels = 2;
\r
8045 info.inputChannels = 2;
\r
8046 info.duplexChannels = 2;
\r
8047 info.isDefaultOutput = true;
\r
8048 info.isDefaultInput = true;
\r
8050 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8051 info.sampleRates.push_back( *sr );
\r
8053 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8058 static void *pulseaudio_callback( void * user )
\r
8060 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8061 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8062 volatile bool *isRunning = &cbi->isRunning;
\r
8064 while ( *isRunning ) {
\r
8065 pthread_testcancel();
\r
8066 context->callbackEvent();
\r
8069 pthread_exit( NULL );
\r
8072 void RtApiPulse::closeStream( void )
\r
8074 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8076 stream_.callbackInfo.isRunning = false;
\r
8078 MUTEX_LOCK( &stream_.mutex );
\r
8079 if ( stream_.state == STREAM_STOPPED ) {
\r
8080 pah->runnable = true;
\r
8081 pthread_cond_signal( &pah->runnable_cv );
\r
8083 MUTEX_UNLOCK( &stream_.mutex );
\r
8085 pthread_join( pah->thread, 0 );
\r
8086 if ( pah->s_play ) {
\r
8087 pa_simple_flush( pah->s_play, NULL );
\r
8088 pa_simple_free( pah->s_play );
\r
8091 pa_simple_free( pah->s_rec );
\r
8093 pthread_cond_destroy( &pah->runnable_cv );
\r
8095 stream_.apiHandle = 0;
\r
8098 if ( stream_.userBuffer[0] ) {
\r
8099 free( stream_.userBuffer[0] );
\r
8100 stream_.userBuffer[0] = 0;
\r
8102 if ( stream_.userBuffer[1] ) {
\r
8103 free( stream_.userBuffer[1] );
\r
8104 stream_.userBuffer[1] = 0;
\r
8107 stream_.state = STREAM_CLOSED;
\r
8108 stream_.mode = UNINITIALIZED;
\r
8111 void RtApiPulse::callbackEvent( void )
\r
8113 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8115 if ( stream_.state == STREAM_STOPPED ) {
\r
8116 MUTEX_LOCK( &stream_.mutex );
\r
8117 while ( !pah->runnable )
\r
8118 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8120 if ( stream_.state != STREAM_RUNNING ) {
\r
8121 MUTEX_UNLOCK( &stream_.mutex );
\r
8124 MUTEX_UNLOCK( &stream_.mutex );
\r
8127 if ( stream_.state == STREAM_CLOSED ) {
\r
8128 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8129 "this shouldn't happen!";
\r
8130 error( RtAudioError::WARNING );
\r
8134 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8135 double streamTime = getStreamTime();
\r
8136 RtAudioStreamStatus status = 0;
\r
8137 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8138 stream_.bufferSize, streamTime, status,
\r
8139 stream_.callbackInfo.userData );
\r
8141 if ( doStopStream == 2 ) {
\r
8146 MUTEX_LOCK( &stream_.mutex );
\r
8147 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8148 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8150 if ( stream_.state != STREAM_RUNNING )
\r
8155 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8156 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8157 convertBuffer( stream_.deviceBuffer,
\r
8158 stream_.userBuffer[OUTPUT],
\r
8159 stream_.convertInfo[OUTPUT] );
\r
8160 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8161 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8163 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8164 formatBytes( stream_.userFormat );
\r
8166 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8167 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8168 pa_strerror( pa_error ) << ".";
\r
8169 errorText_ = errorStream_.str();
\r
8170 error( RtAudioError::WARNING );
\r
8174 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8175 if ( stream_.doConvertBuffer[INPUT] )
\r
8176 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8177 formatBytes( stream_.deviceFormat[INPUT] );
\r
8179 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8180 formatBytes( stream_.userFormat );
\r
8182 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8183 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8184 pa_strerror( pa_error ) << ".";
\r
8185 errorText_ = errorStream_.str();
\r
8186 error( RtAudioError::WARNING );
\r
8188 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8189 convertBuffer( stream_.userBuffer[INPUT],
\r
8190 stream_.deviceBuffer,
\r
8191 stream_.convertInfo[INPUT] );
\r
8196 MUTEX_UNLOCK( &stream_.mutex );
\r
8197 RtApi::tickStreamTime();
\r
8199 if ( doStopStream == 1 )
\r
8203 void RtApiPulse::startStream( void )
\r
8205 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8207 if ( stream_.state == STREAM_CLOSED ) {
\r
8208 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8209 error( RtAudioError::INVALID_USE );
\r
8212 if ( stream_.state == STREAM_RUNNING ) {
\r
8213 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8214 error( RtAudioError::WARNING );
\r
8218 MUTEX_LOCK( &stream_.mutex );
\r
8220 stream_.state = STREAM_RUNNING;
\r
8222 pah->runnable = true;
\r
8223 pthread_cond_signal( &pah->runnable_cv );
\r
8224 MUTEX_UNLOCK( &stream_.mutex );
\r
8227 void RtApiPulse::stopStream( void )
\r
8229 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8231 if ( stream_.state == STREAM_CLOSED ) {
\r
8232 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8233 error( RtAudioError::INVALID_USE );
\r
8236 if ( stream_.state == STREAM_STOPPED ) {
\r
8237 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8238 error( RtAudioError::WARNING );
\r
8242 stream_.state = STREAM_STOPPED;
\r
8243 MUTEX_LOCK( &stream_.mutex );
\r
8245 if ( pah && pah->s_play ) {
\r
8247 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8248 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8249 pa_strerror( pa_error ) << ".";
\r
8250 errorText_ = errorStream_.str();
\r
8251 MUTEX_UNLOCK( &stream_.mutex );
\r
8252 error( RtAudioError::SYSTEM_ERROR );
\r
8257 stream_.state = STREAM_STOPPED;
\r
8258 MUTEX_UNLOCK( &stream_.mutex );
\r
8261 void RtApiPulse::abortStream( void )
\r
8263 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8265 if ( stream_.state == STREAM_CLOSED ) {
\r
8266 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8267 error( RtAudioError::INVALID_USE );
\r
8270 if ( stream_.state == STREAM_STOPPED ) {
\r
8271 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8272 error( RtAudioError::WARNING );
\r
8276 stream_.state = STREAM_STOPPED;
\r
8277 MUTEX_LOCK( &stream_.mutex );
\r
8279 if ( pah && pah->s_play ) {
\r
8281 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8282 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8283 pa_strerror( pa_error ) << ".";
\r
8284 errorText_ = errorStream_.str();
\r
8285 MUTEX_UNLOCK( &stream_.mutex );
\r
8286 error( RtAudioError::SYSTEM_ERROR );
\r
8291 stream_.state = STREAM_STOPPED;
\r
8292 MUTEX_UNLOCK( &stream_.mutex );
\r
8295 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8296 unsigned int channels, unsigned int firstChannel,
\r
8297 unsigned int sampleRate, RtAudioFormat format,
\r
8298 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8300 PulseAudioHandle *pah = 0;
\r
8301 unsigned long bufferBytes = 0;
\r
8302 pa_sample_spec ss;
\r
8304 if ( device != 0 ) return false;
\r
8305 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8306 if ( channels != 1 && channels != 2 ) {
\r
8307 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8310 ss.channels = channels;
\r
8312 if ( firstChannel != 0 ) return false;
\r
8314 bool sr_found = false;
\r
8315 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8316 if ( sampleRate == *sr ) {
\r
8318 stream_.sampleRate = sampleRate;
\r
8319 ss.rate = sampleRate;
\r
8323 if ( !sr_found ) {
\r
8324 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8328 bool sf_found = 0;
\r
8329 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8330 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8331 if ( format == sf->rtaudio_format ) {
\r
8333 stream_.userFormat = sf->rtaudio_format;
\r
8334 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8335 ss.format = sf->pa_format;
\r
8339 if ( !sf_found ) { // Use internal data format conversion.
\r
8340 stream_.userFormat = format;
\r
8341 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8342 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8345 // Set other stream parameters.
\r
8346 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8347 else stream_.userInterleaved = true;
\r
8348 stream_.deviceInterleaved[mode] = true;
\r
8349 stream_.nBuffers = 1;
\r
8350 stream_.doByteSwap[mode] = false;
\r
8351 stream_.nUserChannels[mode] = channels;
\r
8352 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8353 stream_.channelOffset[mode] = 0;
\r
8354 std::string streamName = "RtAudio";
\r
8356 // Set flags for buffer conversion.
\r
8357 stream_.doConvertBuffer[mode] = false;
\r
8358 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8359 stream_.doConvertBuffer[mode] = true;
\r
8360 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8361 stream_.doConvertBuffer[mode] = true;
\r
8363 // Allocate necessary internal buffers.
\r
8364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8365 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8366 if ( stream_.userBuffer[mode] == NULL ) {
\r
8367 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8370 stream_.bufferSize = *bufferSize;
\r
8372 if ( stream_.doConvertBuffer[mode] ) {
\r
8374 bool makeBuffer = true;
\r
8375 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8376 if ( mode == INPUT ) {
\r
8377 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8378 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8379 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8383 if ( makeBuffer ) {
\r
8384 bufferBytes *= *bufferSize;
\r
8385 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8386 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8387 if ( stream_.deviceBuffer == NULL ) {
\r
8388 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8394 stream_.device[mode] = device;
\r
8396 // Setup the buffer conversion information structure.
\r
8397 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8399 if ( !stream_.apiHandle ) {
\r
8400 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8402 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8406 stream_.apiHandle = pah;
\r
8407 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8408 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8412 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8415 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8418 pa_buffer_attr buffer_attr;
\r
8419 buffer_attr.fragsize = bufferBytes;
\r
8420 buffer_attr.maxlength = -1;
\r
8422 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8423 if ( !pah->s_rec ) {
\r
8424 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8429 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8430 if ( !pah->s_play ) {
\r
8431 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8439 if ( stream_.mode == UNINITIALIZED )
\r
8440 stream_.mode = mode;
\r
8441 else if ( stream_.mode == mode )
\r
8444 stream_.mode = DUPLEX;
\r
8446 if ( !stream_.callbackInfo.isRunning ) {
\r
8447 stream_.callbackInfo.object = this;
\r
8448 stream_.callbackInfo.isRunning = true;
\r
8449 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8450 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8455 stream_.state = STREAM_STOPPED;
\r
8459 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8460 pthread_cond_destroy( &pah->runnable_cv );
\r
8462 stream_.apiHandle = 0;
\r
8465 for ( int i=0; i<2; i++ ) {
\r
8466 if ( stream_.userBuffer[i] ) {
\r
8467 free( stream_.userBuffer[i] );
\r
8468 stream_.userBuffer[i] = 0;
\r
8472 if ( stream_.deviceBuffer ) {
\r
8473 free( stream_.deviceBuffer );
\r
8474 stream_.deviceBuffer = 0;
\r
8480 //******************** End of __LINUX_PULSE__ *********************//
\r
8483 #if defined(__LINUX_OSS__)
\r
8485 #include <unistd.h>
\r
8486 #include <sys/ioctl.h>
\r
8487 #include <unistd.h>
\r
8488 #include <fcntl.h>
\r
8489 #include <sys/soundcard.h>
\r
8490 #include <errno.h>
\r
static void *ossCallbackHandler( void *ptr );

// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback = 0, record = 1)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // output has been primed/triggered
  pthread_cond_t runnable; // signaled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8507 RtApiOss :: RtApiOss()
\r
8509 // Nothing to do here.
\r
8512 RtApiOss :: ~RtApiOss()
\r
8514 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8517 unsigned int RtApiOss :: getDeviceCount( void )
\r
8519 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8520 if ( mixerfd == -1 ) {
\r
8521 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8522 error( RtAudioError::WARNING );
\r
8526 oss_sysinfo sysinfo;
\r
8527 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8529 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8530 error( RtAudioError::WARNING );
\r
8535 return sysinfo.numaudios;
\r
8538 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8540 RtAudio::DeviceInfo info;
\r
8541 info.probed = false;
\r
8543 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8544 if ( mixerfd == -1 ) {
\r
8545 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8546 error( RtAudioError::WARNING );
\r
8550 oss_sysinfo sysinfo;
\r
8551 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8552 if ( result == -1 ) {
\r
8554 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8555 error( RtAudioError::WARNING );
\r
8559 unsigned nDevices = sysinfo.numaudios;
\r
8560 if ( nDevices == 0 ) {
\r
8562 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8563 error( RtAudioError::INVALID_USE );
\r
8567 if ( device >= nDevices ) {
\r
8569 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8570 error( RtAudioError::INVALID_USE );
\r
8574 oss_audioinfo ainfo;
\r
8575 ainfo.dev = device;
\r
8576 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8578 if ( result == -1 ) {
\r
8579 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8580 errorText_ = errorStream_.str();
\r
8581 error( RtAudioError::WARNING );
\r
8586 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8587 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8588 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8589 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8590 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8593 // Probe data formats ... do for input
\r
8594 unsigned long mask = ainfo.iformats;
\r
8595 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8596 info.nativeFormats |= RTAUDIO_SINT16;
\r
8597 if ( mask & AFMT_S8 )
\r
8598 info.nativeFormats |= RTAUDIO_SINT8;
\r
8599 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8600 info.nativeFormats |= RTAUDIO_SINT32;
\r
8601 if ( mask & AFMT_FLOAT )
\r
8602 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8603 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8604 info.nativeFormats |= RTAUDIO_SINT24;
\r
8606 // Check that we have at least one supported format
\r
8607 if ( info.nativeFormats == 0 ) {
\r
8608 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8609 errorText_ = errorStream_.str();
\r
8610 error( RtAudioError::WARNING );
\r
8614 // Probe the supported sample rates.
\r
8615 info.sampleRates.clear();
\r
8616 if ( ainfo.nrates ) {
\r
8617 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8618 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8619 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8620 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8627 // Check min and max rate values;
\r
8628 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8629 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8630 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8634 if ( info.sampleRates.size() == 0 ) {
\r
8635 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8636 errorText_ = errorStream_.str();
\r
8637 error( RtAudioError::WARNING );
\r
8640 info.probed = true;
\r
8641 info.name = ainfo.name;
\r
8648 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8649 unsigned int firstChannel, unsigned int sampleRate,
\r
8650 RtAudioFormat format, unsigned int *bufferSize,
\r
8651 RtAudio::StreamOptions *options )
\r
8653 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8654 if ( mixerfd == -1 ) {
\r
8655 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8659 oss_sysinfo sysinfo;
\r
8660 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8661 if ( result == -1 ) {
\r
8663 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8667 unsigned nDevices = sysinfo.numaudios;
\r
8668 if ( nDevices == 0 ) {
\r
8669 // This should not happen because a check is made before this function is called.
\r
8671 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8675 if ( device >= nDevices ) {
\r
8676 // This should not happen because a check is made before this function is called.
\r
8678 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8682 oss_audioinfo ainfo;
\r
8683 ainfo.dev = device;
\r
8684 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8686 if ( result == -1 ) {
\r
8687 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8688 errorText_ = errorStream_.str();
\r
8692 // Check if device supports input or output
\r
8693 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8694 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8695 if ( mode == OUTPUT )
\r
8696 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8698 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8699 errorText_ = errorStream_.str();
\r
8704 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8705 if ( mode == OUTPUT )
\r
8706 flags |= O_WRONLY;
\r
8707 else { // mode == INPUT
\r
8708 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8709 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8710 close( handle->id[0] );
\r
8711 handle->id[0] = 0;
\r
8712 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8713 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8714 errorText_ = errorStream_.str();
\r
8717 // Check that the number previously set channels is the same.
\r
8718 if ( stream_.nUserChannels[0] != channels ) {
\r
8719 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8720 errorText_ = errorStream_.str();
\r
8726 flags |= O_RDONLY;
\r
8729 // Set exclusive access if specified.
\r
8730 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8732 // Try to open the device.
\r
8734 fd = open( ainfo.devnode, flags, 0 );
\r
8736 if ( errno == EBUSY )
\r
8737 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8739 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8740 errorText_ = errorStream_.str();
\r
8744 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8746 if ( flags | O_RDWR ) {
\r
8747 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8748 if ( result == -1) {
\r
8749 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8750 errorText_ = errorStream_.str();
\r
8756 // Check the device channel support.
\r
8757 stream_.nUserChannels[mode] = channels;
\r
8758 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8760 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8761 errorText_ = errorStream_.str();
\r
8765 // Set the number of channels.
\r
8766 int deviceChannels = channels + firstChannel;
\r
8767 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8768 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8770 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8771 errorText_ = errorStream_.str();
\r
8774 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8776 // Get the data format mask
\r
8778 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8779 if ( result == -1 ) {
\r
8781 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8782 errorText_ = errorStream_.str();
\r
8786 // Determine how to set the device format.
\r
8787 stream_.userFormat = format;
\r
8788 int deviceFormat = -1;
\r
8789 stream_.doByteSwap[mode] = false;
\r
8790 if ( format == RTAUDIO_SINT8 ) {
\r
8791 if ( mask & AFMT_S8 ) {
\r
8792 deviceFormat = AFMT_S8;
\r
8793 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8796 else if ( format == RTAUDIO_SINT16 ) {
\r
8797 if ( mask & AFMT_S16_NE ) {
\r
8798 deviceFormat = AFMT_S16_NE;
\r
8799 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8801 else if ( mask & AFMT_S16_OE ) {
\r
8802 deviceFormat = AFMT_S16_OE;
\r
8803 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8804 stream_.doByteSwap[mode] = true;
\r
8807 else if ( format == RTAUDIO_SINT24 ) {
\r
8808 if ( mask & AFMT_S24_NE ) {
\r
8809 deviceFormat = AFMT_S24_NE;
\r
8810 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8812 else if ( mask & AFMT_S24_OE ) {
\r
8813 deviceFormat = AFMT_S24_OE;
\r
8814 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8815 stream_.doByteSwap[mode] = true;
\r
8818 else if ( format == RTAUDIO_SINT32 ) {
\r
8819 if ( mask & AFMT_S32_NE ) {
\r
8820 deviceFormat = AFMT_S32_NE;
\r
8821 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8823 else if ( mask & AFMT_S32_OE ) {
\r
8824 deviceFormat = AFMT_S32_OE;
\r
8825 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8826 stream_.doByteSwap[mode] = true;
\r
8830 if ( deviceFormat == -1 ) {
\r
8831 // The user requested format is not natively supported by the device.
\r
8832 if ( mask & AFMT_S16_NE ) {
\r
8833 deviceFormat = AFMT_S16_NE;
\r
8834 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8836 else if ( mask & AFMT_S32_NE ) {
\r
8837 deviceFormat = AFMT_S32_NE;
\r
8838 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8840 else if ( mask & AFMT_S24_NE ) {
\r
8841 deviceFormat = AFMT_S24_NE;
\r
8842 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8844 else if ( mask & AFMT_S16_OE ) {
\r
8845 deviceFormat = AFMT_S16_OE;
\r
8846 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8847 stream_.doByteSwap[mode] = true;
\r
8849 else if ( mask & AFMT_S32_OE ) {
\r
8850 deviceFormat = AFMT_S32_OE;
\r
8851 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8852 stream_.doByteSwap[mode] = true;
\r
8854 else if ( mask & AFMT_S24_OE ) {
\r
8855 deviceFormat = AFMT_S24_OE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8857 stream_.doByteSwap[mode] = true;
\r
8859 else if ( mask & AFMT_S8) {
\r
8860 deviceFormat = AFMT_S8;
\r
8861 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8865 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8866 // This really shouldn't happen ...
\r
8868 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8869 errorText_ = errorStream_.str();
\r
8873 // Set the data format.
\r
8874 int temp = deviceFormat;
\r
8875 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8876 if ( result == -1 || deviceFormat != temp ) {
\r
8878 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8879 errorText_ = errorStream_.str();
\r
8883 // Attempt to set the buffer size. According to OSS, the minimum
\r
8884 // number of buffers is two. The supposed minimum buffer size is 16
\r
8885 // bytes, so that will be our lower bound. The argument to this
\r
8886 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8887 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8888 // We'll check the actual value used near the end of the setup
\r
8890 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8891 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8893 if ( options ) buffers = options->numberOfBuffers;
\r
8894 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8895 if ( buffers < 2 ) buffers = 3;
\r
8896 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8897 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8898 if ( result == -1 ) {
\r
8900 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8901 errorText_ = errorStream_.str();
\r
8904 stream_.nBuffers = buffers;
\r
8906 // Save buffer size (in sample frames).
\r
8907 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8908 stream_.bufferSize = *bufferSize;
\r
8910 // Set the sample rate.
\r
8911 int srate = sampleRate;
\r
8912 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8913 if ( result == -1 ) {
\r
8915 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8916 errorText_ = errorStream_.str();
\r
8920 // Verify the sample rate setup worked.
\r
8921 if ( abs( srate - sampleRate ) > 100 ) {
\r
8923 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8924 errorText_ = errorStream_.str();
\r
8927 stream_.sampleRate = sampleRate;
\r
8929 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8930 // We're doing duplex setup here.
\r
8931 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8932 stream_.nDeviceChannels[0] = deviceChannels;
\r
8935 // Set interleaving parameters.
\r
8936 stream_.userInterleaved = true;
\r
8937 stream_.deviceInterleaved[mode] = true;
\r
8938 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8939 stream_.userInterleaved = false;
\r
8941 // Set flags for buffer conversion
\r
8942 stream_.doConvertBuffer[mode] = false;
\r
8943 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8944 stream_.doConvertBuffer[mode] = true;
\r
8945 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8946 stream_.doConvertBuffer[mode] = true;
\r
8947 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8948 stream_.nUserChannels[mode] > 1 )
\r
8949 stream_.doConvertBuffer[mode] = true;
\r
8951 // Allocate the stream handles if necessary and then save.
\r
8952 if ( stream_.apiHandle == 0 ) {
\r
8954 handle = new OssHandle;
\r
8956 catch ( std::bad_alloc& ) {
\r
8957 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8961 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8962 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8966 stream_.apiHandle = (void *) handle;
\r
8969 handle = (OssHandle *) stream_.apiHandle;
\r
8971 handle->id[mode] = fd;
\r
8973 // Allocate necessary internal buffers.
\r
8974 unsigned long bufferBytes;
\r
8975 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8976 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8977 if ( stream_.userBuffer[mode] == NULL ) {
\r
8978 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8982 if ( stream_.doConvertBuffer[mode] ) {
\r
8984 bool makeBuffer = true;
\r
8985 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8986 if ( mode == INPUT ) {
\r
8987 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8988 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8989 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8993 if ( makeBuffer ) {
\r
8994 bufferBytes *= *bufferSize;
\r
8995 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8996 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8997 if ( stream_.deviceBuffer == NULL ) {
\r
8998 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9004 stream_.device[mode] = device;
\r
9005 stream_.state = STREAM_STOPPED;
\r
9007 // Setup the buffer conversion information structure.
\r
9008 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9010 // Setup thread if necessary.
\r
9011 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9012 // We had already set up an output stream.
\r
9013 stream_.mode = DUPLEX;
\r
9014 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9017 stream_.mode = mode;
\r
9019 // Setup callback thread.
\r
9020 stream_.callbackInfo.object = (void *) this;
\r
9022 // Set the thread attributes for joinable and realtime scheduling
\r
9023 // priority. The higher priority will only take affect if the
\r
9024 // program is run as root or suid.
\r
9025 pthread_attr_t attr;
\r
9026 pthread_attr_init( &attr );
\r
9027 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9028 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9029 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9030 struct sched_param param;
\r
9031 int priority = options->priority;
\r
9032 int min = sched_get_priority_min( SCHED_RR );
\r
9033 int max = sched_get_priority_max( SCHED_RR );
\r
9034 if ( priority < min ) priority = min;
\r
9035 else if ( priority > max ) priority = max;
\r
9036 param.sched_priority = priority;
\r
9037 pthread_attr_setschedparam( &attr, ¶m );
\r
9038 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9041 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9043 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9046 stream_.callbackInfo.isRunning = true;
\r
9047 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9048 pthread_attr_destroy( &attr );
\r
9050 stream_.callbackInfo.isRunning = false;
\r
9051 errorText_ = "RtApiOss::error creating callback thread!";
\r
9060 pthread_cond_destroy( &handle->runnable );
\r
9061 if ( handle->id[0] ) close( handle->id[0] );
\r
9062 if ( handle->id[1] ) close( handle->id[1] );
\r
9064 stream_.apiHandle = 0;
\r
9067 for ( int i=0; i<2; i++ ) {
\r
9068 if ( stream_.userBuffer[i] ) {
\r
9069 free( stream_.userBuffer[i] );
\r
9070 stream_.userBuffer[i] = 0;
\r
9074 if ( stream_.deviceBuffer ) {
\r
9075 free( stream_.deviceBuffer );
\r
9076 stream_.deviceBuffer = 0;
\r
9082 void RtApiOss :: closeStream()
\r
9084 if ( stream_.state == STREAM_CLOSED ) {
\r
9085 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9086 error( RtAudioError::WARNING );
\r
9090 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9091 stream_.callbackInfo.isRunning = false;
\r
9092 MUTEX_LOCK( &stream_.mutex );
\r
9093 if ( stream_.state == STREAM_STOPPED )
\r
9094 pthread_cond_signal( &handle->runnable );
\r
9095 MUTEX_UNLOCK( &stream_.mutex );
\r
9096 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9098 if ( stream_.state == STREAM_RUNNING ) {
\r
9099 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9100 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9102 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9103 stream_.state = STREAM_STOPPED;
\r
9107 pthread_cond_destroy( &handle->runnable );
\r
9108 if ( handle->id[0] ) close( handle->id[0] );
\r
9109 if ( handle->id[1] ) close( handle->id[1] );
\r
9111 stream_.apiHandle = 0;
\r
9114 for ( int i=0; i<2; i++ ) {
\r
9115 if ( stream_.userBuffer[i] ) {
\r
9116 free( stream_.userBuffer[i] );
\r
9117 stream_.userBuffer[i] = 0;
\r
9121 if ( stream_.deviceBuffer ) {
\r
9122 free( stream_.deviceBuffer );
\r
9123 stream_.deviceBuffer = 0;
\r
9126 stream_.mode = UNINITIALIZED;
\r
9127 stream_.state = STREAM_CLOSED;
\r
9130 void RtApiOss :: startStream()
\r
9133 if ( stream_.state == STREAM_RUNNING ) {
\r
9134 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9135 error( RtAudioError::WARNING );
\r
9139 MUTEX_LOCK( &stream_.mutex );
\r
9141 stream_.state = STREAM_RUNNING;
\r
9143 // No need to do anything else here ... OSS automatically starts
\r
9144 // when fed samples.
\r
9146 MUTEX_UNLOCK( &stream_.mutex );
\r
9148 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9149 pthread_cond_signal( &handle->runnable );
\r
9152 void RtApiOss :: stopStream()
\r
9155 if ( stream_.state == STREAM_STOPPED ) {
\r
9156 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9157 error( RtAudioError::WARNING );
\r
9161 MUTEX_LOCK( &stream_.mutex );
\r
9163 // The state might change while waiting on a mutex.
\r
9164 if ( stream_.state == STREAM_STOPPED ) {
\r
9165 MUTEX_UNLOCK( &stream_.mutex );
\r
9170 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9171 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9173 // Flush the output with zeros a few times.
\r
9176 RtAudioFormat format;
\r
9178 if ( stream_.doConvertBuffer[0] ) {
\r
9179 buffer = stream_.deviceBuffer;
\r
9180 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9181 format = stream_.deviceFormat[0];
\r
9184 buffer = stream_.userBuffer[0];
\r
9185 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9186 format = stream_.userFormat;
\r
9189 memset( buffer, 0, samples * formatBytes(format) );
\r
9190 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9191 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9192 if ( result == -1 ) {
\r
9193 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9194 error( RtAudioError::WARNING );
\r
9198 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9199 if ( result == -1 ) {
\r
9200 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9201 errorText_ = errorStream_.str();
\r
9204 handle->triggered = false;
\r
9207 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9208 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9209 if ( result == -1 ) {
\r
9210 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9211 errorText_ = errorStream_.str();
\r
9217 stream_.state = STREAM_STOPPED;
\r
9218 MUTEX_UNLOCK( &stream_.mutex );
\r
9220 if ( result != -1 ) return;
\r
9221 error( RtAudioError::SYSTEM_ERROR );
\r
9224 void RtApiOss :: abortStream()
\r
9227 if ( stream_.state == STREAM_STOPPED ) {
\r
9228 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9229 error( RtAudioError::WARNING );
\r
9233 MUTEX_LOCK( &stream_.mutex );
\r
9235 // The state might change while waiting on a mutex.
\r
9236 if ( stream_.state == STREAM_STOPPED ) {
\r
9237 MUTEX_UNLOCK( &stream_.mutex );
\r
9242 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9243 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9244 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9245 if ( result == -1 ) {
\r
9246 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9247 errorText_ = errorStream_.str();
\r
9250 handle->triggered = false;
\r
9253 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9254 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9255 if ( result == -1 ) {
\r
9256 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9257 errorText_ = errorStream_.str();
\r
9263 stream_.state = STREAM_STOPPED;
\r
9264 MUTEX_UNLOCK( &stream_.mutex );
\r
9266 if ( result != -1 ) return;
\r
9267 error( RtAudioError::SYSTEM_ERROR );
\r
9270 void RtApiOss :: callbackEvent()
\r
9272 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9273 if ( stream_.state == STREAM_STOPPED ) {
\r
9274 MUTEX_LOCK( &stream_.mutex );
\r
9275 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9276 if ( stream_.state != STREAM_RUNNING ) {
\r
9277 MUTEX_UNLOCK( &stream_.mutex );
\r
9280 MUTEX_UNLOCK( &stream_.mutex );
\r
9283 if ( stream_.state == STREAM_CLOSED ) {
\r
9284 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9285 error( RtAudioError::WARNING );
\r
9289 // Invoke user callback to get fresh output data.
\r
9290 int doStopStream = 0;
\r
9291 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9292 double streamTime = getStreamTime();
\r
9293 RtAudioStreamStatus status = 0;
\r
9294 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9295 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9296 handle->xrun[0] = false;
\r
9298 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9299 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9300 handle->xrun[1] = false;
\r
9302 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9303 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9304 if ( doStopStream == 2 ) {
\r
9305 this->abortStream();
\r
9309 MUTEX_LOCK( &stream_.mutex );
\r
9311 // The state might change while waiting on a mutex.
\r
9312 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9317 RtAudioFormat format;
\r
9319 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9321 // Setup parameters and do buffer conversion if necessary.
\r
9322 if ( stream_.doConvertBuffer[0] ) {
\r
9323 buffer = stream_.deviceBuffer;
\r
9324 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9325 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9326 format = stream_.deviceFormat[0];
\r
9329 buffer = stream_.userBuffer[0];
\r
9330 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9331 format = stream_.userFormat;
\r
9334 // Do byte swapping if necessary.
\r
9335 if ( stream_.doByteSwap[0] )
\r
9336 byteSwapBuffer( buffer, samples, format );
\r
9338 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9340 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9341 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9342 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9343 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9344 handle->triggered = true;
\r
9347 // Write samples to device.
\r
9348 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9350 if ( result == -1 ) {
\r
9351 // We'll assume this is an underrun, though there isn't a
\r
9352 // specific means for determining that.
\r
9353 handle->xrun[0] = true;
\r
9354 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9355 error( RtAudioError::WARNING );
\r
9356 // Continue on to input section.
\r
9360 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9362 // Setup parameters.
\r
9363 if ( stream_.doConvertBuffer[1] ) {
\r
9364 buffer = stream_.deviceBuffer;
\r
9365 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9366 format = stream_.deviceFormat[1];
\r
9369 buffer = stream_.userBuffer[1];
\r
9370 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9371 format = stream_.userFormat;
\r
9374 // Read samples from device.
\r
9375 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9377 if ( result == -1 ) {
\r
9378 // We'll assume this is an overrun, though there isn't a
\r
9379 // specific means for determining that.
\r
9380 handle->xrun[1] = true;
\r
9381 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9382 error( RtAudioError::WARNING );
\r
9386 // Do byte swapping if necessary.
\r
9387 if ( stream_.doByteSwap[1] )
\r
9388 byteSwapBuffer( buffer, samples, format );
\r
9390 // Do buffer conversion if necessary.
\r
9391 if ( stream_.doConvertBuffer[1] )
\r
9392 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9396 MUTEX_UNLOCK( &stream_.mutex );
\r
9398 RtApi::tickStreamTime();
\r
9399 if ( doStopStream == 1 ) this->stopStream();
\r
9402 static void *ossCallbackHandler( void *ptr )
\r
9404 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9405 RtApiOss *object = (RtApiOss *) info->object;
\r
9406 bool *isRunning = &info->isRunning;
\r
9408 while ( *isRunning == true ) {
\r
9409 pthread_testcancel();
\r
9410 object->callbackEvent();
\r
9413 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
9426 // This method can be modified to control the behavior of error
\r
9427 // message printing.
\r
9428 void RtApi :: error( RtAudioError::Type type )
\r
9430 errorStream_.str(""); // clear the ostringstream
\r
9432 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9433 if ( errorCallback ) {
\r
9434 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9436 if ( firstErrorOccurred_ )
\r
9439 firstErrorOccurred_ = true;
\r
9440 const std::string errorMessage = errorText_;
\r
9442 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9443 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9447 errorCallback( type, errorMessage );
\r
9448 firstErrorOccurred_ = false;
\r
9452 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9453 std::cerr << '\n' << errorText_ << "\n\n";
\r
9454 else if ( type != RtAudioError::WARNING )
\r
9455 throw( RtAudioError( errorText_, type ) );
\r
9458 void RtApi :: verifyStream()
\r
9460 if ( stream_.state == STREAM_CLOSED ) {
\r
9461 errorText_ = "RtApi:: a stream is not open!";
\r
9462 error( RtAudioError::INVALID_USE );
\r
9466 void RtApi :: clearStreamInfo()
\r
9468 stream_.mode = UNINITIALIZED;
\r
9469 stream_.state = STREAM_CLOSED;
\r
9470 stream_.sampleRate = 0;
\r
9471 stream_.bufferSize = 0;
\r
9472 stream_.nBuffers = 0;
\r
9473 stream_.userFormat = 0;
\r
9474 stream_.userInterleaved = true;
\r
9475 stream_.streamTime = 0.0;
\r
9476 stream_.apiHandle = 0;
\r
9477 stream_.deviceBuffer = 0;
\r
9478 stream_.callbackInfo.callback = 0;
\r
9479 stream_.callbackInfo.userData = 0;
\r
9480 stream_.callbackInfo.isRunning = false;
\r
9481 stream_.callbackInfo.errorCallback = 0;
\r
9482 for ( int i=0; i<2; i++ ) {
\r
9483 stream_.device[i] = 11111;
\r
9484 stream_.doConvertBuffer[i] = false;
\r
9485 stream_.deviceInterleaved[i] = true;
\r
9486 stream_.doByteSwap[i] = false;
\r
9487 stream_.nUserChannels[i] = 0;
\r
9488 stream_.nDeviceChannels[i] = 0;
\r
9489 stream_.channelOffset[i] = 0;
\r
9490 stream_.deviceFormat[i] = 0;
\r
9491 stream_.latency[i] = 0;
\r
9492 stream_.userBuffer[i] = 0;
\r
9493 stream_.convertInfo[i].channels = 0;
\r
9494 stream_.convertInfo[i].inJump = 0;
\r
9495 stream_.convertInfo[i].outJump = 0;
\r
9496 stream_.convertInfo[i].inFormat = 0;
\r
9497 stream_.convertInfo[i].outFormat = 0;
\r
9498 stream_.convertInfo[i].inOffset.clear();
\r
9499 stream_.convertInfo[i].outOffset.clear();
\r
9503 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9505 if ( format == RTAUDIO_SINT16 )
\r
9507 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9509 else if ( format == RTAUDIO_FLOAT64 )
\r
9511 else if ( format == RTAUDIO_SINT24 )
\r
9513 else if ( format == RTAUDIO_SINT8 )
\r
9516 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9517 error( RtAudioError::WARNING );
\r
// Fills in stream_.convertInfo[mode] — channel counts, sample formats,
// per-frame jumps, and per-channel offsets — describing how to convert
// between the user buffer and the device buffer for the given direction,
// including any (de)interleaving and a firstChannel offset.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source -> interleaved destination.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source -> non-interleaved destination.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
\r
9602 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9604 // This function does format conversion, input/output channel compensation, and
\r
9605 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9606 // the lower three bytes of a 32-bit integer.
\r
9608 // Clear our device buffer when in/out duplex device channels are different
\r
9609 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9610 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9611 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9614 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9616 Float64 *out = (Float64 *)outBuffer;
\r
9618 if (info.inFormat == RTAUDIO_SINT8) {
\r
9619 signed char *in = (signed char *)inBuffer;
\r
9620 scale = 1.0 / 127.5;
\r
9621 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9622 for (j=0; j<info.channels; j++) {
\r
9623 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9624 out[info.outOffset[j]] += 0.5;
\r
9625 out[info.outOffset[j]] *= scale;
\r
9627 in += info.inJump;
\r
9628 out += info.outJump;
\r
9631 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9632 Int16 *in = (Int16 *)inBuffer;
\r
9633 scale = 1.0 / 32767.5;
\r
9634 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9635 for (j=0; j<info.channels; j++) {
\r
9636 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9637 out[info.outOffset[j]] += 0.5;
\r
9638 out[info.outOffset[j]] *= scale;
\r
9640 in += info.inJump;
\r
9641 out += info.outJump;
\r
9644 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9645 Int24 *in = (Int24 *)inBuffer;
\r
9646 scale = 1.0 / 8388607.5;
\r
9647 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9648 for (j=0; j<info.channels; j++) {
\r
9649 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9650 out[info.outOffset[j]] += 0.5;
\r
9651 out[info.outOffset[j]] *= scale;
\r
9653 in += info.inJump;
\r
9654 out += info.outJump;
\r
9657 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9658 Int32 *in = (Int32 *)inBuffer;
\r
9659 scale = 1.0 / 2147483647.5;
\r
9660 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9661 for (j=0; j<info.channels; j++) {
\r
9662 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9663 out[info.outOffset[j]] += 0.5;
\r
9664 out[info.outOffset[j]] *= scale;
\r
9666 in += info.inJump;
\r
9667 out += info.outJump;
\r
9670 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9671 Float32 *in = (Float32 *)inBuffer;
\r
9672 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9673 for (j=0; j<info.channels; j++) {
\r
9674 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9676 in += info.inJump;
\r
9677 out += info.outJump;
\r
9680 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9681 // Channel compensation and/or (de)interleaving only.
\r
9682 Float64 *in = (Float64 *)inBuffer;
\r
9683 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9684 for (j=0; j<info.channels; j++) {
\r
9685 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9687 in += info.inJump;
\r
9688 out += info.outJump;
\r
9692 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9694 Float32 *out = (Float32 *)outBuffer;
\r
9696 if (info.inFormat == RTAUDIO_SINT8) {
\r
9697 signed char *in = (signed char *)inBuffer;
\r
9698 scale = (Float32) ( 1.0 / 127.5 );
\r
9699 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9700 for (j=0; j<info.channels; j++) {
\r
9701 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9702 out[info.outOffset[j]] += 0.5;
\r
9703 out[info.outOffset[j]] *= scale;
\r
9705 in += info.inJump;
\r
9706 out += info.outJump;
\r
9709 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9710 Int16 *in = (Int16 *)inBuffer;
\r
9711 scale = (Float32) ( 1.0 / 32767.5 );
\r
9712 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9713 for (j=0; j<info.channels; j++) {
\r
9714 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9715 out[info.outOffset[j]] += 0.5;
\r
9716 out[info.outOffset[j]] *= scale;
\r
9718 in += info.inJump;
\r
9719 out += info.outJump;
\r
9722 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9723 Int24 *in = (Int24 *)inBuffer;
\r
9724 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9726 for (j=0; j<info.channels; j++) {
\r
9727 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9728 out[info.outOffset[j]] += 0.5;
\r
9729 out[info.outOffset[j]] *= scale;
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9736 Int32 *in = (Int32 *)inBuffer;
\r
9737 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9739 for (j=0; j<info.channels; j++) {
\r
9740 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9741 out[info.outOffset[j]] += 0.5;
\r
9742 out[info.outOffset[j]] *= scale;
\r
9744 in += info.inJump;
\r
9745 out += info.outJump;
\r
9748 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9749 // Channel compensation and/or (de)interleaving only.
\r
9750 Float32 *in = (Float32 *)inBuffer;
\r
9751 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9752 for (j=0; j<info.channels; j++) {
\r
9753 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9755 in += info.inJump;
\r
9756 out += info.outJump;
\r
9759 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9760 Float64 *in = (Float64 *)inBuffer;
\r
9761 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9762 for (j=0; j<info.channels; j++) {
\r
9763 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9765 in += info.inJump;
\r
9766 out += info.outJump;
\r
9770 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9771 Int32 *out = (Int32 *)outBuffer;
\r
9772 if (info.inFormat == RTAUDIO_SINT8) {
\r
9773 signed char *in = (signed char *)inBuffer;
\r
9774 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9775 for (j=0; j<info.channels; j++) {
\r
9776 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9777 out[info.outOffset[j]] <<= 24;
\r
9779 in += info.inJump;
\r
9780 out += info.outJump;
\r
9783 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9784 Int16 *in = (Int16 *)inBuffer;
\r
9785 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9786 for (j=0; j<info.channels; j++) {
\r
9787 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9788 out[info.outOffset[j]] <<= 16;
\r
9790 in += info.inJump;
\r
9791 out += info.outJump;
\r
9794 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9795 Int24 *in = (Int24 *)inBuffer;
\r
9796 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9797 for (j=0; j<info.channels; j++) {
\r
9798 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9799 out[info.outOffset[j]] <<= 8;
\r
9801 in += info.inJump;
\r
9802 out += info.outJump;
\r
9805 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9806 // Channel compensation and/or (de)interleaving only.
\r
9807 Int32 *in = (Int32 *)inBuffer;
\r
9808 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9809 for (j=0; j<info.channels; j++) {
\r
9810 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9812 in += info.inJump;
\r
9813 out += info.outJump;
\r
9816 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9817 Float32 *in = (Float32 *)inBuffer;
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9827 Float64 *in = (Float64 *)inBuffer;
\r
9828 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9829 for (j=0; j<info.channels; j++) {
\r
9830 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9832 in += info.inJump;
\r
9833 out += info.outJump;
\r
9837 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9838 Int24 *out = (Int24 *)outBuffer;
\r
9839 if (info.inFormat == RTAUDIO_SINT8) {
\r
9840 signed char *in = (signed char *)inBuffer;
\r
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9842 for (j=0; j<info.channels; j++) {
\r
9843 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9844 //out[info.outOffset[j]] <<= 16;
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9850 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9851 Int16 *in = (Int16 *)inBuffer;
\r
9852 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9853 for (j=0; j<info.channels; j++) {
\r
9854 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9855 //out[info.outOffset[j]] <<= 8;
\r
9857 in += info.inJump;
\r
9858 out += info.outJump;
\r
9861 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9862 // Channel compensation and/or (de)interleaving only.
\r
9863 Int24 *in = (Int24 *)inBuffer;
\r
9864 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9865 for (j=0; j<info.channels; j++) {
\r
9866 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9868 in += info.inJump;
\r
9869 out += info.outJump;
\r
9872 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9873 Int32 *in = (Int32 *)inBuffer;
\r
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9875 for (j=0; j<info.channels; j++) {
\r
9876 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9877 //out[info.outOffset[j]] >>= 8;
\r
9879 in += info.inJump;
\r
9880 out += info.outJump;
\r
9883 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9884 Float32 *in = (Float32 *)inBuffer;
\r
9885 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9886 for (j=0; j<info.channels; j++) {
\r
9887 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9889 in += info.inJump;
\r
9890 out += info.outJump;
\r
9893 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9894 Float64 *in = (Float64 *)inBuffer;
\r
9895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9896 for (j=0; j<info.channels; j++) {
\r
9897 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9899 in += info.inJump;
\r
9900 out += info.outJump;
\r
9904 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9905 Int16 *out = (Int16 *)outBuffer;
\r
9906 if (info.inFormat == RTAUDIO_SINT8) {
\r
9907 signed char *in = (signed char *)inBuffer;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9911 out[info.outOffset[j]] <<= 8;
\r
9913 in += info.inJump;
\r
9914 out += info.outJump;
\r
9917 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9918 // Channel compensation and/or (de)interleaving only.
\r
9919 Int16 *in = (Int16 *)inBuffer;
\r
9920 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9921 for (j=0; j<info.channels; j++) {
\r
9922 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9924 in += info.inJump;
\r
9925 out += info.outJump;
\r
9928 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9929 Int24 *in = (Int24 *)inBuffer;
\r
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9931 for (j=0; j<info.channels; j++) {
\r
9932 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9934 in += info.inJump;
\r
9935 out += info.outJump;
\r
9938 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9939 Int32 *in = (Int32 *)inBuffer;
\r
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9941 for (j=0; j<info.channels; j++) {
\r
9942 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9944 in += info.inJump;
\r
9945 out += info.outJump;
\r
9948 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9949 Float32 *in = (Float32 *)inBuffer;
\r
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9951 for (j=0; j<info.channels; j++) {
\r
9952 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9954 in += info.inJump;
\r
9955 out += info.outJump;
\r
9958 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9959 Float64 *in = (Float64 *)inBuffer;
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9969 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9970 signed char *out = (signed char *)outBuffer;
\r
9971 if (info.inFormat == RTAUDIO_SINT8) {
\r
9972 // Channel compensation and/or (de)interleaving only.
\r
9973 signed char *in = (signed char *)inBuffer;
\r
9974 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9975 for (j=0; j<info.channels; j++) {
\r
9976 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9978 in += info.inJump;
\r
9979 out += info.outJump;
\r
9982 if (info.inFormat == RTAUDIO_SINT16) {
\r
9983 Int16 *in = (Int16 *)inBuffer;
\r
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9985 for (j=0; j<info.channels; j++) {
\r
9986 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9988 in += info.inJump;
\r
9989 out += info.outJump;
\r
9992 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9993 Int24 *in = (Int24 *)inBuffer;
\r
9994 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9995 for (j=0; j<info.channels; j++) {
\r
9996 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
9998 in += info.inJump;
\r
9999 out += info.outJump;
\r
10002 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10003 Int32 *in = (Int32 *)inBuffer;
\r
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10005 for (j=0; j<info.channels; j++) {
\r
10006 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10008 in += info.inJump;
\r
10009 out += info.outJump;
\r
10012 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10013 Float32 *in = (Float32 *)inBuffer;
\r
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10015 for (j=0; j<info.channels; j++) {
\r
10016 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10018 in += info.inJump;
\r
10019 out += info.outJump;
\r
10022 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10023 Float64 *in = (Float64 *)inBuffer;
\r
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10025 for (j=0; j<info.channels; j++) {
\r
10026 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10028 in += info.inJump;
\r
10029 out += info.outJump;
\r
10035 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10036 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10037 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10039 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10041 register char val;
\r
10042 register char *ptr;
\r
10045 if ( format == RTAUDIO_SINT16 ) {
\r
10046 for ( unsigned int i=0; i<samples; i++ ) {
\r
10047 // Swap 1st and 2nd bytes.
\r
10049 *(ptr) = *(ptr+1);
\r
10052 // Increment 2 bytes.
\r
10056 else if ( format == RTAUDIO_SINT32 ||
\r
10057 format == RTAUDIO_FLOAT32 ) {
\r
10058 for ( unsigned int i=0; i<samples; i++ ) {
\r
10059 // Swap 1st and 4th bytes.
\r
10061 *(ptr) = *(ptr+3);
\r
10064 // Swap 2nd and 3rd bytes.
\r
10067 *(ptr) = *(ptr+1);
\r
10070 // Increment 3 more bytes.
\r
10074 else if ( format == RTAUDIO_SINT24 ) {
\r
10075 for ( unsigned int i=0; i<samples; i++ ) {
\r
10076 // Swap 1st and 3rd bytes.
\r
10078 *(ptr) = *(ptr+2);
\r
10081 // Increment 2 more bytes.
\r
10085 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10086 for ( unsigned int i=0; i<samples; i++ ) {
\r
10087 // Swap 1st and 8th bytes
\r
10089 *(ptr) = *(ptr+7);
\r
10092 // Swap 2nd and 7th bytes
\r
10095 *(ptr) = *(ptr+5);
\r
10098 // Swap 3rd and 6th bytes
\r
10101 *(ptr) = *(ptr+3);
\r
10104 // Swap 4th and 5th bytes
\r
10107 *(ptr) = *(ptr+1);
\r
10110 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r