/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Windows critical sections, pthread mutexes
// on the POSIX-backed APIs, and harmless no-op expressions for the dummy
// (no-API) build so call sites still compile.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 void RtApi :: setStreamTime( double time )
\r
412 stream_.streamTime = time;
\r
415 unsigned int RtApi :: getStreamSampleRate( void )
\r
419 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //
\r
429 #if defined(__MACOSX_CORE__)
\r
// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.
\r
446 // A structure to hold various information related to the CoreAudio API
\r
448 struct CoreHandle {
\r
449 AudioDeviceID id[2]; // device ids
\r
450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
451 AudioDeviceIOProcID procId[2];
\r
453 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
454 UInt32 nStreams[2]; // number of streams to use
\r
456 char *deviceBuffer;
\r
457 pthread_cond_t condition;
\r
458 int drainCounter; // Tracks callback counts when draining
\r
459 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
462 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
465 RtApiCore:: RtApiCore()
\r
467 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
468 // This is a largely undocumented but absolutely necessary
\r
469 // requirement starting with OS-X 10.6. If not called, queries and
\r
470 // updates to various audio device properties are not handled
\r
472 CFRunLoopRef theRunLoop = NULL;
\r
473 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
474 kAudioObjectPropertyScopeGlobal,
\r
475 kAudioObjectPropertyElementMaster };
\r
476 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
477 if ( result != noErr ) {
\r
478 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
479 error( RtAudioError::WARNING );
\r
484 RtApiCore :: ~RtApiCore()
\r
486 // The subclass destructor gets called before the base class
\r
487 // destructor, so close an existing stream before deallocating
\r
488 // apiDeviceId memory.
\r
489 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
492 unsigned int RtApiCore :: getDeviceCount( void )
\r
494 // Find out how many audio devices there are, if any.
\r
496 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
500 error( RtAudioError::WARNING );
\r
504 return dataSize / sizeof( AudioDeviceID );
\r
507 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
509 unsigned int nDevices = getDeviceCount();
\r
510 if ( nDevices <= 1 ) return 0;
\r
513 UInt32 dataSize = sizeof( AudioDeviceID );
\r
514 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
515 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
516 if ( result != noErr ) {
\r
517 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
518 error( RtAudioError::WARNING );
\r
522 dataSize *= nDevices;
\r
523 AudioDeviceID deviceList[ nDevices ];
\r
524 property.mSelector = kAudioHardwarePropertyDevices;
\r
525 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
526 if ( result != noErr ) {
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
528 error( RtAudioError::WARNING );
\r
532 for ( unsigned int i=0; i<nDevices; i++ )
\r
533 if ( id == deviceList[i] ) return i;
\r
535 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
536 error( RtAudioError::WARNING );
\r
540 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
542 unsigned int nDevices = getDeviceCount();
\r
543 if ( nDevices <= 1 ) return 0;
\r
546 UInt32 dataSize = sizeof( AudioDeviceID );
\r
547 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
548 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
549 if ( result != noErr ) {
\r
550 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
551 error( RtAudioError::WARNING );
\r
555 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
556 AudioDeviceID deviceList[ nDevices ];
\r
557 property.mSelector = kAudioHardwarePropertyDevices;
\r
558 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
559 if ( result != noErr ) {
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
561 error( RtAudioError::WARNING );
\r
565 for ( unsigned int i=0; i<nDevices; i++ )
\r
566 if ( id == deviceList[i] ) return i;
\r
568 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
569 error( RtAudioError::WARNING );
\r
573 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
575 RtAudio::DeviceInfo info;
\r
576 info.probed = false;
\r
579 unsigned int nDevices = getDeviceCount();
\r
580 if ( nDevices == 0 ) {
\r
581 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
582 error( RtAudioError::INVALID_USE );
\r
586 if ( device >= nDevices ) {
\r
587 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
588 error( RtAudioError::INVALID_USE );
\r
592 AudioDeviceID deviceList[ nDevices ];
\r
593 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
594 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
595 kAudioObjectPropertyScopeGlobal,
\r
596 kAudioObjectPropertyElementMaster };
\r
597 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
598 0, NULL, &dataSize, (void *) &deviceList );
\r
599 if ( result != noErr ) {
\r
600 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
601 error( RtAudioError::WARNING );
\r
605 AudioDeviceID id = deviceList[ device ];
\r
607 // Get the device name.
\r
609 CFStringRef cfname;
\r
610 dataSize = sizeof( CFStringRef );
\r
611 property.mSelector = kAudioObjectPropertyManufacturer;
\r
612 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
613 if ( result != noErr ) {
\r
614 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
615 errorText_ = errorStream_.str();
\r
616 error( RtAudioError::WARNING );
\r
620 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
621 int length = CFStringGetLength(cfname);
\r
622 char *mname = (char *)malloc(length * 3 + 1);
\r
623 #if defined( UNICODE ) || defined( _UNICODE )
\r
624 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
626 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
628 info.name.append( (const char *)mname, strlen(mname) );
\r
629 info.name.append( ": " );
\r
630 CFRelease( cfname );
\r
633 property.mSelector = kAudioObjectPropertyName;
\r
634 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
635 if ( result != noErr ) {
\r
636 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
637 errorText_ = errorStream_.str();
\r
638 error( RtAudioError::WARNING );
\r
642 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
643 length = CFStringGetLength(cfname);
\r
644 char *name = (char *)malloc(length * 3 + 1);
\r
645 #if defined( UNICODE ) || defined( _UNICODE )
\r
646 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
648 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
650 info.name.append( (const char *)name, strlen(name) );
\r
651 CFRelease( cfname );
\r
654 // Get the output stream "configuration".
\r
655 AudioBufferList *bufferList = nil;
\r
656 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
657 property.mScope = kAudioDevicePropertyScopeOutput;
\r
658 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
660 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
661 if ( result != noErr || dataSize == 0 ) {
\r
662 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
663 errorText_ = errorStream_.str();
\r
664 error( RtAudioError::WARNING );
\r
668 // Allocate the AudioBufferList.
\r
669 bufferList = (AudioBufferList *) malloc( dataSize );
\r
670 if ( bufferList == NULL ) {
\r
671 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
672 error( RtAudioError::WARNING );
\r
676 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
677 if ( result != noErr || dataSize == 0 ) {
\r
678 free( bufferList );
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Get output channel information.
\r
686 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
687 for ( i=0; i<nStreams; i++ )
\r
688 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
689 free( bufferList );
\r
691 // Get the input stream "configuration".
\r
692 property.mScope = kAudioDevicePropertyScopeInput;
\r
693 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
696 errorText_ = errorStream_.str();
\r
697 error( RtAudioError::WARNING );
\r
701 // Allocate the AudioBufferList.
\r
702 bufferList = (AudioBufferList *) malloc( dataSize );
\r
703 if ( bufferList == NULL ) {
\r
704 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
705 error( RtAudioError::WARNING );
\r
709 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
710 if (result != noErr || dataSize == 0) {
\r
711 free( bufferList );
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Get input channel information.
\r
719 nStreams = bufferList->mNumberBuffers;
\r
720 for ( i=0; i<nStreams; i++ )
\r
721 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
722 free( bufferList );
\r
724 // If device opens for both playback and capture, we determine the channels.
\r
725 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
726 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
728 // Probe the device sample rates.
\r
729 bool isInput = false;
\r
730 if ( info.outputChannels == 0 ) isInput = true;
\r
732 // Determine the supported sample rates.
\r
733 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
734 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
735 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
736 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
737 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
738 errorText_ = errorStream_.str();
\r
739 error( RtAudioError::WARNING );
\r
743 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
744 AudioValueRange rangeList[ nRanges ];
\r
745 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
746 if ( result != kAudioHardwareNoError ) {
\r
747 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
748 errorText_ = errorStream_.str();
\r
749 error( RtAudioError::WARNING );
\r
753 // The sample rate reporting mechanism is a bit of a mystery. It
\r
754 // seems that it can either return individual rates or a range of
\r
755 // rates. I assume that if the min / max range values are the same,
\r
756 // then that represents a single supported rate and if the min / max
\r
757 // range values are different, the device supports an arbitrary
\r
758 // range of values (though there might be multiple ranges, so we'll
\r
759 // use the most conservative range).
\r
760 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
761 bool haveValueRange = false;
\r
762 info.sampleRates.clear();
\r
763 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
764 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
765 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
767 haveValueRange = true;
\r
768 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
769 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
773 if ( haveValueRange ) {
\r
774 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
775 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
776 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
780 // Sort and remove any redundant values
\r
781 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
782 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
784 if ( info.sampleRates.size() == 0 ) {
\r
785 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
786 errorText_ = errorStream_.str();
\r
787 error( RtAudioError::WARNING );
\r
791 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
792 // Thus, any other "physical" formats supported by the device are of
\r
793 // no interest to the client.
\r
794 info.nativeFormats = RTAUDIO_FLOAT32;
\r
796 if ( info.outputChannels > 0 )
\r
797 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
798 if ( info.inputChannels > 0 )
\r
799 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
801 info.probed = true;
\r
805 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
806 const AudioTimeStamp* /*inNow*/,
\r
807 const AudioBufferList* inInputData,
\r
808 const AudioTimeStamp* /*inInputTime*/,
\r
809 AudioBufferList* outOutputData,
\r
810 const AudioTimeStamp* /*inOutputTime*/,
\r
811 void* infoPointer )
\r
813 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
815 RtApiCore *object = (RtApiCore *) info->object;
\r
816 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
817 return kAudioHardwareUnspecifiedError;
\r
819 return kAudioHardwareNoError;
\r
822 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
824 const AudioObjectPropertyAddress properties[],
\r
825 void* handlePointer )
\r
827 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
828 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
829 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
830 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
831 handle->xrun[1] = true;
\r
833 handle->xrun[0] = true;
\r
837 return kAudioHardwareNoError;
\r
840 static OSStatus rateListener( AudioObjectID inDevice,
\r
841 UInt32 /*nAddresses*/,
\r
842 const AudioObjectPropertyAddress /*properties*/[],
\r
843 void* ratePointer )
\r
845 Float64 *rate = (Float64 *) ratePointer;
\r
846 UInt32 dataSize = sizeof( Float64 );
\r
847 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
848 kAudioObjectPropertyScopeGlobal,
\r
849 kAudioObjectPropertyElementMaster };
\r
850 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
851 return kAudioHardwareNoError;
\r
854 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
855 unsigned int firstChannel, unsigned int sampleRate,
\r
856 RtAudioFormat format, unsigned int *bufferSize,
\r
857 RtAudio::StreamOptions *options )
\r
860 unsigned int nDevices = getDeviceCount();
\r
861 if ( nDevices == 0 ) {
\r
862 // This should not happen because a check is made before this function is called.
\r
863 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
867 if ( device >= nDevices ) {
\r
868 // This should not happen because a check is made before this function is called.
\r
869 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
873 AudioDeviceID deviceList[ nDevices ];
\r
874 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
875 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
876 kAudioObjectPropertyScopeGlobal,
\r
877 kAudioObjectPropertyElementMaster };
\r
878 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
879 0, NULL, &dataSize, (void *) &deviceList );
\r
880 if ( result != noErr ) {
\r
881 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
885 AudioDeviceID id = deviceList[ device ];
\r
887 // Setup for stream mode.
\r
888 bool isInput = false;
\r
889 if ( mode == INPUT ) {
\r
891 property.mScope = kAudioDevicePropertyScopeInput;
\r
894 property.mScope = kAudioDevicePropertyScopeOutput;
\r
896 // Get the stream "configuration".
\r
897 AudioBufferList *bufferList = nil;
\r
899 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
900 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
901 if ( result != noErr || dataSize == 0 ) {
\r
902 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
903 errorText_ = errorStream_.str();
\r
907 // Allocate the AudioBufferList.
\r
908 bufferList = (AudioBufferList *) malloc( dataSize );
\r
909 if ( bufferList == NULL ) {
\r
910 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
914 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
915 if (result != noErr || dataSize == 0) {
\r
916 free( bufferList );
\r
917 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
918 errorText_ = errorStream_.str();
\r
922 // Search for one or more streams that contain the desired number of
\r
923 // channels. CoreAudio devices can have an arbitrary number of
\r
924 // streams and each stream can have an arbitrary number of channels.
\r
925 // For each stream, a single buffer of interleaved samples is
\r
926 // provided. RtAudio prefers the use of one stream of interleaved
\r
927 // data or multiple consecutive single-channel streams. However, we
\r
928 // now support multiple consecutive multi-channel streams of
\r
929 // interleaved data as well.
\r
930 UInt32 iStream, offsetCounter = firstChannel;
\r
931 UInt32 nStreams = bufferList->mNumberBuffers;
\r
932 bool monoMode = false;
\r
933 bool foundStream = false;
\r
935 // First check that the device supports the requested number of
\r
937 UInt32 deviceChannels = 0;
\r
938 for ( iStream=0; iStream<nStreams; iStream++ )
\r
939 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
941 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
944 errorText_ = errorStream_.str();
\r
948 // Look for a single stream meeting our needs.
\r
949 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
950 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
951 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
952 if ( streamChannels >= channels + offsetCounter ) {
\r
953 firstStream = iStream;
\r
954 channelOffset = offsetCounter;
\r
955 foundStream = true;
\r
958 if ( streamChannels > offsetCounter ) break;
\r
959 offsetCounter -= streamChannels;
\r
962 // If we didn't find a single stream above, then we should be able
\r
963 // to meet the channel specification with multiple streams.
\r
964 if ( foundStream == false ) {
\r
966 offsetCounter = firstChannel;
\r
967 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
968 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
969 if ( streamChannels > offsetCounter ) break;
\r
970 offsetCounter -= streamChannels;
\r
973 firstStream = iStream;
\r
974 channelOffset = offsetCounter;
\r
975 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
977 if ( streamChannels > 1 ) monoMode = false;
\r
978 while ( channelCounter > 0 ) {
\r
979 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
980 if ( streamChannels > 1 ) monoMode = false;
\r
981 channelCounter -= streamChannels;
\r
986 free( bufferList );
\r
988 // Determine the buffer size.
\r
989 AudioValueRange bufferRange;
\r
990 dataSize = sizeof( AudioValueRange );
\r
991 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
992 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
994 if ( result != noErr ) {
\r
995 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
996 errorText_ = errorStream_.str();
\r
1000 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1001 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1002 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1004 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1005 // need to make this setting for the master channel.
\r
1006 UInt32 theSize = (UInt32) *bufferSize;
\r
1007 dataSize = sizeof( UInt32 );
\r
1008 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1009 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1011 if ( result != noErr ) {
\r
1012 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1013 errorText_ = errorStream_.str();
\r
1017 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1018 // MUST be the same in both directions!
\r
1019 *bufferSize = theSize;
\r
1020 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 stream_.bufferSize = *bufferSize;
\r
1027 stream_.nBuffers = 1;
\r
1029 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1030 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1032 dataSize = sizeof( hog_pid );
\r
1033 property.mSelector = kAudioDevicePropertyHogMode;
\r
1034 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1035 if ( result != noErr ) {
\r
1036 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1037 errorText_ = errorStream_.str();
\r
1041 if ( hog_pid != getpid() ) {
\r
1042 hog_pid = getpid();
\r
1043 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1044 if ( result != noErr ) {
\r
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1046 errorText_ = errorStream_.str();
\r
1052 // Check and if necessary, change the sample rate for the device.
\r
1053 Float64 nominalRate;
\r
1054 dataSize = sizeof( Float64 );
\r
1055 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1056 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1057 if ( result != noErr ) {
\r
1058 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1059 errorText_ = errorStream_.str();
\r
1063 // Only change the sample rate if off by more than 1 Hz.
\r
1064 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1066 // Set a property listener for the sample rate change
\r
1067 Float64 reportedRate = 0.0;
\r
1068 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1069 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1072 errorText_ = errorStream_.str();
\r
1076 nominalRate = (Float64) sampleRate;
\r
1077 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1078 if ( result != noErr ) {
\r
1079 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1080 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1081 errorText_ = errorStream_.str();
\r
1085 // Now wait until the reported nominal rate is what we just set.
\r
1086 UInt32 microCounter = 0;
\r
1087 while ( reportedRate != nominalRate ) {
\r
1088 microCounter += 5000;
\r
1089 if ( microCounter > 5000000 ) break;
\r
1093 // Remove the property listener.
\r
1094 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( microCounter > 5000000 ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1103 // Now set the stream format for all streams. Also, check the
\r
1104 // physical format of the device and change that if necessary.
\r
1105 AudioStreamBasicDescription description;
\r
1106 dataSize = sizeof( AudioStreamBasicDescription );
\r
1107 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1108 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1109 if ( result != noErr ) {
\r
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1111 errorText_ = errorStream_.str();
\r
1115 // Set the sample rate and data format id. However, only make the
\r
1116 // change if the sample rate is not within 1.0 of the desired
\r
1117 // rate and the format is not linear pcm.
\r
1118 bool updateFormat = false;
\r
1119 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1120 description.mSampleRate = (Float64) sampleRate;
\r
1121 updateFormat = true;
\r
1124 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1125 description.mFormatID = kAudioFormatLinearPCM;
\r
1126 updateFormat = true;
\r
1129 if ( updateFormat ) {
\r
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1131 if ( result != noErr ) {
\r
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1133 errorText_ = errorStream_.str();
\r
1138 // Now check the physical format.
\r
1139 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1140 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1141 if ( result != noErr ) {
\r
1142 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1143 errorText_ = errorStream_.str();
\r
1147 //std::cout << "Current physical stream format:" << std::endl;
\r
1148 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1149 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1150 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1151 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1153 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1154 description.mFormatID = kAudioFormatLinearPCM;
\r
1155 //description.mSampleRate = (Float64) sampleRate;
\r
1156 AudioStreamBasicDescription testDescription = description;
\r
1157 UInt32 formatFlags;
\r
1159 // We'll try higher bit rates first and then work our way down.
\r
1160 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1161 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1162 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1163 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1165 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1166 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1167 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1168 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1169 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1170 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1171 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1172 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1174 bool setPhysicalFormat = false;
\r
1175 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1176 testDescription = description;
\r
1177 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1178 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1179 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1180 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1182 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1183 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1184 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1185 if ( result == noErr ) {
\r
1186 setPhysicalFormat = true;
\r
1187 //std::cout << "Updated physical stream format:" << std::endl;
\r
1188 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1189 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1190 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1191 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1196 if ( !setPhysicalFormat ) {
\r
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1198 errorText_ = errorStream_.str();
\r
1201 } // done setting virtual/physical formats.
\r
1203 // Get the stream / device latency.
\r
1205 dataSize = sizeof( UInt32 );
\r
1206 property.mSelector = kAudioDevicePropertyLatency;
\r
1207 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1208 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1209 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1211 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1212 errorText_ = errorStream_.str();
\r
1213 error( RtAudioError::WARNING );
\r
1217 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1218 // always be presented in native-endian format, so we should never
\r
1219 // need to byte swap.
\r
1220 stream_.doByteSwap[mode] = false;
\r
1222 // From the CoreAudio documentation, PCM data must be supplied as
\r
1224 stream_.userFormat = format;
\r
1225 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1227 if ( streamCount == 1 )
\r
1228 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1229 else // multiple streams
\r
1230 stream_.nDeviceChannels[mode] = channels;
\r
1231 stream_.nUserChannels[mode] = channels;
\r
1232 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1233 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1234 else stream_.userInterleaved = true;
\r
1235 stream_.deviceInterleaved[mode] = true;
\r
1236 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1238 // Set flags for buffer conversion.
\r
1239 stream_.doConvertBuffer[mode] = false;
\r
1240 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1241 stream_.doConvertBuffer[mode] = true;
\r
1242 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1243 stream_.doConvertBuffer[mode] = true;
\r
1244 if ( streamCount == 1 ) {
\r
1245 if ( stream_.nUserChannels[mode] > 1 &&
\r
1246 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1247 stream_.doConvertBuffer[mode] = true;
\r
1249 else if ( monoMode && stream_.userInterleaved )
\r
1250 stream_.doConvertBuffer[mode] = true;
\r
1252 // Allocate our CoreHandle structure for the stream.
\r
1253 CoreHandle *handle = 0;
\r
1254 if ( stream_.apiHandle == 0 ) {
\r
1256 handle = new CoreHandle;
\r
1258 catch ( std::bad_alloc& ) {
\r
1259 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1263 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1264 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1267 stream_.apiHandle = (void *) handle;
\r
1270 handle = (CoreHandle *) stream_.apiHandle;
\r
1271 handle->iStream[mode] = firstStream;
\r
1272 handle->nStreams[mode] = streamCount;
\r
1273 handle->id[mode] = id;
\r
1275 // Allocate necessary internal buffers.
\r
1276 unsigned long bufferBytes;
\r
1277 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1278 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1279 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1280 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1281 if ( stream_.userBuffer[mode] == NULL ) {
\r
1282 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1286 // If possible, we will make use of the CoreAudio stream buffers as
\r
1287 // "device buffers". However, we can't do this if using multiple
\r
1289 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1291 bool makeBuffer = true;
\r
1292 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1293 if ( mode == INPUT ) {
\r
1294 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1295 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1296 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1300 if ( makeBuffer ) {
\r
1301 bufferBytes *= *bufferSize;
\r
1302 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1303 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1304 if ( stream_.deviceBuffer == NULL ) {
\r
1305 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1311 stream_.sampleRate = sampleRate;
\r
1312 stream_.device[mode] = device;
\r
1313 stream_.state = STREAM_STOPPED;
\r
1314 stream_.callbackInfo.object = (void *) this;
\r
1316 // Setup the buffer conversion information structure.
\r
1317 if ( stream_.doConvertBuffer[mode] ) {
\r
1318 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1319 else setConvertInfo( mode, channelOffset );
\r
1322 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1323 // Only one callback procedure per device.
\r
1324 stream_.mode = DUPLEX;
\r
1326 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1327 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1329 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1330 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1332 if ( result != noErr ) {
\r
1333 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1334 errorText_ = errorStream_.str();
\r
1337 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1338 stream_.mode = DUPLEX;
\r
1340 stream_.mode = mode;
\r
1343 // Setup the device property listener for over/underload.
\r
1344 property.mSelector = kAudioDeviceProcessorOverload;
\r
1345 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1346 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1352 pthread_cond_destroy( &handle->condition );
\r
1354 stream_.apiHandle = 0;
\r
1357 for ( int i=0; i<2; i++ ) {
\r
1358 if ( stream_.userBuffer[i] ) {
\r
1359 free( stream_.userBuffer[i] );
\r
1360 stream_.userBuffer[i] = 0;
\r
1364 if ( stream_.deviceBuffer ) {
\r
1365 free( stream_.deviceBuffer );
\r
1366 stream_.deviceBuffer = 0;
\r
1369 stream_.state = STREAM_CLOSED;
\r
// Tear down an open stream completely: stop any running CoreAudio IOProcs,
// unregister them from the device(s), free the user/device conversion
// buffers, destroy the pthread condition used for drain signaling, and
// reset the stream bookkeeping to CLOSED.
// NOTE(review): this listing elides some lines (closing braces, #else/#endif,
// early returns) — comments below describe the visible statements only.
1373 void RtApiCore :: closeStream( void )

// Closing a stream that was never opened (or already closed) is only a
// warning, not a hard error.
1375 if ( stream_.state == STREAM_CLOSED ) {

1376 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1377 error( RtAudioError::WARNING );

1381 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Output side (index 0): stop the device callback if still running, then
// remove the IOProc. The 10.5+ path uses the IOProcID variant; the other
// branch uses the API deprecated in its favor.
1382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1383 if ( stream_.state == STREAM_RUNNING )

1384 AudioDeviceStop( handle->id[0], callbackHandler );

1385 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1386 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1388 // deprecated in favor of AudioDeviceDestroyIOProcID()

1389 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Input side (index 1): only torn down separately when it is a distinct
// device — a same-device DUPLEX stream shares the single IOProc removed above.
1393 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1394 if ( stream_.state == STREAM_RUNNING )

1395 AudioDeviceStop( handle->id[1], callbackHandler );

1396 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1397 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1399 // deprecated in favor of AudioDeviceDestroyIOProcID()

1400 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

// Release the per-direction user buffers (output = 0, input = 1).
1404 for ( int i=0; i<2; i++ ) {

1405 if ( stream_.userBuffer[i] ) {

1406 free( stream_.userBuffer[i] );

1407 stream_.userBuffer[i] = 0;

// Release the shared device (format-conversion) buffer, if one was allocated.
1411 if ( stream_.deviceBuffer ) {

1412 free( stream_.deviceBuffer );

1413 stream_.deviceBuffer = 0;

1416 // Destroy pthread condition variable.

1417 pthread_cond_destroy( &handle->condition );

// The CoreHandle itself is presumably deleted in an elided line before this
// pointer is cleared — TODO confirm against the full source.
1419 stream_.apiHandle = 0;

1421 stream_.mode = UNINITIALIZED;

1422 stream_.state = STREAM_CLOSED;
\r
// Start the stream's CoreAudio IOProc(s). For DUPLEX on a single device one
// AudioDeviceStart suffices; a second start is issued only when the input is
// a separate device. On success the drain state is reset and the stream is
// marked RUNNING; on failure error( SYSTEM_ERROR ) is raised at the bottom.
// NOTE(review): listing elides some lines (braces, goto/label `unlock`,
// #endif) — comments describe visible statements only.
1425 void RtApiCore :: startStream( void )

// Starting an already-running stream is only a warning.
1428 if ( stream_.state == STREAM_RUNNING ) {

1429 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1430 error( RtAudioError::WARNING );

1434 OSStatus result = noErr;

1435 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Output (or shared duplex) device: id[0] carries its AudioDeviceID.
1436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1438 result = AudioDeviceStart( handle->id[0], callbackHandler );

1439 if ( result != noErr ) {

1440 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1441 errorText_ = errorStream_.str();

// Separate input device (pure INPUT, or DUPLEX across two devices).
1446 if ( stream_.mode == INPUT ||

1447 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1449 result = AudioDeviceStart( handle->id[1], callbackHandler );

1450 if ( result != noErr ) {

1451 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1452 errorText_ = errorStream_.str();

// Reset drain bookkeeping so a fresh run does not inherit a pending stop.
1457 handle->drainCounter = 0;

1458 handle->internalDrain = false;

1459 stream_.state = STREAM_RUNNING;

// Common exit: silent return on success, system error otherwise.
1462 if ( result == noErr ) return;

1463 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully. If output is active and no drain is already in
// progress, request a drain (drainCounter = 2) and block on the handle's
// condition variable until the callback signals that the output has been
// flushed, then stop the IOProc(s). Marks the stream STOPPED on success.
// NOTE(review): listing elides some lines (braces, #endif, returns) —
// comments describe visible statements only.
1466 void RtApiCore :: stopStream( void )

// Stopping an already-stopped stream is only a warning.
1469 if ( stream_.state == STREAM_STOPPED ) {

1470 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1471 error( RtAudioError::WARNING );

1475 OSStatus result = noErr;

1476 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1477 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no stop was requested from inside the callback:
// ask the callback to drain and wait for its pthread_cond_signal.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be locked by
// this thread — the lock is presumably taken in an elided line; confirm.
1479 if ( handle->drainCounter == 0 ) {

1480 handle->drainCounter = 2;

1481 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1484 result = AudioDeviceStop( handle->id[0], callbackHandler );

1485 if ( result != noErr ) {

1486 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1487 errorText_ = errorStream_.str();

// Separate input device needs its own AudioDeviceStop.
1492 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1494 result = AudioDeviceStop( handle->id[1], callbackHandler );

1495 if ( result != noErr ) {

1496 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1497 errorText_ = errorStream_.str();

1502 stream_.state = STREAM_STOPPED;

// Common exit: silent return on success, system error otherwise.
1505 if ( result == noErr ) return;

1506 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining pending output: setting drainCounter to 2
// tells the callback a stop is already requested so stopStream() will not
// wait on the condition variable. The tail of this function (the actual
// stopStream() call) is elided from this listing.
1509 void RtApiCore :: abortStream( void )

// Aborting an already-stopped stream is only a warning.
1512 if ( stream_.state == STREAM_STOPPED ) {

1513 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1514 error( RtAudioError::WARNING );

1518 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Mark drain as externally requested; skips the output-flush wait.
1519 handle->drainCounter = 2;
\r
1524 // This function will be called by a spawned thread when the user

1525 // callback function signals that the stream should be stopped or

1526 // aborted. It is better to handle it this way because the

1527 // callbackEvent() function probably should return before the AudioDeviceStop()

1528 // function is called.

// Thread entry point (pthread-compatible signature): ptr is the stream's
// CallbackInfo, whose `object` member is the owning RtApiCore instance.
1529 static void *coreStopStream( void *ptr )

1531 CallbackInfo *info = (CallbackInfo *) ptr;

1532 RtApiCore *object = (RtApiCore *) info->object;

// Perform the stop outside the audio callback thread.
1534 object->stopStream();

// Terminate this helper thread; no return value is consumed.
1535 pthread_exit( NULL );
\r
1538 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1539 const AudioBufferList *inBufferList,
\r
1540 const AudioBufferList *outBufferList )
\r
1542 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1543 if ( stream_.state == STREAM_CLOSED ) {
\r
1544 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1545 error( RtAudioError::WARNING );
\r
1549 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1550 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1552 // Check if we were draining the stream and signal is finished.
\r
1553 if ( handle->drainCounter > 3 ) {
\r
1554 ThreadHandle threadId;
\r
1556 stream_.state = STREAM_STOPPING;
\r
1557 if ( handle->internalDrain == true )
\r
1558 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1559 else // external call to stopStream()
\r
1560 pthread_cond_signal( &handle->condition );
\r
1564 AudioDeviceID outputDevice = handle->id[0];
\r
1566 // Invoke user callback to get fresh output data UNLESS we are
\r
1567 // draining stream or duplex mode AND the input/output devices are
\r
1568 // different AND this function is called for the input device.
\r
1569 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1570 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1571 double streamTime = getStreamTime();
\r
1572 RtAudioStreamStatus status = 0;
\r
1573 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1574 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1575 handle->xrun[0] = false;
\r
1577 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1578 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1579 handle->xrun[1] = false;
\r
1582 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1583 stream_.bufferSize, streamTime, status, info->userData );
\r
1584 if ( cbReturnValue == 2 ) {
\r
1585 stream_.state = STREAM_STOPPING;
\r
1586 handle->drainCounter = 2;
\r
1590 else if ( cbReturnValue == 1 ) {
\r
1591 handle->drainCounter = 1;
\r
1592 handle->internalDrain = true;
\r
1596 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1598 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1600 if ( handle->nStreams[0] == 1 ) {
\r
1601 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1603 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1605 else { // fill multiple streams with zeros
\r
1606 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1607 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1609 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1613 else if ( handle->nStreams[0] == 1 ) {
\r
1614 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1615 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1616 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1618 else { // copy from user buffer
\r
1619 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1620 stream_.userBuffer[0],
\r
1621 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1624 else { // fill multiple streams
\r
1625 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1626 if ( stream_.doConvertBuffer[0] ) {
\r
1627 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1628 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1631 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1632 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1633 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1634 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1635 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1638 else { // fill multiple multi-channel streams with interleaved data
\r
1639 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1640 Float32 *out, *in;
\r
1642 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1643 UInt32 inChannels = stream_.nUserChannels[0];
\r
1644 if ( stream_.doConvertBuffer[0] ) {
\r
1645 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1646 inChannels = stream_.nDeviceChannels[0];
\r
1649 if ( inInterleaved ) inOffset = 1;
\r
1650 else inOffset = stream_.bufferSize;
\r
1652 channelsLeft = inChannels;
\r
1653 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1655 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1656 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1659 // Account for possible channel offset in first stream
\r
1660 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1661 streamChannels -= stream_.channelOffset[0];
\r
1662 outJump = stream_.channelOffset[0];
\r
1666 // Account for possible unfilled channels at end of the last stream
\r
1667 if ( streamChannels > channelsLeft ) {
\r
1668 outJump = streamChannels - channelsLeft;
\r
1669 streamChannels = channelsLeft;
\r
1672 // Determine input buffer offsets and skips
\r
1673 if ( inInterleaved ) {
\r
1674 inJump = inChannels;
\r
1675 in += inChannels - channelsLeft;
\r
1679 in += (inChannels - channelsLeft) * inOffset;
\r
1682 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1683 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1684 *out++ = in[j*inOffset];
\r
1689 channelsLeft -= streamChannels;
\r
1695 // Don't bother draining input
\r
1696 if ( handle->drainCounter ) {
\r
1697 handle->drainCounter++;
\r
1701 AudioDeviceID inputDevice;
\r
1702 inputDevice = handle->id[1];
\r
1703 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1705 if ( handle->nStreams[1] == 1 ) {
\r
1706 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1707 convertBuffer( stream_.userBuffer[1],
\r
1708 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1709 stream_.convertInfo[1] );
\r
1711 else { // copy to user buffer
\r
1712 memcpy( stream_.userBuffer[1],
\r
1713 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1714 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1717 else { // read from multiple streams
\r
1718 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1719 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1721 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1722 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1723 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1724 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1725 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1728 else { // read from multiple multi-channel streams
\r
1729 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1730 Float32 *out, *in;
\r
1732 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1733 UInt32 outChannels = stream_.nUserChannels[1];
\r
1734 if ( stream_.doConvertBuffer[1] ) {
\r
1735 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1736 outChannels = stream_.nDeviceChannels[1];
\r
1739 if ( outInterleaved ) outOffset = 1;
\r
1740 else outOffset = stream_.bufferSize;
\r
1742 channelsLeft = outChannels;
\r
1743 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1745 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1746 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1749 // Account for possible channel offset in first stream
\r
1750 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1751 streamChannels -= stream_.channelOffset[1];
\r
1752 inJump = stream_.channelOffset[1];
\r
1756 // Account for possible unread channels at end of the last stream
\r
1757 if ( streamChannels > channelsLeft ) {
\r
1758 inJump = streamChannels - channelsLeft;
\r
1759 streamChannels = channelsLeft;
\r
1762 // Determine output buffer offsets and skips
\r
1763 if ( outInterleaved ) {
\r
1764 outJump = outChannels;
\r
1765 out += outChannels - channelsLeft;
\r
1769 out += (outChannels - channelsLeft) * outOffset;
\r
1772 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1773 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1774 out[j*outOffset] = *in++;
\r
1779 channelsLeft -= streamChannels;
\r
1783 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1784 convertBuffer( stream_.userBuffer[1],
\r
1785 stream_.deviceBuffer,
\r
1786 stream_.convertInfo[1] );
\r
1792 //MUTEX_UNLOCK( &stream_.mutex );
\r
1794 RtApi::tickStreamTime();
\r
1798 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1802 case kAudioHardwareNotRunningError:
\r
1803 return "kAudioHardwareNotRunningError";
\r
1805 case kAudioHardwareUnspecifiedError:
\r
1806 return "kAudioHardwareUnspecifiedError";
\r
1808 case kAudioHardwareUnknownPropertyError:
\r
1809 return "kAudioHardwareUnknownPropertyError";
\r
1811 case kAudioHardwareBadPropertySizeError:
\r
1812 return "kAudioHardwareBadPropertySizeError";
\r
1814 case kAudioHardwareIllegalOperationError:
\r
1815 return "kAudioHardwareIllegalOperationError";
\r
1817 case kAudioHardwareBadObjectError:
\r
1818 return "kAudioHardwareBadObjectError";
\r
1820 case kAudioHardwareBadDeviceError:
\r
1821 return "kAudioHardwareBadDeviceError";
\r
1823 case kAudioHardwareBadStreamError:
\r
1824 return "kAudioHardwareBadStreamError";
\r
1826 case kAudioHardwareUnsupportedOperationError:
\r
1827 return "kAudioHardwareUnsupportedOperationError";
\r
1829 case kAudioDeviceUnsupportedFormatError:
\r
1830 return "kAudioDeviceUnsupportedFormatError";
\r
1832 case kAudioDevicePermissionsError:
\r
1833 return "kAudioDevicePermissionsError";
\r
1836 return "CoreAudio unknown error";
\r
1840 //******************** End of __MACOSX_CORE__ *********************//
\r
1843 #if defined(__UNIX_JACK__)
\r
1845 // JACK is a low-latency audio server, originally written for the
\r
1846 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1847 // connect a number of different applications to an audio device, as
\r
1848 // well as allowing them to share audio between themselves.
\r
1850 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1851 // have ports connected to the server. The JACK server is typically
\r
1852 // started in a terminal as follows:
\r
1854 // .jackd -d alsa -d hw:0
\r
1856 // or through an interface program such as qjackctl. Many of the
\r
1857 // parameters normally set for a stream are fixed by the JACK server
\r
1858 // and can be specified when the JACK server is started. In
\r
1861 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1863 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1864 // frames, and number of buffers = 4. Once the server is running, it
\r
1865 // is not possible to override these values. If the values are not
\r
1866 // specified in the command-line, the JACK server uses default values.
\r
1868 // The JACK server does not have to be running when an instance of
\r
1869 // RtApiJack is created, though the function getDeviceCount() will
\r
1870 // report 0 devices found until JACK has been started. When no
\r
1871 // devices are available (i.e., the JACK server is not running), a
\r
1872 // stream cannot be opened.
\r
1874 #include <jack/jack.h>
\r
1875 #include <unistd.h>
\r
1878 // A structure to hold various information related to the Jack API
\r
1879 // implementation.
\r
1880 struct JackHandle {
\r
1881 jack_client_t *client;
\r
1882 jack_port_t **ports[2];
\r
1883 std::string deviceName[2];
\r
1885 pthread_cond_t condition;
\r
1886 int drainCounter; // Tracks callback counts when draining
\r
1887 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1890 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler used to silence Jack's internal error
// reporting in non-debug builds (extraneous trailing semicolon removed).
static void jackSilentError( const char * ) {}
\r
1895 RtApiJack :: RtApiJack()
\r
1897 // Nothing to do here.
\r
1898 #if !defined(__RTAUDIO_DEBUG__)
\r
1899 // Turn off Jack's internal error reporting.
\r
1900 jack_set_error_function( &jackSilentError );
\r
1904 RtApiJack :: ~RtApiJack()
\r
1906 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1909 unsigned int RtApiJack :: getDeviceCount( void )
\r
1911 // See if we can become a jack client.
\r
1912 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1913 jack_status_t *status = NULL;
\r
1914 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1915 if ( client == 0 ) return 0;
\r
1917 const char **ports;
\r
1918 std::string port, previousPort;
\r
1919 unsigned int nChannels = 0, nDevices = 0;
\r
1920 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1922 // Parse the port names up to the first colon (:).
\r
1923 size_t iColon = 0;
\r
1925 port = (char *) ports[ nChannels ];
\r
1926 iColon = port.find(":");
\r
1927 if ( iColon != std::string::npos ) {
\r
1928 port = port.substr( 0, iColon + 1 );
\r
1929 if ( port != previousPort ) {
\r
1931 previousPort = port;
\r
1934 } while ( ports[++nChannels] );
\r
1938 jack_client_close( client );
\r
1942 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1944 RtAudio::DeviceInfo info;
\r
1945 info.probed = false;
\r
1947 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1948 jack_status_t *status = NULL;
\r
1949 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1950 if ( client == 0 ) {
\r
1951 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1952 error( RtAudioError::WARNING );
\r
1956 const char **ports;
\r
1957 std::string port, previousPort;
\r
1958 unsigned int nPorts = 0, nDevices = 0;
\r
1959 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1961 // Parse the port names up to the first colon (:).
\r
1962 size_t iColon = 0;
\r
1964 port = (char *) ports[ nPorts ];
\r
1965 iColon = port.find(":");
\r
1966 if ( iColon != std::string::npos ) {
\r
1967 port = port.substr( 0, iColon );
\r
1968 if ( port != previousPort ) {
\r
1969 if ( nDevices == device ) info.name = port;
\r
1971 previousPort = port;
\r
1974 } while ( ports[++nPorts] );
\r
1978 if ( device >= nDevices ) {
\r
1979 jack_client_close( client );
\r
1980 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1981 error( RtAudioError::INVALID_USE );
\r
1985 // Get the current jack server sample rate.
\r
1986 info.sampleRates.clear();
\r
1987 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1989 // Count the available ports containing the client name as device
\r
1990 // channels. Jack "input ports" equal RtAudio output channels.
\r
1991 unsigned int nChannels = 0;
\r
1992 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1994 while ( ports[ nChannels ] ) nChannels++;
\r
1996 info.outputChannels = nChannels;
\r
1999 // Jack "output ports" equal RtAudio input channels.
\r
2001 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2003 while ( ports[ nChannels ] ) nChannels++;
\r
2005 info.inputChannels = nChannels;
\r
2008 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2009 jack_client_close(client);
\r
2010 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2011 error( RtAudioError::WARNING );
\r
2015 // If device opens for both playback and capture, we determine the channels.
\r
2016 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2017 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2019 // Jack always uses 32-bit floats.
\r
2020 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2022 // Jack doesn't provide default devices so we'll use the first available one.
\r
2023 if ( device == 0 && info.outputChannels > 0 )
\r
2024 info.isDefaultOutput = true;
\r
2025 if ( device == 0 && info.inputChannels > 0 )
\r
2026 info.isDefaultInput = true;
\r
2028 jack_client_close(client);
\r
2029 info.probed = true;
\r
2033 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2035 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2037 RtApiJack *object = (RtApiJack *) info->object;
\r
2038 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2043 // This function will be called by a spawned thread when the Jack
\r
2044 // server signals that it is shutting down. It is necessary to handle
\r
2045 // it this way because the jackShutdown() function must return before
\r
2046 // the jack_deactivate() function (in closeStream()) will return.
\r
2047 static void *jackCloseStream( void *ptr )
\r
2049 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2050 RtApiJack *object = (RtApiJack *) info->object;
\r
2052 object->closeStream();
\r
2054 pthread_exit( NULL );
\r
2056 static void jackShutdown( void *infoPointer )
\r
2058 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2059 RtApiJack *object = (RtApiJack *) info->object;
\r
2061 // Check current stream state. If stopped, then we'll assume this
\r
2062 // was called as a result of a call to RtApiJack::stopStream (the
\r
2063 // deactivation of a client handle causes this function to be called).
\r
2064 // If not, we'll assume the Jack server is shutting down or some
\r
2065 // other problem occurred and we should close the stream.
\r
2066 if ( object->isStreamRunning() == false ) return;
\r
2068 ThreadHandle threadId;
\r
2069 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2070 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2073 static int jackXrun( void *infoPointer )
\r
2075 JackHandle *handle = (JackHandle *) infoPointer;
\r
2077 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2078 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2083 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2084 unsigned int firstChannel, unsigned int sampleRate,
\r
2085 RtAudioFormat format, unsigned int *bufferSize,
\r
2086 RtAudio::StreamOptions *options )
\r
2088 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2090 // Look for jack server and try to become a client (only do once per stream).
\r
2091 jack_client_t *client = 0;
\r
2092 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2093 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2094 jack_status_t *status = NULL;
\r
2095 if ( options && !options->streamName.empty() )
\r
2096 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2098 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2099 if ( client == 0 ) {
\r
2100 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2101 error( RtAudioError::WARNING );
\r
2106 // The handle must have been created on an earlier pass.
\r
2107 client = handle->client;
\r
2110 const char **ports;
\r
2111 std::string port, previousPort, deviceName;
\r
2112 unsigned int nPorts = 0, nDevices = 0;
\r
2113 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2115 // Parse the port names up to the first colon (:).
\r
2116 size_t iColon = 0;
\r
2118 port = (char *) ports[ nPorts ];
\r
2119 iColon = port.find(":");
\r
2120 if ( iColon != std::string::npos ) {
\r
2121 port = port.substr( 0, iColon );
\r
2122 if ( port != previousPort ) {
\r
2123 if ( nDevices == device ) deviceName = port;
\r
2125 previousPort = port;
\r
2128 } while ( ports[++nPorts] );
\r
2132 if ( device >= nDevices ) {
\r
2133 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2137 // Count the available ports containing the client name as device
\r
2138 // channels. Jack "input ports" equal RtAudio output channels.
\r
2139 unsigned int nChannels = 0;
\r
2140 unsigned long flag = JackPortIsInput;
\r
2141 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2142 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2144 while ( ports[ nChannels ] ) nChannels++;
\r
2148 // Compare the jack ports for specified client to the requested number of channels.
\r
2149 if ( nChannels < (channels + firstChannel) ) {
\r
2150 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2151 errorText_ = errorStream_.str();
\r
2155 // Check the jack server sample rate.
\r
2156 unsigned int jackRate = jack_get_sample_rate( client );
\r
2157 if ( sampleRate != jackRate ) {
\r
2158 jack_client_close( client );
\r
2159 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2160 errorText_ = errorStream_.str();
\r
2163 stream_.sampleRate = jackRate;
\r
2165 // Get the latency of the JACK port.
\r
2166 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2167 if ( ports[ firstChannel ] ) {
\r
2168 // Added by Ge Wang
\r
2169 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2170 // the range (usually the min and max are equal)
\r
2171 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2172 // get the latency range
\r
2173 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2174 // be optimistic, use the min!
\r
2175 stream_.latency[mode] = latrange.min;
\r
2176 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2180 // The jack server always uses 32-bit floating-point data.
\r
2181 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2182 stream_.userFormat = format;
\r
2184 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2185 else stream_.userInterleaved = true;
\r
2187 // Jack always uses non-interleaved buffers.
\r
2188 stream_.deviceInterleaved[mode] = false;
\r
2190 // Jack always provides host byte-ordered data.
\r
2191 stream_.doByteSwap[mode] = false;
\r
2193 // Get the buffer size. The buffer size and number of buffers
\r
2194 // (periods) is set when the jack server is started.
\r
2195 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2196 *bufferSize = stream_.bufferSize;
\r
2198 stream_.nDeviceChannels[mode] = channels;
\r
2199 stream_.nUserChannels[mode] = channels;
\r
2201 // Set flags for buffer conversion.
\r
2202 stream_.doConvertBuffer[mode] = false;
\r
2203 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2204 stream_.doConvertBuffer[mode] = true;
\r
2205 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2206 stream_.nUserChannels[mode] > 1 )
\r
2207 stream_.doConvertBuffer[mode] = true;
\r
2209 // Allocate our JackHandle structure for the stream.
\r
2210 if ( handle == 0 ) {
\r
2212 handle = new JackHandle;
\r
2214 catch ( std::bad_alloc& ) {
\r
2215 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2219 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2220 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2223 stream_.apiHandle = (void *) handle;
\r
2224 handle->client = client;
\r
2226 handle->deviceName[mode] = deviceName;
\r
2228 // Allocate necessary internal buffers.
\r
2229 unsigned long bufferBytes;
\r
2230 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2231 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2232 if ( stream_.userBuffer[mode] == NULL ) {
\r
2233 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2237 if ( stream_.doConvertBuffer[mode] ) {
\r
2239 bool makeBuffer = true;
\r
2240 if ( mode == OUTPUT )
\r
2241 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2242 else { // mode == INPUT
\r
2243 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2244 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2245 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2246 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2250 if ( makeBuffer ) {
\r
2251 bufferBytes *= *bufferSize;
\r
2252 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2253 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2254 if ( stream_.deviceBuffer == NULL ) {
\r
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2261 // Allocate memory for the Jack ports (channels) identifiers.
\r
2262 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2263 if ( handle->ports[mode] == NULL ) {
\r
2264 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2268 stream_.device[mode] = device;
\r
2269 stream_.channelOffset[mode] = firstChannel;
\r
2270 stream_.state = STREAM_STOPPED;
\r
2271 stream_.callbackInfo.object = (void *) this;
\r
2273 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2274 // We had already set up the stream for output.
\r
2275 stream_.mode = DUPLEX;
\r
2277 stream_.mode = mode;
\r
2278 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2279 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2280 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2283 // Register our ports.
\r
2285 if ( mode == OUTPUT ) {
\r
2286 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2287 snprintf( label, 64, "outport %d", i );
\r
2288 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2289 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2293 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2294 snprintf( label, 64, "inport %d", i );
\r
2295 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2296 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2300 // Setup the buffer conversion information structure. We don't use
\r
2301 // buffers to do channel offsets, so we override that parameter
\r
2303 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2309 pthread_cond_destroy( &handle->condition );
\r
2310 jack_client_close( handle->client );
\r
2312 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2313 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2316 stream_.apiHandle = 0;
\r
2319 for ( int i=0; i<2; i++ ) {
\r
2320 if ( stream_.userBuffer[i] ) {
\r
2321 free( stream_.userBuffer[i] );
\r
2322 stream_.userBuffer[i] = 0;
\r
2326 if ( stream_.deviceBuffer ) {
\r
2327 free( stream_.deviceBuffer );
\r
2328 stream_.deviceBuffer = 0;
\r
2334 void RtApiJack :: closeStream( void )
\r
2336 if ( stream_.state == STREAM_CLOSED ) {
\r
2337 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2338 error( RtAudioError::WARNING );
\r
2342 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2345 if ( stream_.state == STREAM_RUNNING )
\r
2346 jack_deactivate( handle->client );
\r
2348 jack_client_close( handle->client );
\r
2352 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2353 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2354 pthread_cond_destroy( &handle->condition );
\r
2356 stream_.apiHandle = 0;
\r
2359 for ( int i=0; i<2; i++ ) {
\r
2360 if ( stream_.userBuffer[i] ) {
\r
2361 free( stream_.userBuffer[i] );
\r
2362 stream_.userBuffer[i] = 0;
\r
2366 if ( stream_.deviceBuffer ) {
\r
2367 free( stream_.deviceBuffer );
\r
2368 stream_.deviceBuffer = 0;
\r
2371 stream_.mode = UNINITIALIZED;
\r
2372 stream_.state = STREAM_CLOSED;
\r
2375 void RtApiJack :: startStream( void )
\r
2378 if ( stream_.state == STREAM_RUNNING ) {
\r
2379 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2380 error( RtAudioError::WARNING );
\r
2384 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2385 int result = jack_activate( handle->client );
\r
2387 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2391 const char **ports;
\r
2393 // Get the list of available ports.
\r
2394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2396 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2397 if ( ports == NULL) {
\r
2398 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2402 // Now make the port connections. Since RtAudio wasn't designed to
\r
2403 // allow the user to select particular channels of a device, we'll
\r
2404 // just open the first "nChannels" ports with offset.
\r
2405 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2407 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2408 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2411 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2418 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2420 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2421 if ( ports == NULL) {
\r
2422 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2426 // Now make the port connections. See note above.
\r
2427 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2429 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2430 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2433 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2440 handle->drainCounter = 0;
\r
2441 handle->internalDrain = false;
\r
2442 stream_.state = STREAM_RUNNING;
\r
2445 if ( result == 0 ) return;
\r
2446 error( RtAudioError::SYSTEM_ERROR );
\r
2449 void RtApiJack :: stopStream( void )
\r
2452 if ( stream_.state == STREAM_STOPPED ) {
\r
2453 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2454 error( RtAudioError::WARNING );
\r
2458 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2459 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2461 if ( handle->drainCounter == 0 ) {
\r
2462 handle->drainCounter = 2;
\r
2463 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2467 jack_deactivate( handle->client );
\r
2468 stream_.state = STREAM_STOPPED;
\r
2471 void RtApiJack :: abortStream( void )
\r
2474 if ( stream_.state == STREAM_STOPPED ) {
\r
2475 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2476 error( RtAudioError::WARNING );
\r
2480 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2481 handle->drainCounter = 2;
\r
2486 // This function will be called by a spawned thread when the user
\r
2487 // callback function signals that the stream should be stopped or
\r
2488 // aborted. It is necessary to handle it this way because the
\r
2489 // callbackEvent() function must return before the jack_deactivate()
\r
2490 // function will return.
\r
2491 static void *jackStopStream( void *ptr )
\r
2493 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2494 RtApiJack *object = (RtApiJack *) info->object;
\r
2496 object->stopStream();
\r
2497 pthread_exit( NULL );
\r
2500 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2502 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2503 if ( stream_.state == STREAM_CLOSED ) {
\r
2504 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2505 error( RtAudioError::WARNING );
\r
2508 if ( stream_.bufferSize != nframes ) {
\r
2509 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2510 error( RtAudioError::WARNING );
\r
2514 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2515 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2517 // Check if we were draining the stream and signal is finished.
\r
2518 if ( handle->drainCounter > 3 ) {
\r
2519 ThreadHandle threadId;
\r
2521 stream_.state = STREAM_STOPPING;
\r
2522 if ( handle->internalDrain == true )
\r
2523 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2525 pthread_cond_signal( &handle->condition );
\r
2529 // Invoke user callback first, to get fresh output data.
\r
2530 if ( handle->drainCounter == 0 ) {
\r
2531 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2532 double streamTime = getStreamTime();
\r
2533 RtAudioStreamStatus status = 0;
\r
2534 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2535 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2536 handle->xrun[0] = false;
\r
2538 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2539 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2540 handle->xrun[1] = false;
\r
2542 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2543 stream_.bufferSize, streamTime, status, info->userData );
\r
2544 if ( cbReturnValue == 2 ) {
\r
2545 stream_.state = STREAM_STOPPING;
\r
2546 handle->drainCounter = 2;
\r
2548 pthread_create( &id, NULL, jackStopStream, info );
\r
2551 else if ( cbReturnValue == 1 ) {
\r
2552 handle->drainCounter = 1;
\r
2553 handle->internalDrain = true;
\r
2557 jack_default_audio_sample_t *jackbuffer;
\r
2558 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2559 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2561 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2563 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2564 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2565 memset( jackbuffer, 0, bufferBytes );
\r
2569 else if ( stream_.doConvertBuffer[0] ) {
\r
2571 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2573 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2574 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2575 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2578 else { // no buffer conversion
\r
2579 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2581 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2586 // Don't bother draining input
\r
2587 if ( handle->drainCounter ) {
\r
2588 handle->drainCounter++;
\r
2592 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2594 if ( stream_.doConvertBuffer[1] ) {
\r
2595 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2596 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2597 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2599 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2601 else { // no buffer conversion
\r
2602 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2603 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2604 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2610 RtApi::tickStreamTime();
\r
2613 //******************** End of __UNIX_JACK__ *********************//
\r
2616 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2618 // The ASIO API is designed around a callback scheme, so this
\r
2619 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2620 // Jack. The primary constraint with ASIO is that it only allows
\r
2621 // access to a single driver at a time. Thus, it is not possible to
\r
2622 // have more than one simultaneous RtAudio stream.
\r
2624 // This implementation also requires a number of external ASIO files
\r
2625 // and a few global variables. The ASIO callback scheme does not
\r
2626 // allow for the passing of user data, so we must create a global
\r
2627 // pointer to our callbackInfo structure.
\r
2629 // On unix systems, we make use of a pthread condition variable.
\r
2630 // Since there is no equivalent in Windows, I hacked something based
\r
2631 // on information found in
\r
2632 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2634 #include "asiosys.h"
\r
2636 #include "iasiothiscallresolver.h"
\r
2637 #include "asiodrivers.h"
\r
2640 static AsioDrivers drivers;
\r
2641 static ASIOCallbacks asioCallbacks;
\r
2642 static ASIODriverInfo driverInfo;
\r
2643 static CallbackInfo *asioCallbackInfo;
\r
2644 static bool asioXRun;
\r
2646 struct AsioHandle {
\r
2647 int drainCounter; // Tracks callback counts when draining
\r
2648 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2649 ASIOBufferInfo *bufferInfos;
\r
2653 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2656 // Function declarations (definitions at end of section)
\r
2657 static const char* getAsioErrorString( ASIOError result );
\r
2658 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2659 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2661 RtApiAsio :: RtApiAsio()
\r
2663 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2664 // CoInitialize beforehand, but it must be for appartment threading
\r
2665 // (in which case, CoInitilialize will return S_FALSE here).
\r
2666 coInitialized_ = false;
\r
2667 HRESULT hr = CoInitialize( NULL );
\r
2668 if ( FAILED(hr) ) {
\r
2669 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2670 error( RtAudioError::WARNING );
\r
2672 coInitialized_ = true;
\r
2674 drivers.removeCurrentDriver();
\r
2675 driverInfo.asioVersion = 2;
\r
2677 // See note in DirectSound implementation about GetDesktopWindow().
\r
2678 driverInfo.sysRef = GetForegroundWindow();
\r
2681 RtApiAsio :: ~RtApiAsio()
\r
2683 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2684 if ( coInitialized_ ) CoUninitialize();
\r
2687 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2689 return (unsigned int) drivers.asioGetNumDev();
\r
2692 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2694 RtAudio::DeviceInfo info;
\r
2695 info.probed = false;
\r
2698 unsigned int nDevices = getDeviceCount();
\r
2699 if ( nDevices == 0 ) {
\r
2700 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2701 error( RtAudioError::INVALID_USE );
\r
2705 if ( device >= nDevices ) {
\r
2706 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2707 error( RtAudioError::INVALID_USE );
\r
2711 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2712 if ( stream_.state != STREAM_CLOSED ) {
\r
2713 if ( device >= devices_.size() ) {
\r
2714 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2715 error( RtAudioError::WARNING );
\r
2718 return devices_[ device ];
\r
2721 char driverName[32];
\r
2722 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2723 if ( result != ASE_OK ) {
\r
2724 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2725 errorText_ = errorStream_.str();
\r
2726 error( RtAudioError::WARNING );
\r
2730 info.name = driverName;
\r
2732 if ( !drivers.loadDriver( driverName ) ) {
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 result = ASIOInit( &driverInfo );
\r
2740 if ( result != ASE_OK ) {
\r
2741 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2742 errorText_ = errorStream_.str();
\r
2743 error( RtAudioError::WARNING );
\r
2747 // Determine the device channel information.
\r
2748 long inputChannels, outputChannels;
\r
2749 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2750 if ( result != ASE_OK ) {
\r
2751 drivers.removeCurrentDriver();
\r
2752 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2753 errorText_ = errorStream_.str();
\r
2754 error( RtAudioError::WARNING );
\r
2758 info.outputChannels = outputChannels;
\r
2759 info.inputChannels = inputChannels;
\r
2760 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2761 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2763 // Determine the supported sample rates.
\r
2764 info.sampleRates.clear();
\r
2765 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2766 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2767 if ( result == ASE_OK )
\r
2768 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2771 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2772 ASIOChannelInfo channelInfo;
\r
2773 channelInfo.channel = 0;
\r
2774 channelInfo.isInput = true;
\r
2775 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2776 result = ASIOGetChannelInfo( &channelInfo );
\r
2777 if ( result != ASE_OK ) {
\r
2778 drivers.removeCurrentDriver();
\r
2779 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2780 errorText_ = errorStream_.str();
\r
2781 error( RtAudioError::WARNING );
\r
2785 info.nativeFormats = 0;
\r
2786 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2787 info.nativeFormats |= RTAUDIO_SINT16;
\r
2788 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2789 info.nativeFormats |= RTAUDIO_SINT32;
\r
2790 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2791 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2792 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2793 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2794 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2795 info.nativeFormats |= RTAUDIO_SINT24;
\r
2797 if ( info.outputChannels > 0 )
\r
2798 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2799 if ( info.inputChannels > 0 )
\r
2800 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2802 info.probed = true;
\r
2803 drivers.removeCurrentDriver();
\r
2807 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2809 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2810 object->callbackEvent( index );
\r
2813 void RtApiAsio :: saveDeviceInfo( void )
\r
2817 unsigned int nDevices = getDeviceCount();
\r
2818 devices_.resize( nDevices );
\r
2819 for ( unsigned int i=0; i<nDevices; i++ )
\r
2820 devices_[i] = getDeviceInfo( i );
\r
2823 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2824 unsigned int firstChannel, unsigned int sampleRate,
\r
2825 RtAudioFormat format, unsigned int *bufferSize,
\r
2826 RtAudio::StreamOptions *options )
\r
2828 // For ASIO, a duplex stream MUST use the same driver.
\r
2829 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2830 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2834 char driverName[32];
\r
2835 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2836 if ( result != ASE_OK ) {
\r
2837 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2838 errorText_ = errorStream_.str();
\r
2842 // Only load the driver once for duplex stream.
\r
2843 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2844 // The getDeviceInfo() function will not work when a stream is open
\r
2845 // because ASIO does not allow multiple devices to run at the same
\r
2846 // time. Thus, we'll probe the system before opening a stream and
\r
2847 // save the results for use by getDeviceInfo().
\r
2848 this->saveDeviceInfo();
\r
2850 if ( !drivers.loadDriver( driverName ) ) {
\r
2851 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2852 errorText_ = errorStream_.str();
\r
2856 result = ASIOInit( &driverInfo );
\r
2857 if ( result != ASE_OK ) {
\r
2858 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2859 errorText_ = errorStream_.str();
\r
2864 // Check the device channel count.
\r
2865 long inputChannels, outputChannels;
\r
2866 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2867 if ( result != ASE_OK ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2874 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2875 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2876 drivers.removeCurrentDriver();
\r
2877 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2878 errorText_ = errorStream_.str();
\r
2881 stream_.nDeviceChannels[mode] = channels;
\r
2882 stream_.nUserChannels[mode] = channels;
\r
2883 stream_.channelOffset[mode] = firstChannel;
\r
2885 // Verify the sample rate is supported.
\r
2886 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2890 errorText_ = errorStream_.str();
\r
2894 // Get the current sample rate
\r
2895 ASIOSampleRate currentRate;
\r
2896 result = ASIOGetSampleRate( ¤tRate );
\r
2897 if ( result != ASE_OK ) {
\r
2898 drivers.removeCurrentDriver();
\r
2899 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2900 errorText_ = errorStream_.str();
\r
2904 // Set the sample rate only if necessary
\r
2905 if ( currentRate != sampleRate ) {
\r
2906 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2907 if ( result != ASE_OK ) {
\r
2908 drivers.removeCurrentDriver();
\r
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2910 errorText_ = errorStream_.str();
\r
2915 // Determine the driver data type.
\r
2916 ASIOChannelInfo channelInfo;
\r
2917 channelInfo.channel = 0;
\r
2918 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2919 else channelInfo.isInput = true;
\r
2920 result = ASIOGetChannelInfo( &channelInfo );
\r
2921 if ( result != ASE_OK ) {
\r
2922 drivers.removeCurrentDriver();
\r
2923 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2924 errorText_ = errorStream_.str();
\r
2928 // Assuming WINDOWS host is always little-endian.
\r
2929 stream_.doByteSwap[mode] = false;
\r
2930 stream_.userFormat = format;
\r
2931 stream_.deviceFormat[mode] = 0;
\r
2932 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2934 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2938 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2942 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2944 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2945 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2946 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2948 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2949 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2950 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2953 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2954 drivers.removeCurrentDriver();
\r
2955 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2956 errorText_ = errorStream_.str();
\r
2960 // Set the buffer size. For a duplex stream, this will end up
\r
2961 // setting the buffer size based on the input constraints, which
\r
2963 long minSize, maxSize, preferSize, granularity;
\r
2964 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2965 if ( result != ASE_OK ) {
\r
2966 drivers.removeCurrentDriver();
\r
2967 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2968 errorText_ = errorStream_.str();
\r
2972 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2973 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2974 else if ( granularity == -1 ) {
\r
2975 // Make sure bufferSize is a power of two.
\r
2976 int log2_of_min_size = 0;
\r
2977 int log2_of_max_size = 0;
\r
2979 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2980 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2981 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2984 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2985 int min_delta_num = log2_of_min_size;
\r
2987 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2988 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2989 if (current_delta < min_delta) {
\r
2990 min_delta = current_delta;
\r
2991 min_delta_num = i;
\r
2995 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2996 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2997 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2999 else if ( granularity != 0 ) {
\r
3000 // Set to an even multiple of granularity, rounding up.
\r
3001 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3004 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
3005 drivers.removeCurrentDriver();
\r
3006 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3010 stream_.bufferSize = *bufferSize;
\r
3011 stream_.nBuffers = 2;
\r
3013 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3014 else stream_.userInterleaved = true;
\r
3016 // ASIO always uses non-interleaved buffers.
\r
3017 stream_.deviceInterleaved[mode] = false;
\r
3019 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3020 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3021 if ( handle == 0 ) {
\r
3023 handle = new AsioHandle;
\r
3025 catch ( std::bad_alloc& ) {
\r
3026 //if ( handle == NULL ) {
\r
3027 drivers.removeCurrentDriver();
\r
3028 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3031 handle->bufferInfos = 0;
\r
3033 // Create a manual-reset event.
\r
3034 handle->condition = CreateEvent( NULL, // no security
\r
3035 TRUE, // manual-reset
\r
3036 FALSE, // non-signaled initially
\r
3037 NULL ); // unnamed
\r
3038 stream_.apiHandle = (void *) handle;
\r
3041 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3042 // and output separately, we'll have to dispose of previously
\r
3043 // created output buffers for a duplex stream.
\r
3044 long inputLatency, outputLatency;
\r
3045 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3046 ASIODisposeBuffers();
\r
3047 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3050 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3051 bool buffersAllocated = false;
\r
3052 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3053 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3054 if ( handle->bufferInfos == NULL ) {
\r
3055 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3056 errorText_ = errorStream_.str();
\r
3060 ASIOBufferInfo *infos;
\r
3061 infos = handle->bufferInfos;
\r
3062 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3063 infos->isInput = ASIOFalse;
\r
3064 infos->channelNum = i + stream_.channelOffset[0];
\r
3065 infos->buffers[0] = infos->buffers[1] = 0;
\r
3067 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3068 infos->isInput = ASIOTrue;
\r
3069 infos->channelNum = i + stream_.channelOffset[1];
\r
3070 infos->buffers[0] = infos->buffers[1] = 0;
\r
3073 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3074 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3075 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3076 asioCallbacks.asioMessage = &asioMessages;
\r
3077 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3078 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3079 if ( result != ASE_OK ) {
\r
3080 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3081 errorText_ = errorStream_.str();
\r
3084 buffersAllocated = true;
\r
3086 // Set flags for buffer conversion.
\r
3087 stream_.doConvertBuffer[mode] = false;
\r
3088 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3089 stream_.doConvertBuffer[mode] = true;
\r
3090 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3091 stream_.nUserChannels[mode] > 1 )
\r
3092 stream_.doConvertBuffer[mode] = true;
\r
3094 // Allocate necessary internal buffers
\r
3095 unsigned long bufferBytes;
\r
3096 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3097 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3098 if ( stream_.userBuffer[mode] == NULL ) {
\r
3099 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3103 if ( stream_.doConvertBuffer[mode] ) {
\r
3105 bool makeBuffer = true;
\r
3106 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3107 if ( mode == INPUT ) {
\r
3108 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3109 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3110 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3114 if ( makeBuffer ) {
\r
3115 bufferBytes *= *bufferSize;
\r
3116 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3117 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3118 if ( stream_.deviceBuffer == NULL ) {
\r
3119 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3125 stream_.sampleRate = sampleRate;
\r
3126 stream_.device[mode] = device;
\r
3127 stream_.state = STREAM_STOPPED;
\r
3128 asioCallbackInfo = &stream_.callbackInfo;
\r
3129 stream_.callbackInfo.object = (void *) this;
\r
3130 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3131 // We had already set up an output stream.
\r
3132 stream_.mode = DUPLEX;
\r
3134 stream_.mode = mode;
\r
3136 // Determine device latencies
\r
3137 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3138 if ( result != ASE_OK ) {
\r
3139 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3140 errorText_ = errorStream_.str();
\r
3141 error( RtAudioError::WARNING); // warn but don't fail
\r
3144 stream_.latency[0] = outputLatency;
\r
3145 stream_.latency[1] = inputLatency;
\r
3148 // Setup the buffer conversion information structure. We don't use
\r
3149 // buffers to do channel offsets, so we override that parameter
\r
3151 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3156 if ( buffersAllocated )
\r
3157 ASIODisposeBuffers();
\r
3158 drivers.removeCurrentDriver();
\r
3161 CloseHandle( handle->condition );
\r
3162 if ( handle->bufferInfos )
\r
3163 free( handle->bufferInfos );
\r
3165 stream_.apiHandle = 0;
\r
3168 for ( int i=0; i<2; i++ ) {
\r
3169 if ( stream_.userBuffer[i] ) {
\r
3170 free( stream_.userBuffer[i] );
\r
3171 stream_.userBuffer[i] = 0;
\r
3175 if ( stream_.deviceBuffer ) {
\r
3176 free( stream_.deviceBuffer );
\r
3177 stream_.deviceBuffer = 0;
\r
3183 void RtApiAsio :: closeStream()
\r
3185 if ( stream_.state == STREAM_CLOSED ) {
\r
3186 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3187 error( RtAudioError::WARNING );
\r
3191 if ( stream_.state == STREAM_RUNNING ) {
\r
3192 stream_.state = STREAM_STOPPED;
\r
3195 ASIODisposeBuffers();
\r
3196 drivers.removeCurrentDriver();
\r
3198 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3200 CloseHandle( handle->condition );
\r
3201 if ( handle->bufferInfos )
\r
3202 free( handle->bufferInfos );
\r
3204 stream_.apiHandle = 0;
\r
3207 for ( int i=0; i<2; i++ ) {
\r
3208 if ( stream_.userBuffer[i] ) {
\r
3209 free( stream_.userBuffer[i] );
\r
3210 stream_.userBuffer[i] = 0;
\r
3214 if ( stream_.deviceBuffer ) {
\r
3215 free( stream_.deviceBuffer );
\r
3216 stream_.deviceBuffer = 0;
\r
3219 stream_.mode = UNINITIALIZED;
\r
3220 stream_.state = STREAM_CLOSED;
\r
3223 bool stopThreadCalled = false;
\r
3225 void RtApiAsio :: startStream()
\r
3228 if ( stream_.state == STREAM_RUNNING ) {
\r
3229 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3230 error( RtAudioError::WARNING );
\r
3234 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3235 ASIOError result = ASIOStart();
\r
3236 if ( result != ASE_OK ) {
\r
3237 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3238 errorText_ = errorStream_.str();
\r
3242 handle->drainCounter = 0;
\r
3243 handle->internalDrain = false;
\r
3244 ResetEvent( handle->condition );
\r
3245 stream_.state = STREAM_RUNNING;
\r
3249 stopThreadCalled = false;
\r
3251 if ( result == ASE_OK ) return;
\r
3252 error( RtAudioError::SYSTEM_ERROR );
\r
3255 void RtApiAsio :: stopStream()
\r
3258 if ( stream_.state == STREAM_STOPPED ) {
\r
3259 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3260 error( RtAudioError::WARNING );
\r
3264 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3265 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3266 if ( handle->drainCounter == 0 ) {
\r
3267 handle->drainCounter = 2;
\r
3268 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3272 stream_.state = STREAM_STOPPED;
\r
3274 ASIOError result = ASIOStop();
\r
3275 if ( result != ASE_OK ) {
\r
3276 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3277 errorText_ = errorStream_.str();
\r
3280 if ( result == ASE_OK ) return;
\r
3281 error( RtAudioError::SYSTEM_ERROR );
\r
3284 void RtApiAsio :: abortStream()
\r
3287 if ( stream_.state == STREAM_STOPPED ) {
\r
3288 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3289 error( RtAudioError::WARNING );
\r
3293 // The following lines were commented-out because some behavior was
\r
3294 // noted where the device buffers need to be zeroed to avoid
\r
3295 // continuing sound, even when the device buffers are completely
\r
3296 // disposed. So now, calling abort is the same as calling stop.
\r
3297 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3298 // handle->drainCounter = 2;
\r
3302 // This function will be called by a spawned thread when the user
\r
3303 // callback function signals that the stream should be stopped or
\r
3304 // aborted. It is necessary to handle it this way because the
\r
3305 // callbackEvent() function must return before the ASIOStop()
\r
3306 // function will return.
\r
3307 static unsigned __stdcall asioStopStream( void *ptr )
\r
3309 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3310 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3312 object->stopStream();
\r
3313 _endthreadex( 0 );
\r
3317 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3319 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3320 if ( stream_.state == STREAM_CLOSED ) {
\r
3321 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3322 error( RtAudioError::WARNING );
\r
3326 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3327 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3329 // Check if we were draining the stream and signal if finished.
\r
3330 if ( handle->drainCounter > 3 ) {
\r
3332 stream_.state = STREAM_STOPPING;
\r
3333 if ( handle->internalDrain == false )
\r
3334 SetEvent( handle->condition );
\r
3335 else { // spawn a thread to stop the stream
\r
3336 unsigned threadId;
\r
3337 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3338 &stream_.callbackInfo, 0, &threadId );
\r
3343 // Invoke user callback to get fresh output data UNLESS we are
\r
3344 // draining stream.
\r
3345 if ( handle->drainCounter == 0 ) {
\r
3346 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3347 double streamTime = getStreamTime();
\r
3348 RtAudioStreamStatus status = 0;
\r
3349 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3350 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3353 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3354 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3357 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3358 stream_.bufferSize, streamTime, status, info->userData );
\r
3359 if ( cbReturnValue == 2 ) {
\r
3360 stream_.state = STREAM_STOPPING;
\r
3361 handle->drainCounter = 2;
\r
3362 unsigned threadId;
\r
3363 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3364 &stream_.callbackInfo, 0, &threadId );
\r
3367 else if ( cbReturnValue == 1 ) {
\r
3368 handle->drainCounter = 1;
\r
3369 handle->internalDrain = true;
\r
3373 unsigned int nChannels, bufferBytes, i, j;
\r
3374 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3375 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3377 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3379 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3381 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3382 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3383 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3387 else if ( stream_.doConvertBuffer[0] ) {
\r
3389 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3390 if ( stream_.doByteSwap[0] )
\r
3391 byteSwapBuffer( stream_.deviceBuffer,
\r
3392 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3393 stream_.deviceFormat[0] );
\r
3395 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3396 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3397 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3398 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3404 if ( stream_.doByteSwap[0] )
\r
3405 byteSwapBuffer( stream_.userBuffer[0],
\r
3406 stream_.bufferSize * stream_.nUserChannels[0],
\r
3407 stream_.userFormat );
\r
3409 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3410 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3411 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3412 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3418 // Don't bother draining input
\r
3419 if ( handle->drainCounter ) {
\r
3420 handle->drainCounter++;
\r
3424 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3426 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3428 if (stream_.doConvertBuffer[1]) {
\r
3430 // Always interleave ASIO input data.
\r
3431 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3432 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3433 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3434 handle->bufferInfos[i].buffers[bufferIndex],
\r
3438 if ( stream_.doByteSwap[1] )
\r
3439 byteSwapBuffer( stream_.deviceBuffer,
\r
3440 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3441 stream_.deviceFormat[1] );
\r
3442 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3446 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3447 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3448 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3449 handle->bufferInfos[i].buffers[bufferIndex],
\r
3454 if ( stream_.doByteSwap[1] )
\r
3455 byteSwapBuffer( stream_.userBuffer[1],
\r
3456 stream_.bufferSize * stream_.nUserChannels[1],
\r
3457 stream_.userFormat );
\r
3462 // The following call was suggested by Malte Clasen. While the API
\r
3463 // documentation indicates it should not be required, some device
\r
3464 // drivers apparently do not function correctly without it.
\r
3465 ASIOOutputReady();
\r
3467 RtApi::tickStreamTime();
\r
3471 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3473 // The ASIO documentation says that this usually only happens during
\r
3474 // external sync. Audio processing is not stopped by the driver,
\r
3475 // actual sample rate might not have even changed, maybe only the
\r
3476 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3479 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3481 object->stopStream();
\r
3483 catch ( RtAudioError &exception ) {
\r
3484 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3488 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3491 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3495 switch( selector ) {
\r
3496 case kAsioSelectorSupported:
\r
3497 if ( value == kAsioResetRequest
\r
3498 || value == kAsioEngineVersion
\r
3499 || value == kAsioResyncRequest
\r
3500 || value == kAsioLatenciesChanged
\r
3501 // The following three were added for ASIO 2.0, you don't
\r
3502 // necessarily have to support them.
\r
3503 || value == kAsioSupportsTimeInfo
\r
3504 || value == kAsioSupportsTimeCode
\r
3505 || value == kAsioSupportsInputMonitor)
\r
3508 case kAsioResetRequest:
\r
3509 // Defer the task and perform the reset of the driver during the
\r
3510 // next "safe" situation. You cannot reset the driver right now,
\r
3511 // as this code is called from the driver. Reset the driver is
\r
3512 // done by completely destruct is. I.e. ASIOStop(),
\r
3513 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3515 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3518 case kAsioResyncRequest:
\r
3519 // This informs the application that the driver encountered some
\r
3520 // non-fatal data loss. It is used for synchronization purposes
\r
3521 // of different media. Added mainly to work around the Win16Mutex
\r
3522 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3523 // which could lose data because the Mutex was held too long by
\r
3524 // another thread. However a driver can issue it in other
\r
3525 // situations, too.
\r
3526 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3530 case kAsioLatenciesChanged:
\r
3531 // This will inform the host application that the drivers were
\r
3532 // latencies changed. Beware, it this does not mean that the
\r
3533 // buffer sizes have changed! You might need to update internal
\r
3535 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3538 case kAsioEngineVersion:
\r
3539 // Return the supported ASIO version of the host application. If
\r
3540 // a host application does not implement this selector, ASIO 1.0
\r
3541 // is assumed by the driver.
\r
3544 case kAsioSupportsTimeInfo:
\r
3545 // Informs the driver whether the
\r
3546 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3547 // For compatibility with ASIO 1.0 drivers the host application
\r
3548 // should always support the "old" bufferSwitch method, too.
\r
3551 case kAsioSupportsTimeCode:
\r
3552 // Informs the driver whether application is interested in time
\r
3553 // code info. If an application does not need to know about time
\r
3554 // code, the driver has less work to do.
\r
3561 static const char* getAsioErrorString( ASIOError result )
\r
3566 const char*message;
\r
3569 static const Messages m[] =
\r
3571 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3572 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3573 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3574 { ASE_InvalidMode, "Invalid mode." },
\r
3575 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3576 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3577 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3580 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3581 if ( m[i].value == result ) return m[i].message;
\r
3583 return "Unknown error.";
\r
3586 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3590 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3592 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3593 // - Introduces support for the Windows WASAPI API
\r
3594 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3595 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3596 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3601 #include <audioclient.h>
\r
3603 #include <mmdeviceapi.h>
\r
3604 #include <functiondiscoverykeys_devpkey.h>
\r
3606 //=============================================================================
\r
3608 #define SAFE_RELEASE( objectPtr )\
\r
3611 objectPtr->Release();\
\r
3612 objectPtr = NULL;\
\r
3615 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3617 //-----------------------------------------------------------------------------
\r
3619 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3620 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3621 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3622 // provide intermediate storage for read / write synchronization.
\r
3623 class WasapiBuffer
\r
3627 : buffer_( NULL ),
\r
3636 // sets the length of the internal ring buffer
\r
3637 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3640 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3642 bufferSize_ = bufferSize;
\r
3647 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3648 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3650 if ( !buffer || // incoming buffer is NULL
\r
3651 bufferSize == 0 || // incoming buffer has no data
\r
3652 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3657 unsigned int relOutIndex = outIndex_;
\r
3658 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3659 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3660 relOutIndex += bufferSize_;
\r
3663 // "in" index can end on the "out" index but cannot begin at it
\r
3664 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3665 return false; // not enough space between "in" index and "out" index
\r
3668 // copy buffer from external to internal
\r
3669 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3670 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3671 int fromInSize = bufferSize - fromZeroSize;
\r
3675 case RTAUDIO_SINT8:
\r
3676 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3677 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3679 case RTAUDIO_SINT16:
\r
3680 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3681 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3683 case RTAUDIO_SINT24:
\r
3684 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3685 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3687 case RTAUDIO_SINT32:
\r
3688 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3689 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3691 case RTAUDIO_FLOAT32:
\r
3692 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3693 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3695 case RTAUDIO_FLOAT64:
\r
3696 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3697 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3701 // update "in" index
\r
3702 inIndex_ += bufferSize;
\r
3703 inIndex_ %= bufferSize_;
\r
3708 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3709 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3711 if ( !buffer || // incoming buffer is NULL
\r
3712 bufferSize == 0 || // incoming buffer has no data
\r
3713 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3718 unsigned int relInIndex = inIndex_;
\r
3719 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3720 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3721 relInIndex += bufferSize_;
\r
3724 // "out" index can begin at and end on the "in" index
\r
3725 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3726 return false; // not enough space between "out" index and "in" index
\r
3729 // copy buffer from internal to external
\r
3730 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3731 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3732 int fromOutSize = bufferSize - fromZeroSize;
\r
3736 case RTAUDIO_SINT8:
\r
3737 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3738 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3740 case RTAUDIO_SINT16:
\r
3741 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3742 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3744 case RTAUDIO_SINT24:
\r
3745 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3746 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3748 case RTAUDIO_SINT32:
\r
3749 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3750 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3752 case RTAUDIO_FLOAT32:
\r
3753 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3754 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3756 case RTAUDIO_FLOAT64:
\r
3757 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3758 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3762 // update "out" index
\r
3763 outIndex_ += bufferSize;
\r
3764 outIndex_ %= bufferSize_;
\r
3771 unsigned int bufferSize_;
\r
3772 unsigned int inIndex_;
\r
3773 unsigned int outIndex_;
\r
3776 //-----------------------------------------------------------------------------
\r
3778 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3779 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3780 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3781 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3782 // one rate and its multiple.
\r
3783 void convertBufferWasapi( char* outBuffer,
\r
3784 const char* inBuffer,
\r
3785 const unsigned int& channelCount,
\r
3786 const unsigned int& inSampleRate,
\r
3787 const unsigned int& outSampleRate,
\r
3788 const unsigned int& inSampleCount,
\r
3789 unsigned int& outSampleCount,
\r
3790 const RtAudioFormat& format )
\r
3792 // calculate the new outSampleCount and relative sampleStep
\r
3793 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3794 float sampleStep = 1.0f / sampleRatio;
\r
3795 float inSampleFraction = 0.0f;
\r
3797 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3799 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3800 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3802 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3806 case RTAUDIO_SINT8:
\r
3807 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3809 case RTAUDIO_SINT16:
\r
3810 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3812 case RTAUDIO_SINT24:
\r
3813 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3815 case RTAUDIO_SINT32:
\r
3816 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3818 case RTAUDIO_FLOAT32:
\r
3819 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3821 case RTAUDIO_FLOAT64:
\r
3822 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3826 // jump to next in sample
\r
3827 inSampleFraction += sampleStep;
\r
3831 //-----------------------------------------------------------------------------
\r
3833 // A structure to hold various information related to the WASAPI implementation.
\r
3834 struct WasapiHandle
\r
3836 IAudioClient* captureAudioClient;
\r
3837 IAudioClient* renderAudioClient;
\r
3838 IAudioCaptureClient* captureClient;
\r
3839 IAudioRenderClient* renderClient;
\r
3840 HANDLE captureEvent;
\r
3841 HANDLE renderEvent;
\r
3844 : captureAudioClient( NULL ),
\r
3845 renderAudioClient( NULL ),
\r
3846 captureClient( NULL ),
\r
3847 renderClient( NULL ),
\r
3848 captureEvent( NULL ),
\r
3849 renderEvent( NULL ) {}
\r
3852 //=============================================================================
\r
3854 RtApiWasapi::RtApiWasapi()
\r
3855 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3857 // WASAPI can run either apartment or multi-threaded
\r
3858 HRESULT hr = CoInitialize( NULL );
\r
3859 if ( !FAILED( hr ) )
\r
3860 coInitialized_ = true;
\r
3862 // Instantiate device enumerator
\r
3863 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3864 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3865 ( void** ) &deviceEnumerator_ );
\r
3867 if ( FAILED( hr ) ) {
\r
3868 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3869 error( RtAudioError::DRIVER_ERROR );
\r
3873 //-----------------------------------------------------------------------------
\r
3875 RtApiWasapi::~RtApiWasapi()
\r
3877 if ( stream_.state != STREAM_CLOSED )
\r
3880 SAFE_RELEASE( deviceEnumerator_ );
\r
3882 // If this object previously called CoInitialize()
\r
3883 if ( coInitialized_ )
\r
3887 //=============================================================================
\r
3889 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3891 unsigned int captureDeviceCount = 0;
\r
3892 unsigned int renderDeviceCount = 0;
\r
3894 IMMDeviceCollection* captureDevices = NULL;
\r
3895 IMMDeviceCollection* renderDevices = NULL;
\r
3897 // Count capture devices
\r
3898 errorText_.clear();
\r
3899 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3900 if ( FAILED( hr ) ) {
\r
3901 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3905 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3906 if ( FAILED( hr ) ) {
\r
3907 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3911 // Count render devices
\r
3912 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3913 if ( FAILED( hr ) ) {
\r
3914 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3918 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3919 if ( FAILED( hr ) ) {
\r
3920 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3925 // release all references
\r
3926 SAFE_RELEASE( captureDevices );
\r
3927 SAFE_RELEASE( renderDevices );
\r
3929 if ( errorText_.empty() )
\r
3930 return captureDeviceCount + renderDeviceCount;
\r
3932 error( RtAudioError::DRIVER_ERROR );
\r
3936 //-----------------------------------------------------------------------------
\r
3938 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3940 RtAudio::DeviceInfo info;
\r
3941 unsigned int captureDeviceCount = 0;
\r
3942 unsigned int renderDeviceCount = 0;
\r
3943 std::wstring deviceName;
\r
3944 std::string defaultDeviceName;
\r
3945 bool isCaptureDevice = false;
\r
3947 PROPVARIANT deviceNameProp;
\r
3948 PROPVARIANT defaultDeviceNameProp;
\r
3950 IMMDeviceCollection* captureDevices = NULL;
\r
3951 IMMDeviceCollection* renderDevices = NULL;
\r
3952 IMMDevice* devicePtr = NULL;
\r
3953 IMMDevice* defaultDevicePtr = NULL;
\r
3954 IAudioClient* audioClient = NULL;
\r
3955 IPropertyStore* devicePropStore = NULL;
\r
3956 IPropertyStore* defaultDevicePropStore = NULL;
\r
3958 WAVEFORMATEX* deviceFormat = NULL;
\r
3959 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3962 info.probed = false;
\r
3964 // Count capture devices
\r
3965 errorText_.clear();
\r
3966 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3967 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3968 if ( FAILED( hr ) ) {
\r
3969 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3973 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3974 if ( FAILED( hr ) ) {
\r
3975 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3979 // Count render devices
\r
3980 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3981 if ( FAILED( hr ) ) {
\r
3982 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3986 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3987 if ( FAILED( hr ) ) {
\r
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3992 // validate device index
\r
3993 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3994 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3995 errorType = RtAudioError::INVALID_USE;
\r
3999 // determine whether index falls within capture or render devices
\r
4000 if ( device >= renderDeviceCount ) {
\r
4001 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4002 if ( FAILED( hr ) ) {
\r
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4006 isCaptureDevice = true;
\r
4009 hr = renderDevices->Item( device, &devicePtr );
\r
4010 if ( FAILED( hr ) ) {
\r
4011 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4014 isCaptureDevice = false;
\r
4017 // get default device name
\r
4018 if ( isCaptureDevice ) {
\r
4019 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4020 if ( FAILED( hr ) ) {
\r
4021 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4026 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4027 if ( FAILED( hr ) ) {
\r
4028 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4033 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4034 if ( FAILED( hr ) ) {
\r
4035 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4038 PropVariantInit( &defaultDeviceNameProp );
\r
4040 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4041 if ( FAILED( hr ) ) {
\r
4042 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4046 deviceName = defaultDeviceNameProp.pwszVal;
\r
4047 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4050 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4051 if ( FAILED( hr ) ) {
\r
4052 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4056 PropVariantInit( &deviceNameProp );
\r
4058 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4059 if ( FAILED( hr ) ) {
\r
4060 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4064 deviceName = deviceNameProp.pwszVal;
\r
4065 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4068 if ( isCaptureDevice ) {
\r
4069 info.isDefaultInput = info.name == defaultDeviceName;
\r
4070 info.isDefaultOutput = false;
\r
4073 info.isDefaultInput = false;
\r
4074 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4078 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4084 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4085 if ( FAILED( hr ) ) {
\r
4086 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4090 if ( isCaptureDevice ) {
\r
4091 info.inputChannels = deviceFormat->nChannels;
\r
4092 info.outputChannels = 0;
\r
4093 info.duplexChannels = 0;
\r
4096 info.inputChannels = 0;
\r
4097 info.outputChannels = deviceFormat->nChannels;
\r
4098 info.duplexChannels = 0;
\r
4102 info.sampleRates.clear();
\r
4104 // allow support for all sample rates as we have a built-in sample rate converter
\r
4105 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4106 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4110 info.nativeFormats = 0;
\r
4112 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4113 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4114 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4116 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4117 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4119 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4120 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4123 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4124 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4125 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4127 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4128 info.nativeFormats |= RTAUDIO_SINT8;
\r
4130 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4131 info.nativeFormats |= RTAUDIO_SINT16;
\r
4133 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4134 info.nativeFormats |= RTAUDIO_SINT24;
\r
4136 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4137 info.nativeFormats |= RTAUDIO_SINT32;
\r
4142 info.probed = true;
\r
4145 // release all references
\r
4146 PropVariantClear( &deviceNameProp );
\r
4147 PropVariantClear( &defaultDeviceNameProp );
\r
4149 SAFE_RELEASE( captureDevices );
\r
4150 SAFE_RELEASE( renderDevices );
\r
4151 SAFE_RELEASE( devicePtr );
\r
4152 SAFE_RELEASE( defaultDevicePtr );
\r
4153 SAFE_RELEASE( audioClient );
\r
4154 SAFE_RELEASE( devicePropStore );
\r
4155 SAFE_RELEASE( defaultDevicePropStore );
\r
4157 CoTaskMemFree( deviceFormat );
\r
4158 CoTaskMemFree( closestMatchFormat );
\r
4160 if ( !errorText_.empty() )
\r
4161 error( errorType );
\r
4165 //-----------------------------------------------------------------------------
\r
4167 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4169 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4170 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4178 //-----------------------------------------------------------------------------
\r
4180 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4182 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4183 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4191 //-----------------------------------------------------------------------------
\r
4193 void RtApiWasapi::closeStream( void )
\r
4195 if ( stream_.state == STREAM_CLOSED ) {
\r
4196 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4197 error( RtAudioError::WARNING );
\r
4201 if ( stream_.state != STREAM_STOPPED )
\r
4204 // clean up stream memory
\r
4205 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4208 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4209 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4211 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4212 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4214 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4215 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4217 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4218 stream_.apiHandle = NULL;
\r
4220 for ( int i = 0; i < 2; i++ ) {
\r
4221 if ( stream_.userBuffer[i] ) {
\r
4222 free( stream_.userBuffer[i] );
\r
4223 stream_.userBuffer[i] = 0;
\r
4227 if ( stream_.deviceBuffer ) {
\r
4228 free( stream_.deviceBuffer );
\r
4229 stream_.deviceBuffer = 0;
\r
4232 // update stream state
\r
4233 stream_.state = STREAM_CLOSED;
\r
4236 //-----------------------------------------------------------------------------
\r
4238 void RtApiWasapi::startStream( void )
\r
4242 if ( stream_.state == STREAM_RUNNING ) {
\r
4243 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4244 error( RtAudioError::WARNING );
\r
4248 // update stream state
\r
4249 stream_.state = STREAM_RUNNING;
\r
4251 // create WASAPI stream thread
\r
4252 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4254 if ( !stream_.callbackInfo.thread ) {
\r
4255 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4256 error( RtAudioError::THREAD_ERROR );
\r
4259 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4260 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4264 //-----------------------------------------------------------------------------
\r
4266 void RtApiWasapi::stopStream( void )
\r
4270 if ( stream_.state == STREAM_STOPPED ) {
\r
4271 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4272 error( RtAudioError::WARNING );
\r
4276 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4277 stream_.state = STREAM_STOPPING;
\r
4279 // wait until stream thread is stopped
\r
4280 while( stream_.state != STREAM_STOPPED ) {
\r
4284 // Wait for the last buffer to play before stopping.
\r
4285 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4287 // stop capture client if applicable
\r
4288 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4289 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4290 if ( FAILED( hr ) ) {
\r
4291 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4292 error( RtAudioError::DRIVER_ERROR );
\r
4297 // stop render client if applicable
\r
4298 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4299 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4300 if ( FAILED( hr ) ) {
\r
4301 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4302 error( RtAudioError::DRIVER_ERROR );
\r
4307 // close thread handle
\r
4308 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4309 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4310 error( RtAudioError::THREAD_ERROR );
\r
4314 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4317 //-----------------------------------------------------------------------------
\r
4319 void RtApiWasapi::abortStream( void )
\r
4323 if ( stream_.state == STREAM_STOPPED ) {
\r
4324 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4325 error( RtAudioError::WARNING );
\r
4329 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4330 stream_.state = STREAM_STOPPING;
\r
4332 // wait until stream thread is stopped
\r
4333 while ( stream_.state != STREAM_STOPPED ) {
\r
4337 // stop capture client if applicable
\r
4338 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4339 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4340 if ( FAILED( hr ) ) {
\r
4341 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4342 error( RtAudioError::DRIVER_ERROR );
\r
4347 // stop render client if applicable
\r
4348 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4349 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4350 if ( FAILED( hr ) ) {
\r
4351 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4352 error( RtAudioError::DRIVER_ERROR );
\r
4357 // close thread handle
\r
4358 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4359 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4360 error( RtAudioError::THREAD_ERROR );
\r
4364 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4367 //-----------------------------------------------------------------------------
\r
4369 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4370 unsigned int firstChannel, unsigned int sampleRate,
\r
4371 RtAudioFormat format, unsigned int* bufferSize,
\r
4372 RtAudio::StreamOptions* options )
\r
4374 bool methodResult = FAILURE;
\r
4375 unsigned int captureDeviceCount = 0;
\r
4376 unsigned int renderDeviceCount = 0;
\r
4378 IMMDeviceCollection* captureDevices = NULL;
\r
4379 IMMDeviceCollection* renderDevices = NULL;
\r
4380 IMMDevice* devicePtr = NULL;
\r
4381 WAVEFORMATEX* deviceFormat = NULL;
\r
4382 unsigned int bufferBytes;
\r
4383 stream_.state = STREAM_STOPPED;
\r
4385 // create API Handle if not already created
\r
4386 if ( !stream_.apiHandle )
\r
4387 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4389 // Count capture devices
\r
4390 errorText_.clear();
\r
4391 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4392 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4393 if ( FAILED( hr ) ) {
\r
4394 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4398 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4399 if ( FAILED( hr ) ) {
\r
4400 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4404 // Count render devices
\r
4405 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4406 if ( FAILED( hr ) ) {
\r
4407 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4411 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4412 if ( FAILED( hr ) ) {
\r
4413 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4417 // validate device index
\r
4418 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4419 errorType = RtAudioError::INVALID_USE;
\r
4420 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4424 // determine whether index falls within capture or render devices
\r
4425 if ( device >= renderDeviceCount ) {
\r
4426 if ( mode != INPUT ) {
\r
4427 errorType = RtAudioError::INVALID_USE;
\r
4428 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4432 // retrieve captureAudioClient from devicePtr
\r
4433 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4435 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4436 if ( FAILED( hr ) ) {
\r
4437 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4441 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4442 NULL, ( void** ) &captureAudioClient );
\r
4443 if ( FAILED( hr ) ) {
\r
4444 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4448 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4449 if ( FAILED( hr ) ) {
\r
4450 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4454 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4455 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4458 if ( mode != OUTPUT ) {
\r
4459 errorType = RtAudioError::INVALID_USE;
\r
4460 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4464 // retrieve renderAudioClient from devicePtr
\r
4465 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4467 hr = renderDevices->Item( device, &devicePtr );
\r
4468 if ( FAILED( hr ) ) {
\r
4469 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4473 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4474 NULL, ( void** ) &renderAudioClient );
\r
4475 if ( FAILED( hr ) ) {
\r
4476 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4480 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4481 if ( FAILED( hr ) ) {
\r
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4486 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4487 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4490 // fill stream data
\r
4491 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4492 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4493 stream_.mode = DUPLEX;
\r
4496 stream_.mode = mode;
\r
4499 stream_.device[mode] = device;
\r
4500 stream_.doByteSwap[mode] = false;
\r
4501 stream_.sampleRate = sampleRate;
\r
4502 stream_.bufferSize = *bufferSize;
\r
4503 stream_.nBuffers = 1;
\r
4504 stream_.nUserChannels[mode] = channels;
\r
4505 stream_.channelOffset[mode] = firstChannel;
\r
4506 stream_.userFormat = format;
\r
4507 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4509 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4510 stream_.userInterleaved = false;
\r
4512 stream_.userInterleaved = true;
\r
4513 stream_.deviceInterleaved[mode] = true;
\r
4515 // Set flags for buffer conversion.
\r
4516 stream_.doConvertBuffer[mode] = false;
\r
4517 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4518 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4519 stream_.doConvertBuffer[mode] = true;
\r
4520 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4521 stream_.nUserChannels[mode] > 1 )
\r
4522 stream_.doConvertBuffer[mode] = true;
\r
4524 if ( stream_.doConvertBuffer[mode] )
\r
4525 setConvertInfo( mode, 0 );
\r
4527 // Allocate necessary internal buffers
\r
4528 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4530 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4531 if ( !stream_.userBuffer[mode] ) {
\r
4532 errorType = RtAudioError::MEMORY_ERROR;
\r
4533 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4537 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4538 stream_.callbackInfo.priority = 15;
\r
4540 stream_.callbackInfo.priority = 0;
\r
4542 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4543 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4545 methodResult = SUCCESS;
\r
4549 SAFE_RELEASE( captureDevices );
\r
4550 SAFE_RELEASE( renderDevices );
\r
4551 SAFE_RELEASE( devicePtr );
\r
4552 CoTaskMemFree( deviceFormat );
\r
4554 // if method failed, close the stream
\r
4555 if ( methodResult == FAILURE )
\r
4558 if ( !errorText_.empty() )
\r
4559 error( errorType );
\r
4560 return methodResult;
\r
4563 //=============================================================================
\r
// Static thread entry point passed to CreateThread(): casts the opaque
// pointer back to the RtApiWasapi instance and runs its stream loop.
4565 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )

4568 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Static thread entry point: stops the stream from a separate thread so the
// audio thread itself can request an orderly shutdown without self-joining.
4573 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )

4576 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Static thread entry point: aborts the stream from a separate thread
// (same pattern as stopWasapiThread, but via abortStream()).
4581 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )

4584 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4589 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream thread. Per iteration it: (1) pulls captured audio from
// the intermediate captureBuffer and converts it (sample rate, then format)
// for the user; (2) invokes the user callback and handles its stop/abort
// return codes; (3) converts callback output and pushes it to renderBuffer;
// (4) services the device-side capture/render clients. Loops until
// stream_.state becomes STREAM_STOPPING.
4591 void RtApiWasapi::wasapiThread()

4593 // as this is a new thread, we must CoInitialize it
4594 CoInitialize( NULL );

// Per-stream WASAPI state previously stashed in the apiHandle by probeDeviceOpen.
4598 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4599 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4600 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4601 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4602 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4603 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4605 WAVEFORMATEX* captureFormat = NULL;

4606 WAVEFORMATEX* renderFormat = NULL;

// device-rate / user-rate ratios used to size the resampling buffers below
4607 float captureSrRatio = 0.0f;

4608 float renderSrRatio = 0.0f;

4609 WasapiBuffer captureBuffer;

4610 WasapiBuffer renderBuffer;

4612 // declare local stream variables

4613 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4614 BYTE* streamBuffer = NULL;

4615 unsigned long captureFlags = 0;

4616 unsigned int bufferFrameCount = 0;

4617 unsigned int numFramesPadding = 0;

4618 unsigned int convBufferSize = 0;

4619 bool callbackPushed = false;

4620 bool callbackPulled = false;

4621 bool callbackStopped = false;

4622 int callbackResult = 0;

4624 // convBuffer is used to store converted buffers between WASAPI and the user

4625 char* convBuffer = NULL;

4626 unsigned int convBuffSize = 0;

4627 unsigned int deviceBuffSize = 0;

4629 errorText_.clear();

4630 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4632 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the LoadLibrary/GetProcAddress results are used without a
// null check on the visible lines — confirm failure handling exists upstream.
4633 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4635 DWORD taskIndex = 0;

4636 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4637 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4638 FreeLibrary( AvrtDll );

4641 // start capture stream if applicable

4642 if ( captureAudioClient ) {

4643 hr = captureAudioClient->GetMixFormat( &captureFormat );

4644 if ( FAILED( hr ) ) {

4645 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4649 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4651 // initialize capture stream according to desired buffer size

4652 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// REFERENCE_TIME is expressed in 100-ns units, hence the 10,000,000 factor.
4653 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

// Only initialize the client on first entry; a pre-existing captureClient
// (stored in the apiHandle) means the stream was already set up.
4655 if ( !captureClient ) {

4656 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4657 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4658 desiredBufferPeriod,

4659 desiredBufferPeriod,

4662 if ( FAILED( hr ) ) {

4663 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4667 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4668 ( void** ) &captureClient );

4669 if ( FAILED( hr ) ) {

4670 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4674 // configure captureEvent to trigger on every available capture buffer

4675 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4676 if ( !captureEvent ) {

4677 errorType = RtAudioError::SYSTEM_ERROR;

4678 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4682 hr = captureAudioClient->SetEventHandle( captureEvent );

4683 if ( FAILED( hr ) ) {

4684 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Persist the client/event so subsequent starts reuse them.
4688 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4689 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4692 unsigned int inBufferSize = 0;

4693 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4694 if ( FAILED( hr ) ) {

4695 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4699 // scale outBufferSize according to stream->user sample rate ratio

4700 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4701 inBufferSize *= stream_.nDeviceChannels[INPUT];

4703 // set captureBuffer size

4704 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4706 // reset the capture stream

4707 hr = captureAudioClient->Reset();

4708 if ( FAILED( hr ) ) {

4709 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4713 // start the capture stream

4714 hr = captureAudioClient->Start();

4715 if ( FAILED( hr ) ) {

4716 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4721 // start render stream if applicable (mirrors the capture setup above)

4722 if ( renderAudioClient ) {

4723 hr = renderAudioClient->GetMixFormat( &renderFormat );

4724 if ( FAILED( hr ) ) {

4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4729 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4731 // initialize render stream according to desired buffer size

4732 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4733 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4735 if ( !renderClient ) {

4736 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4737 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4738 desiredBufferPeriod,

4739 desiredBufferPeriod,

4742 if ( FAILED( hr ) ) {

4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4747 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4748 ( void** ) &renderClient );

4749 if ( FAILED( hr ) ) {

4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4754 // configure renderEvent to trigger on every available render buffer

4755 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4756 if ( !renderEvent ) {

4757 errorType = RtAudioError::SYSTEM_ERROR;

4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4762 hr = renderAudioClient->SetEventHandle( renderEvent );

4763 if ( FAILED( hr ) ) {

4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4768 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4769 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4772 unsigned int outBufferSize = 0;

4773 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4774 if ( FAILED( hr ) ) {

4775 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4779 // scale inBufferSize according to user->stream sample rate ratio

4780 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4781 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4783 // set renderBuffer size

4784 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4786 // reset the render stream

4787 hr = renderAudioClient->Reset();

4788 if ( FAILED( hr ) ) {

4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4793 // start the render stream

4794 hr = renderAudioClient->Start();

4795 if ( FAILED( hr ) ) {

4796 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of the input and output requirements.
4801 if ( stream_.mode == INPUT ) {

4802 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4803 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4805 else if ( stream_.mode == OUTPUT ) {

4806 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4807 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4809 else if ( stream_.mode == DUPLEX ) {

4810 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4811 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4812 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4813 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4816 convBuffer = ( char* ) malloc( convBuffSize );

4817 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4818 if ( !convBuffer || !stream_.deviceBuffer ) {

4819 errorType = RtAudioError::MEMORY_ERROR;

4820 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4824 // stream process loop

4825 while ( stream_.state != STREAM_STOPPING ) {

4826 if ( !callbackPulled ) {

4829 // 1. Pull callback buffer from inputBuffer

4830 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4831 // Convert callback buffer to user format

4833 if ( captureAudioClient ) {

4834 // Pull callback buffer from inputBuffer

4835 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4836 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4837 stream_.deviceFormat[INPUT] );

4839 if ( callbackPulled ) {

4840 // Convert callback buffer to user sample rate

4841 convertBufferWasapi( stream_.deviceBuffer,

4843 stream_.nDeviceChannels[INPUT],

4844 captureFormat->nSamplesPerSec,

4845 stream_.sampleRate,

4846 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4848 stream_.deviceFormat[INPUT] );

4850 if ( stream_.doConvertBuffer[INPUT] ) {

4851 // Convert callback buffer to user format

4852 convertBuffer( stream_.userBuffer[INPUT],

4853 stream_.deviceBuffer,

4854 stream_.convertInfo[INPUT] );

4857 // no further conversion, simple copy deviceBuffer to userBuffer

4858 memcpy( stream_.userBuffer[INPUT],

4859 stream_.deviceBuffer,

4860 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4865 // if there is no capture stream, set callbackPulled flag

4866 callbackPulled = true;

4869 // Execute Callback

4870 // ================

4871 // 1. Execute user callback method

4872 // 2. Handle return value from callback

4874 // if callback has not requested the stream to stop

4875 if ( callbackPulled && !callbackStopped ) {

4876 // Execute user callback method

4877 callbackResult = callback( stream_.userBuffer[OUTPUT],

4878 stream_.userBuffer[INPUT],

4879 stream_.bufferSize,

4881 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4882 stream_.callbackInfo.userData );

4884 // Handle return value from callback

// Return value 1 = drain and stop; handled by spawning stopWasapiThread so
// this thread is not stopping itself.
4885 if ( callbackResult == 1 ) {

4886 // instantiate a thread to stop this thread

4887 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4888 if ( !threadHandle ) {

4889 errorType = RtAudioError::THREAD_ERROR;

4890 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4893 else if ( !CloseHandle( threadHandle ) ) {

4894 errorType = RtAudioError::THREAD_ERROR;

4895 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4899 callbackStopped = true;

// Return value 2 = abort immediately, via abortWasapiThread.
4901 else if ( callbackResult == 2 ) {

4902 // instantiate a thread to stop this thread

4903 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4904 if ( !threadHandle ) {

4905 errorType = RtAudioError::THREAD_ERROR;

4906 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4909 else if ( !CloseHandle( threadHandle ) ) {

4910 errorType = RtAudioError::THREAD_ERROR;

4911 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4915 callbackStopped = true;

4920 // Callback Output

4921 // ===============

4922 // 1. Convert callback buffer to stream format

4923 // 2. Convert callback buffer to stream sample rate and channel count

4924 // 3. Push callback buffer into outputBuffer

4926 if ( renderAudioClient && callbackPulled ) {

4927 if ( stream_.doConvertBuffer[OUTPUT] ) {

4928 // Convert callback buffer to stream format

4929 convertBuffer( stream_.deviceBuffer,

4930 stream_.userBuffer[OUTPUT],

4931 stream_.convertInfo[OUTPUT] );

4935 // Convert callback buffer to stream sample rate

4936 convertBufferWasapi( convBuffer,

4937 stream_.deviceBuffer,

4938 stream_.nDeviceChannels[OUTPUT],

4939 stream_.sampleRate,

4940 renderFormat->nSamplesPerSec,

4941 stream_.bufferSize,

4943 stream_.deviceFormat[OUTPUT] );

4945 // Push callback buffer into outputBuffer

4946 callbackPushed = renderBuffer.pushBuffer( convBuffer,

4947 convBufferSize * stream_.nDeviceChannels[OUTPUT],

4948 stream_.deviceFormat[OUTPUT] );

4951 // if there is no render stream, set callbackPushed flag

4952 callbackPushed = true;

4957 // 1. Get capture buffer from stream

4958 // 2. Push capture buffer into inputBuffer

4959 // 3. If 2. was successful: Release capture buffer

4961 if ( captureAudioClient ) {

4962 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

4963 if ( !callbackPulled ) {

4964 WaitForSingleObject( captureEvent, INFINITE );

4967 // Get capture buffer from stream

4968 hr = captureClient->GetBuffer( &streamBuffer,

4969 &bufferFrameCount,

4970 &captureFlags, NULL, NULL );

4971 if ( FAILED( hr ) ) {

4972 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

4976 if ( bufferFrameCount != 0 ) {

4977 // Push capture buffer into inputBuffer

4978 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

4979 bufferFrameCount * stream_.nDeviceChannels[INPUT],

4980 stream_.deviceFormat[INPUT] ) )

4982 // Release capture buffer

4983 hr = captureClient->ReleaseBuffer( bufferFrameCount );

4984 if ( FAILED( hr ) ) {

4985 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

4991 // Inform WASAPI that capture was unsuccessful

4992 hr = captureClient->ReleaseBuffer( 0 );

4993 if ( FAILED( hr ) ) {

4994 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5001 // Inform WASAPI that capture was unsuccessful

5002 hr = captureClient->ReleaseBuffer( 0 );

5003 if ( FAILED( hr ) ) {

5004 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5012 // 1. Get render buffer from stream

5013 // 2. Pull next buffer from outputBuffer

5014 // 3. If 2. was successful: Fill render buffer with next buffer

5015 // Release render buffer

5017 if ( renderAudioClient ) {

5018 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5019 if ( callbackPulled && !callbackPushed ) {

5020 WaitForSingleObject( renderEvent, INFINITE );

5023 // Get render buffer from stream

5024 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5025 if ( FAILED( hr ) ) {

5026 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5030 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5031 if ( FAILED( hr ) ) {

5032 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Writable space = total buffer size minus frames still queued for playback.
5036 bufferFrameCount -= numFramesPadding;

5038 if ( bufferFrameCount != 0 ) {

5039 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5040 if ( FAILED( hr ) ) {

5041 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5045 // Pull next buffer from outputBuffer

5046 // Fill render buffer with next buffer

5047 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5048 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5049 stream_.deviceFormat[OUTPUT] ) )

5051 // Release render buffer

5052 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5053 if ( FAILED( hr ) ) {

5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5060 // Inform WASAPI that render was unsuccessful

5061 hr = renderClient->ReleaseBuffer( 0, 0 );

5062 if ( FAILED( hr ) ) {

5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5070 // Inform WASAPI that render was unsuccessful

5071 hr = renderClient->ReleaseBuffer( 0, 0 );

5072 if ( FAILED( hr ) ) {

5073 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5079 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag

5080 if ( callbackPushed ) {

5081 callbackPulled = false;

5084 // tick stream time

5085 RtApi::tickStreamTime();

// Cleanup: mix formats were allocated by GetMixFormat (CoTaskMemAlloc),
// convBuffer by malloc above.
5090 CoTaskMemFree( captureFormat );

5091 CoTaskMemFree( renderFormat );

5093 free ( convBuffer );

5097 // update stream state

5098 stream_.state = STREAM_STOPPED;

5100 if ( errorText_.empty() )

5103 error( errorType );
\r
5106 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5110 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5112 // Modified by Robin Davies, October 2005
\r
5113 // - Improvements to DirectX pointer chasing.
\r
5114 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5115 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5116 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5117 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5119 #include <dsound.h>
\r
5120 #include <assert.h>
\r
5121 #include <algorithm>
\r
5123 #if defined(__MINGW32__)
\r
5124 // missing from latest mingw winapi
\r
5125 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5126 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5127 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5128 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5131 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5133 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5134 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Returns nonzero when 'pointer' lies within the circular byte range
// [earlierPointer, laterPointer) of a DirectSound buffer of 'bufferSize'
// bytes. All three offsets may have wrapped around the buffer; the
// additions below unwrap them onto a common linear scale before comparing.
5137 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )

5139 if ( pointer > bufferSize ) pointer -= bufferSize;

5140 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;

5141 if ( pointer < earlierPointer ) pointer += bufferSize;

5142 return pointer >= earlierPointer && pointer < laterPointer;
\r
5145 // A structure to hold various information related to the DirectSound
\r
5146 // API implementation.
\r
5148 unsigned int drainCounter; // Tracks callback counts when draining
\r
5149 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5153 UINT bufferPointer[2];
\r
5154 DWORD dsBufferSize[2];
\r
5155 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5159 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5162 // Declarations for utility functions, callbacks, and structures
\r
5163 // specific to the DirectSound implementation.
\r
5164 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5165 LPCTSTR description,
\r
5167 LPVOID lpContext );
\r
5169 static const char* getErrorString( int code );
\r
5171 static unsigned __stdcall callbackHandler( void *ptr );
\r
5180 : found(false) { validId[0] = false; validId[1] = false; }
\r
5183 struct DsProbeData {
\r
5185 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initializes COM for this thread (DirectSound requires it) and
// remembers whether we did, so the destructor can balance the call.
5188 RtApiDs :: RtApiDs()

5190 // Dsound will run both-threaded. If CoInitialize fails, then just

5191 // accept whatever the mainline chose for a threading model.

5192 coInitialized_ = false;

5193 HRESULT hr = CoInitialize( NULL );

5194 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: uninitializes COM only if the constructor's CoInitialize
// succeeded, and closes any stream still open.
5197 RtApiDs :: ~RtApiDs()

5199 if ( coInitialized_ ) CoUninitialize(); // balanced call.

5200 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5203 // The DirectSound default output is always the first device.
\r
5204 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5209 // The DirectSound default input is always the first input device,
\r
5210 // which is the first capture device enumerated.
\r
5211 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Enumerates DirectSound output and capture devices into dsDevices (via
// deviceQueryCallback), prunes entries that have disappeared since the last
// call, and returns the current device count. Enumeration failures are
// reported as warnings, not fatal errors.
5216 unsigned int RtApiDs :: getDeviceCount( void )

5218 // Set query flag for previously found devices to false, so that we

5219 // can check for any devices that have disappeared.

5220 for ( unsigned int i=0; i<dsDevices.size(); i++ )

5221 dsDevices[i].found = false;

5223 // Query DirectSound devices.

5224 struct DsProbeData probeInfo;

5225 probeInfo.isInput = false;

5226 probeInfo.dsDevices = &dsDevices;

5227 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5228 if ( FAILED( result ) ) {

5229 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";

5230 errorText_ = errorStream_.str();

5231 error( RtAudioError::WARNING );

5234 // Query DirectSoundCapture devices.

5235 probeInfo.isInput = true;

5236 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5237 if ( FAILED( result ) ) {

5238 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";

5239 errorText_ = errorStream_.str();

5240 error( RtAudioError::WARNING );

5243 // Clean out any devices that may have disappeared.
// NOTE(review): erasing by ascending saved indices shifts the positions of
// later entries, so if more than one device vanished the wrong elements may
// be erased — confirm whether multi-device removal is handled elsewhere.

5244 std::vector< int > indices;

5245 for ( unsigned int i=0; i<dsDevices.size(); i++ )

5246 if ( dsDevices[i].found == false ) indices.push_back( i );

5247 //unsigned int nErased = 0;

5248 for ( unsigned int i=0; i<indices.size(); i++ )

5249 dsDevices.erase( dsDevices.begin()+indices[i] );

5250 //dsDevices.erase( dsDevices.begin()-nErased++ );

5252 return static_cast<unsigned int>(dsDevices.size());
\r
// Probes one DirectSound device and fills an RtAudio::DeviceInfo with its
// output/input channel counts, supported sample rates, and native formats.
// Output capabilities are probed first; `goto probeInput` skips ahead to the
// capture probe when the device has no valid output id. info.probed is left
// false on failure (warnings are issued, not fatal errors).
5255 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

5257 RtAudio::DeviceInfo info;

5258 info.probed = false;

5260 if ( dsDevices.size() == 0 ) {

5261 // Force a query of all devices

5263 if ( dsDevices.size() == 0 ) {

5264 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

5265 error( RtAudioError::INVALID_USE );

5270 if ( device >= dsDevices.size() ) {

5271 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

5272 error( RtAudioError::INVALID_USE );

5277 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

5279 LPDIRECTSOUND output;

5281 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5282 if ( FAILED( result ) ) {

5283 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5284 errorText_ = errorStream_.str();

5285 error( RtAudioError::WARNING );

5289 outCaps.dwSize = sizeof( outCaps );

5290 result = output->GetCaps( &outCaps );

5291 if ( FAILED( result ) ) {

5292 output->Release();

5293 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

5294 errorText_ = errorStream_.str();

5295 error( RtAudioError::WARNING );

5299 // Get output channel information.

5300 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

5302 // Get sample rate information.

5303 info.sampleRates.clear();

5304 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

5305 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

5306 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

5307 info.sampleRates.push_back( SAMPLE_RATES[k] );

5310 // Get format information.

5311 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

5312 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

5314 output->Release();

5316 if ( getDefaultOutputDevice() == device )

5317 info.isDefaultOutput = true;

// Device has no capture side: finalize with name and return early.
5319 if ( dsDevices[ device ].validId[1] == false ) {

5320 info.name = dsDevices[ device ].name;

5321 info.probed = true;

5327 LPDIRECTSOUNDCAPTURE input;

5328 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5329 if ( FAILED( result ) ) {

5330 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5331 errorText_ = errorStream_.str();

5332 error( RtAudioError::WARNING );

5337 inCaps.dwSize = sizeof( inCaps );

5338 result = input->GetCaps( &inCaps );

5339 if ( FAILED( result ) ) {

5341 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

5342 errorText_ = errorStream_.str();

5343 error( RtAudioError::WARNING );

5347 // Get input channel information.

5348 info.inputChannels = inCaps.dwChannels;

5350 // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode rate/channels/width combinations;
// stereo (dwChannels >= 2) and mono paths check the corresponding bit sets.

5351 std::vector<unsigned int> rates;

5352 if ( inCaps.dwChannels >= 2 ) {

5353 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5354 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5355 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5356 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5357 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5358 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5359 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5360 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5362 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5363 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

5364 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

5365 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

5366 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

5368 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5369 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

5370 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

5371 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

5372 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

5375 else if ( inCaps.dwChannels == 1 ) {

5376 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5377 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5378 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5379 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5380 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5381 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5382 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5383 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5385 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5386 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

5387 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

5388 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

5389 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

5391 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5392 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

5393 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

5394 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

5395 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

5398 else info.inputChannels = 0; // technically, this would be an error

5402 if ( info.inputChannels == 0 ) return info;

5404 // Copy the supported rates to the info structure but avoid duplication.

5406 for ( unsigned int i=0; i<rates.size(); i++ ) {

5408 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

5409 if ( rates[i] == info.sampleRates[j] ) {

5414 if ( found == false ) info.sampleRates.push_back( rates[i] );

5416 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

5418 // If device opens for both playback and capture, we determine the channels.

5419 if ( info.outputChannels > 0 && info.inputChannels > 0 )

5420 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

5422 if ( device == 0 ) info.isDefaultInput = true;

5424 // Copy name and return.

5425 info.name = dsDevices[ device ].name;

5426 info.probed = true;
\r
// Open the given DirectSound device for one direction (OUTPUT or INPUT) and
// fill in the stream_ structure: device/user formats, buffer sizes, the
// conversion flags, the DsHandle, and the callback thread. Returns true on
// success; every failure path sets errorText_ and (in the original source)
// jumps to the shared "error:" cleanup section at the bottom of the function.
// NOTE(review): gaps in the embedded line numbering (e.g. 5437-5439, 5516,
// 5622, 5636-5637, 5894-5898) indicate that lines were dropped from this
// copy -- closing braces, local declarations (HRESULT result, DSCAPS outCaps,
// LPVOID audioPtr, DWORD dataLen), the "error:" label, and the goto/return
// statements. Confirm against the canonical RtAudio.cpp before editing.
5430 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5431 unsigned int firstChannel, unsigned int sampleRate,
5432 RtAudioFormat format, unsigned int *bufferSize,
5433 RtAudio::StreamOptions *options )
// A DirectSound buffer is at most stereo, so channels + offset cannot exceed 2.
5435 if ( channels + firstChannel > 2 ) {
5436 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5440 size_t nDevices = dsDevices.size();
5441 if ( nDevices == 0 ) {
5442 // This should not happen because a check is made before this function is called.
5443 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5447 if ( device >= nDevices ) {
5448 // This should not happen because a check is made before this function is called.
5449 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Verify the device actually supports the requested direction: validId[0] is
// the playback GUID slot, validId[1] the capture GUID slot.
5453 if ( mode == OUTPUT ) {
5454 if ( dsDevices[ device ].validId[0] == false ) {
5455 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5456 errorText_ = errorStream_.str();
5460 else { // mode == INPUT
5461 if ( dsDevices[ device ].validId[1] == false ) {
5462 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5463 errorText_ = errorStream_.str();
5468 // According to a note in PortAudio, using GetDesktopWindow()
5469 // instead of GetForegroundWindow() is supposed to avoid problems
5470 // that occur when the application's window is not the foreground
5471 // window. Also, if the application window closes before the
5472 // DirectSound buffer, DirectSound can crash. In the past, I had
5473 // problems when using GetDesktopWindow() but it seems fine now
5474 // (January 2010). I'll leave it commented here.
5475 // HWND hWnd = GetForegroundWindow();
5476 HWND hWnd = GetDesktopWindow();
5478 // Check the numberOfBuffers parameter and limit the lowest value to
5479 // two. This is a judgement call and a value of two is probably too
5480 // low for capture, but it should work for playback.
5482 if ( options ) nBuffers = options->numberOfBuffers;
5483 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5484 if ( nBuffers < 2 ) nBuffers = 3;
5486 // Check the lower range of the user-specified buffer size and set
5487 // (arbitrarily) to a lower bound of 32.
5488 if ( *bufferSize < 32 ) *bufferSize = 32;
5490 // Create the wave format structure. The data format setting will
5491 // be determined later.
5492 WAVEFORMATEX waveFormat;
5493 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5494 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5495 waveFormat.nChannels = channels + firstChannel;
5496 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5498 // Determine the device buffer size. By default, we'll use the value
5499 // defined above (32K), but we will grow it to make allowances for
5500 // very large software buffer sizes.
5501 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5502 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle temporarily hold the DirectSound object and buffer for the
// opened direction; they are stored into the DsHandle near the end.
5504 void *ohandle = 0, *bhandle = 0;
// --- Playback (OUTPUT) setup: DirectSound object, primary + secondary buffer.
5506 if ( mode == OUTPUT ) {
5508 LPDIRECTSOUND output;
5509 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5510 if ( FAILED( result ) ) {
5511 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5512 errorText_ = errorStream_.str();
5517 outCaps.dwSize = sizeof( outCaps );
5518 result = output->GetCaps( &outCaps );
5519 if ( FAILED( result ) ) {
5520 output->Release();
5521 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5522 errorText_ = errorStream_.str();
5526 // Check channel information.
5527 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5528 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5529 errorText_ = errorStream_.str();
5533 // Check format information. Use 16-bit format unless not
5534 // supported or user requests 8-bit.
5535 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5536 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5537 waveFormat.wBitsPerSample = 16;
5538 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5541 waveFormat.wBitsPerSample = 8;
5542 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5544 stream_.userFormat = format;
5546 // Update wave format structure and buffer information.
5547 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5548 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// dsPointerLeadTime = total bytes of user-side buffering; used as the write
// lead distance in the callback and to size the device buffer below.
5549 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5551 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5552 while ( dsPointerLeadTime * 2U > dsBufferSize )
5553 dsBufferSize *= 2;
5555 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5556 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5557 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5558 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5559 if ( FAILED( result ) ) {
5560 output->Release();
5561 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5562 errorText_ = errorStream_.str();
5566 // Even though we will write to the secondary buffer, we need to
5567 // access the primary buffer to set the correct output format
5568 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5569 // buffer description.
5570 DSBUFFERDESC bufferDescription;
5571 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5572 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5573 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5575 // Obtain the primary buffer
5576 LPDIRECTSOUNDBUFFER buffer;
5577 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5578 if ( FAILED( result ) ) {
5579 output->Release();
5580 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5581 errorText_ = errorStream_.str();
5585 // Set the primary DS buffer sound format.
5586 result = buffer->SetFormat( &waveFormat );
5587 if ( FAILED( result ) ) {
5588 output->Release();
5589 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5590 errorText_ = errorStream_.str();
5594 // Setup the secondary DS buffer description.
5595 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5596 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5597 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5598 DSBCAPS_GLOBALFOCUS |
5599 DSBCAPS_GETCURRENTPOSITION2 |
5600 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5601 bufferDescription.dwBufferBytes = dsBufferSize;
5602 bufferDescription.lpwfxFormat = &waveFormat;
5604 // Try to create the secondary DS buffer. If that doesn't work,
5605 // try to use software mixing. Otherwise, there's a problem.
5606 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5607 if ( FAILED( result ) ) {
5608 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5609 DSBCAPS_GLOBALFOCUS |
5610 DSBCAPS_GETCURRENTPOSITION2 |
5611 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5612 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5613 if ( FAILED( result ) ) {
5614 output->Release();
5615 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5616 errorText_ = errorStream_.str();
5621 // Get the buffer size ... might be different from what we specified.
5623 dsbcaps.dwSize = sizeof( DSBCAPS );
5624 result = buffer->GetCaps( &dsbcaps );
5625 if ( FAILED( result ) ) {
5626 output->Release();
5627 buffer->Release();
5628 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5629 errorText_ = errorStream_.str();
5633 dsBufferSize = dsbcaps.dwBufferBytes;
5635 // Lock the DS buffer
5638 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5639 if ( FAILED( result ) ) {
5640 output->Release();
5641 buffer->Release();
5642 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5643 errorText_ = errorStream_.str();
5647 // Zero the DS buffer
5648 ZeroMemory( audioPtr, dataLen );
5650 // Unlock the DS buffer
5651 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5652 if ( FAILED( result ) ) {
5653 output->Release();
5654 buffer->Release();
5655 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5656 errorText_ = errorStream_.str();
5660 ohandle = (void *) output;
5661 bhandle = (void *) buffer;
// --- Capture (INPUT) setup: capture object and capture buffer.
5664 if ( mode == INPUT ) {
5666 LPDIRECTSOUNDCAPTURE input;
5667 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5668 if ( FAILED( result ) ) {
5669 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5670 errorText_ = errorStream_.str();
5675 inCaps.dwSize = sizeof( inCaps );
5676 result = input->GetCaps( &inCaps );
5677 if ( FAILED( result ) ) {
5679 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5680 errorText_ = errorStream_.str();
5684 // Check channel information.
5685 if ( inCaps.dwChannels < channels + firstChannel ) {
5686 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5690 // Check format information. Use 16-bit format unless user
5691 // requests 8-bit.
5692 DWORD deviceFormats;
5693 if ( channels + firstChannel == 2 ) {
// Stereo capture: 8-bit only if the caller asked for SINT8 AND the device
// advertises at least one stereo 8-bit WAVE_FORMAT_* rate.
5694 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5695 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5696 waveFormat.wBitsPerSample = 8;
5697 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5699 else { // assume 16-bit is supported
5700 waveFormat.wBitsPerSample = 16;
5701 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5704 else { // channel == 1
5705 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5706 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5707 waveFormat.wBitsPerSample = 8;
5708 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5710 else { // assume 16-bit is supported
5711 waveFormat.wBitsPerSample = 16;
5712 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5715 stream_.userFormat = format;
5717 // Update wave format structure and buffer information.
5718 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5719 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5720 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5722 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5723 while ( dsPointerLeadTime * 2U > dsBufferSize )
5724 dsBufferSize *= 2;
5726 // Setup the secondary DS buffer description.
5727 DSCBUFFERDESC bufferDescription;
5728 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5729 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5730 bufferDescription.dwFlags = 0;
5731 bufferDescription.dwReserved = 0;
5732 bufferDescription.dwBufferBytes = dsBufferSize;
5733 bufferDescription.lpwfxFormat = &waveFormat;
5735 // Create the capture buffer.
5736 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5737 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5738 if ( FAILED( result ) ) {
5740 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5741 errorText_ = errorStream_.str();
5745 // Get the buffer size ... might be different from what we specified.
5746 DSCBCAPS dscbcaps;
5747 dscbcaps.dwSize = sizeof( DSCBCAPS );
5748 result = buffer->GetCaps( &dscbcaps );
5749 if ( FAILED( result ) ) {
5751 buffer->Release();
5752 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5753 errorText_ = errorStream_.str();
5757 dsBufferSize = dscbcaps.dwBufferBytes;
5759 // NOTE: We could have a problem here if this is a duplex stream
5760 // and the play and capture hardware buffer sizes are different
5761 // (I'm actually not sure if that is a problem or not).
5762 // Currently, we are not verifying that.
5764 // Lock the capture buffer
5767 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5768 if ( FAILED( result ) ) {
5770 buffer->Release();
5771 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5772 errorText_ = errorStream_.str();
5776 // Zero the buffer
5777 ZeroMemory( audioPtr, dataLen );
5779 // Unlock the buffer
5780 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5781 if ( FAILED( result ) ) {
5783 buffer->Release();
5784 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5785 errorText_ = errorStream_.str();
5789 ohandle = (void *) input;
5790 bhandle = (void *) buffer;
5793 // Set various stream parameters
5794 DsHandle *handle = 0;
5795 stream_.nDeviceChannels[mode] = channels + firstChannel;
5796 stream_.nUserChannels[mode] = channels;
5797 stream_.bufferSize = *bufferSize;
5798 stream_.channelOffset[mode] = firstChannel;
5799 stream_.deviceInterleaved[mode] = true;
5800 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5801 else stream_.userInterleaved = true;
5803 // Set flag for buffer conversion
// Conversion is needed whenever channel count, sample format, or
// interleaving differs between the user side and the device side.
5804 stream_.doConvertBuffer[mode] = false;
5805 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5806 stream_.doConvertBuffer[mode] = true;
5807 if (stream_.userFormat != stream_.deviceFormat[mode])
5808 stream_.doConvertBuffer[mode] = true;
5809 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5810 stream_.nUserChannels[mode] > 1 )
5811 stream_.doConvertBuffer[mode] = true;
5813 // Allocate necessary internal buffers
5814 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5815 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5816 if ( stream_.userBuffer[mode] == NULL ) {
5817 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5821 if ( stream_.doConvertBuffer[mode] ) {
5823 bool makeBuffer = true;
5824 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5825 if ( mode == INPUT ) {
// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
5826 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5827 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5828 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5832 if ( makeBuffer ) {
5833 bufferBytes *= *bufferSize;
5834 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5835 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5836 if ( stream_.deviceBuffer == NULL ) {
5837 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5843 // Allocate our DsHandle structures for the stream.
5844 if ( stream_.apiHandle == 0 ) {
5846 handle = new DsHandle;
5848 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle --
// looks like a copy/paste slip inherited from the ASIO backend.
5849 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5853 // Create a manual-reset event.
5854 handle->condition = CreateEvent( NULL, // no security
5855 TRUE, // manual-reset
5856 FALSE, // non-signaled initially
5857 NULL ); // unnamed
5858 stream_.apiHandle = (void *) handle;
5861 handle = (DsHandle *) stream_.apiHandle;
5862 handle->id[mode] = ohandle;
5863 handle->buffer[mode] = bhandle;
5864 handle->dsBufferSize[mode] = dsBufferSize;
5865 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5867 stream_.device[mode] = device;
5868 stream_.state = STREAM_STOPPED;
5869 if ( stream_.mode == OUTPUT && mode == INPUT )
5870 // We had already set up an output stream.
5871 stream_.mode = DUPLEX;
5873 stream_.mode = mode;
5874 stream_.nBuffers = nBuffers;
5875 stream_.sampleRate = sampleRate;
5877 // Setup the buffer conversion information structure.
5878 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5880 // Setup the callback thread.
// Only one callback thread per stream: skip if it is already running
// (e.g. the second half of a duplex open).
5881 if ( stream_.callbackInfo.isRunning == false ) {
5882 unsigned threadId;
5883 stream_.callbackInfo.isRunning = true;
5884 stream_.callbackInfo.object = (void *) this;
5885 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5886 &stream_.callbackInfo, 0, &threadId );
5887 if ( stream_.callbackInfo.thread == 0 ) {
5888 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5892 // Boost DS thread priority
5893 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Failure cleanup path. NOTE(review): the "error:" label itself appears to
// have been dropped from this copy (numbering gap 5894-5898). Releases any
// DirectSound objects/buffers, the condition event, and the heap buffers,
// then marks the stream closed before returning FAILURE.
5899 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5900 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5901 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5902 if ( buffer ) buffer->Release();
5903 object->Release();
5905 if ( handle->buffer[1] ) {
5906 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5907 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5908 if ( buffer ) buffer->Release();
5909 object->Release();
5911 CloseHandle( handle->condition );
5913 stream_.apiHandle = 0;
5916 for ( int i=0; i<2; i++ ) {
5917 if ( stream_.userBuffer[i] ) {
5918 free( stream_.userBuffer[i] );
5919 stream_.userBuffer[i] = 0;
5923 if ( stream_.deviceBuffer ) {
5924 free( stream_.deviceBuffer );
5925 stream_.deviceBuffer = 0;
5928 stream_.state = STREAM_CLOSED;
// Tear down an open stream: stop the callback thread, release all
// DirectSound objects/buffers, close the condition event, free the user and
// device buffers, and reset the stream state to CLOSED.
// NOTE(review): numbering gaps (5933, 5937-5939, 5944, 5946, 5950-5951, ...)
// show braces and a few lines were dropped from this copy -- confirm against
// the canonical RtAudio.cpp.
5932 void RtApiDs :: closeStream()
// Warn (not error) if there is nothing to close.
5934 if ( stream_.state == STREAM_CLOSED ) {
5935 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5936 error( RtAudioError::WARNING );
5940 // Stop the callback thread.
// Clearing isRunning tells the callback loop to exit; then join and close it.
5941 stream_.callbackInfo.isRunning = false;
5942 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5943 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5945 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the playback buffer/object (slot 0), then capture (slot 1).
5947 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5948 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5949 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5952 buffer->Release();
5954 object->Release();
5956 if ( handle->buffer[1] ) {
5957 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5958 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5961 buffer->Release();
5963 object->Release();
5965 CloseHandle( handle->condition );
5967 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
5970 for ( int i=0; i<2; i++ ) {
5971 if ( stream_.userBuffer[i] ) {
5972 free( stream_.userBuffer[i] );
5973 stream_.userBuffer[i] = 0;
5977 if ( stream_.deviceBuffer ) {
5978 free( stream_.deviceBuffer );
5979 stream_.deviceBuffer = 0;
5982 stream_.mode = UNINITIALIZED;
5983 stream_.state = STREAM_CLOSED;
// Start a stopped stream: raise the timer resolution, start the playback
// and/or capture DirectSound buffers looping, reset the drain bookkeeping,
// and mark the stream RUNNING. On any DirectSound failure, errorText_ is set
// and the function reports a SYSTEM_ERROR at the bottom.
// NOTE(review): numbering gaps (5987-5988, 6012, 6023, ...) indicate dropped
// lines (braces, goto unlock targets) in this copy.
5986 void RtApiDs :: startStream()
5989 if ( stream_.state == STREAM_RUNNING ) {
5990 errorText_ = "RtApiDs::startStream(): the stream is already running!";
5991 error( RtAudioError::WARNING );
5995 DsHandle *handle = (DsHandle *) stream_.apiHandle;
5997 // Increase scheduler frequency on lesser windows (a side-effect of
5998 // increasing timer accuracy). On greater windows (Win2K or later),
5999 // this is already in effect.
6000 timeBeginPeriod( 1 );
// Reset duplex-synchronization state; see callbackEvent for how these are used.
6002 buffersRolling = false;
6003 duplexPrerollBytes = 0;
6005 if ( stream_.mode == DUPLEX ) {
6006 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6007 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6010 HRESULT result = 0;
// Start the playback buffer looping (OUTPUT or DUPLEX).
6011 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6013 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6014 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6015 if ( FAILED( result ) ) {
6016 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6017 errorText_ = errorStream_.str();
// Start the capture buffer looping (INPUT or DUPLEX).
6022 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6024 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6025 result = buffer->Start( DSCBSTART_LOOPING );
6026 if ( FAILED( result ) ) {
6027 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6028 errorText_ = errorStream_.str();
// Clear drain state and the stop-signal event before going RUNNING.
6033 handle->drainCounter = 0;
6034 handle->internalDrain = false;
6035 ResetEvent( handle->condition );
6036 stream_.state = STREAM_RUNNING;
6039 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output, optionally waits for the callback to
// drain pending audio (signaled via handle->condition), then stops each
// DirectSound buffer, zero-fills it so a restart does not replay stale data,
// and rewinds the software buffer pointers. Restores normal timer resolution
// before reporting any error.
// NOTE(review): numbering gaps (6043-6044, 6052-6053, 6059-6060, 6062, ...)
// indicate dropped lines (braces, audioPtr/dataLen declarations) in this copy.
6042 void RtApiDs :: stopStream()
6045 if ( stream_.state == STREAM_STOPPED ) {
6046 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6047 error( RtAudioError::WARNING );
6051 HRESULT result = 0;
6054 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6055 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the user did not request a drain yet; set it to 2
// and block until the callback thread signals that playback has drained.
6056 if ( handle->drainCounter == 0 ) {
6057 handle->drainCounter = 2;
6058 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6061 stream_.state = STREAM_STOPPED;
6063 MUTEX_LOCK( &stream_.mutex );
6065 // Stop the buffer and clear memory
6066 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6067 result = buffer->Stop();
6068 if ( FAILED( result ) ) {
6069 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6070 errorText_ = errorStream_.str();
6074 // Lock the buffer and clear it so that if we start to play again,
6075 // we won't have old data playing.
6076 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6077 if ( FAILED( result ) ) {
6078 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6079 errorText_ = errorStream_.str();
6083 // Zero the DS buffer
6084 ZeroMemory( audioPtr, dataLen );
6086 // Unlock the DS buffer
6087 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6088 if ( FAILED( result ) ) {
6089 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6090 errorText_ = errorStream_.str();
6094 // If we start playing again, we must begin at beginning of buffer.
6095 handle->bufferPointer[0] = 0;
6098 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6099 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6103 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6105 if ( stream_.mode != DUPLEX )
6106 MUTEX_LOCK( &stream_.mutex );
6108 result = buffer->Stop();
6109 if ( FAILED( result ) ) {
6110 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6111 errorText_ = errorStream_.str();
6115 // Lock the buffer and clear it so that if we start to play again,
6116 // we won't have old data playing.
6117 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6118 if ( FAILED( result ) ) {
6119 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6120 errorText_ = errorStream_.str();
6124 // Zero the DS buffer
6125 ZeroMemory( audioPtr, dataLen );
6127 // Unlock the DS buffer
6128 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6129 if ( FAILED( result ) ) {
6130 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6131 errorText_ = errorStream_.str();
6135 // If we start recording again, we must begin at beginning of buffer.
6136 handle->bufferPointer[1] = 0;
6140 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6141 MUTEX_UNLOCK( &stream_.mutex );
6143 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining: setting drainCounter to 2 makes
// the callback write silence instead of fresh user data on its next pass.
// NOTE(review): the tail of this function (numbering jumps 6156 -> 6161) was
// dropped from this copy -- presumably the call that actually stops the
// stream; confirm against the canonical RtAudio.cpp.
6146 void RtApiDs :: abortStream()
6149 if ( stream_.state == STREAM_STOPPED ) {
6150 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6151 error( RtAudioError::WARNING );
6155 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6156 handle->drainCounter = 2;
6161 void RtApiDs :: callbackEvent()
\r
6163 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6164 Sleep( 50 ); // sleep 50 milliseconds
\r
6168 if ( stream_.state == STREAM_CLOSED ) {
\r
6169 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6170 error( RtAudioError::WARNING );
\r
6174 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6175 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6177 // Check if we were draining the stream and signal is finished.
\r
6178 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6180 stream_.state = STREAM_STOPPING;
\r
6181 if ( handle->internalDrain == false )
\r
6182 SetEvent( handle->condition );
\r
6188 // Invoke user callback to get fresh output data UNLESS we are
\r
6189 // draining stream.
\r
6190 if ( handle->drainCounter == 0 ) {
\r
6191 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6192 double streamTime = getStreamTime();
\r
6193 RtAudioStreamStatus status = 0;
\r
6194 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6195 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6196 handle->xrun[0] = false;
\r
6198 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6199 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6200 handle->xrun[1] = false;
\r
6202 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6203 stream_.bufferSize, streamTime, status, info->userData );
\r
6204 if ( cbReturnValue == 2 ) {
\r
6205 stream_.state = STREAM_STOPPING;
\r
6206 handle->drainCounter = 2;
\r
6210 else if ( cbReturnValue == 1 ) {
\r
6211 handle->drainCounter = 1;
\r
6212 handle->internalDrain = true;
\r
6217 DWORD currentWritePointer, safeWritePointer;
\r
6218 DWORD currentReadPointer, safeReadPointer;
\r
6219 UINT nextWritePointer;
\r
6221 LPVOID buffer1 = NULL;
\r
6222 LPVOID buffer2 = NULL;
\r
6223 DWORD bufferSize1 = 0;
\r
6224 DWORD bufferSize2 = 0;
\r
6229 MUTEX_LOCK( &stream_.mutex );
\r
6230 if ( stream_.state == STREAM_STOPPED ) {
\r
6231 MUTEX_UNLOCK( &stream_.mutex );
\r
6235 if ( buffersRolling == false ) {
\r
6236 if ( stream_.mode == DUPLEX ) {
\r
6237 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6239 // It takes a while for the devices to get rolling. As a result,
\r
6240 // there's no guarantee that the capture and write device pointers
\r
6241 // will move in lockstep. Wait here for both devices to start
\r
6242 // rolling, and then set our buffer pointers accordingly.
\r
6243 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6244 // bytes later than the write buffer.
\r
6246 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6247 // take place between the two GetCurrentPosition calls... but I'm
\r
6248 // really not sure how to solve the problem. Temporarily boost to
\r
6249 // Realtime priority, maybe; but I'm not sure what priority the
\r
6250 // DirectSound service threads run at. We *should* be roughly
\r
6251 // within a ms or so of correct.
\r
6253 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6254 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6256 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6258 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6259 if ( FAILED( result ) ) {
\r
6260 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6261 errorText_ = errorStream_.str();
\r
6262 error( RtAudioError::SYSTEM_ERROR );
\r
6265 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6266 if ( FAILED( result ) ) {
\r
6267 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6268 errorText_ = errorStream_.str();
\r
6269 error( RtAudioError::SYSTEM_ERROR );
\r
6273 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6274 if ( FAILED( result ) ) {
\r
6275 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6276 errorText_ = errorStream_.str();
\r
6277 error( RtAudioError::SYSTEM_ERROR );
\r
6280 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6281 if ( FAILED( result ) ) {
\r
6282 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6283 errorText_ = errorStream_.str();
\r
6284 error( RtAudioError::SYSTEM_ERROR );
\r
6287 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6291 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6293 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6294 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6295 handle->bufferPointer[1] = safeReadPointer;
\r
6297 else if ( stream_.mode == OUTPUT ) {
\r
6299 // Set the proper nextWritePosition after initial startup.
\r
6300 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6301 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6302 if ( FAILED( result ) ) {
\r
6303 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6304 errorText_ = errorStream_.str();
\r
6305 error( RtAudioError::SYSTEM_ERROR );
\r
6308 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6309 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6312 buffersRolling = true;
\r
6315 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6317 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6319 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6320 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6321 bufferBytes *= formatBytes( stream_.userFormat );
\r
6322 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6325 // Setup parameters and do buffer conversion if necessary.
\r
6326 if ( stream_.doConvertBuffer[0] ) {
\r
6327 buffer = stream_.deviceBuffer;
\r
6328 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6329 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6330 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6333 buffer = stream_.userBuffer[0];
\r
6334 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6335 bufferBytes *= formatBytes( stream_.userFormat );
\r
6338 // No byte swapping necessary in DirectSound implementation.
\r
6340 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6341 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6343 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6344 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6346 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6347 nextWritePointer = handle->bufferPointer[0];
\r
6349 DWORD endWrite, leadPointer;
\r
6351 // Find out where the read and "safe write" pointers are.
\r
6352 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6353 if ( FAILED( result ) ) {
\r
6354 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6355 errorText_ = errorStream_.str();
\r
6356 error( RtAudioError::SYSTEM_ERROR );
\r
6360 // We will copy our output buffer into the region between
\r
6361 // safeWritePointer and leadPointer. If leadPointer is not
\r
6362 // beyond the next endWrite position, wait until it is.
\r
6363 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6364 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6365 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6366 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6367 endWrite = nextWritePointer + bufferBytes;
\r
6369 // Check whether the entire write region is behind the play pointer.
\r
6370 if ( leadPointer >= endWrite ) break;
\r
6372 // If we are here, then we must wait until the leadPointer advances
\r
6373 // beyond the end of our next write region. We use the
\r
6374 // Sleep() function to suspend operation until that happens.
\r
6375 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6376 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6377 if ( millis < 1.0 ) millis = 1.0;
\r
6378 Sleep( (DWORD) millis );
\r
6381 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6382 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6383 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6384 handle->xrun[0] = true;
\r
6385 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6386 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6387 handle->bufferPointer[0] = nextWritePointer;
\r
6388 endWrite = nextWritePointer + bufferBytes;
\r
6391 // Lock free space in the buffer
\r
6392 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6393 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6394 if ( FAILED( result ) ) {
\r
6395 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6396 errorText_ = errorStream_.str();
\r
6397 error( RtAudioError::SYSTEM_ERROR );
\r
6401 // Copy our buffer into the DS buffer
\r
6402 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6403 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6405 // Update our buffer offset and unlock sound buffer
\r
6406 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6407 if ( FAILED( result ) ) {
\r
6408 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6409 errorText_ = errorStream_.str();
\r
6410 error( RtAudioError::SYSTEM_ERROR );
\r
6413 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6414 handle->bufferPointer[0] = nextWritePointer;
\r
6417 // Don't bother draining input
\r
6418 if ( handle->drainCounter ) {
\r
6419 handle->drainCounter++;
\r
6423 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6425 // Setup parameters.
\r
6426 if ( stream_.doConvertBuffer[1] ) {
\r
6427 buffer = stream_.deviceBuffer;
\r
6428 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6429 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6432 buffer = stream_.userBuffer[1];
\r
6433 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6434 bufferBytes *= formatBytes( stream_.userFormat );
\r
6437 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6438 long nextReadPointer = handle->bufferPointer[1];
\r
6439 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6441 // Find out where the write and "safe read" pointers are.
\r
6442 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6443 if ( FAILED( result ) ) {
\r
6444 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6445 errorText_ = errorStream_.str();
\r
6446 error( RtAudioError::SYSTEM_ERROR );
\r
6450 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6451 DWORD endRead = nextReadPointer + bufferBytes;
\r
6453 // Handling depends on whether we are INPUT or DUPLEX.
\r
6454 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6455 // then a wait here will drag the write pointers into the forbidden zone.
\r
6457 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6458 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6459 // practical way to sync up the read and write pointers reliably, given the
\r
6460 // the very complex relationship between phase and increment of the read and write
\r
6463 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6464 // provide a pre-roll period of 0.5 seconds in which we return
\r
6465 // zeros from the read buffer while the pointers sync up.
\r
6467 if ( stream_.mode == DUPLEX ) {
\r
6468 if ( safeReadPointer < endRead ) {
\r
6469 if ( duplexPrerollBytes <= 0 ) {
\r
6470 // Pre-roll time over. Be more agressive.
\r
6471 int adjustment = endRead-safeReadPointer;
\r
6473 handle->xrun[1] = true;
\r
6475 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6476 // and perform fine adjustments later.
\r
6477 // - small adjustments: back off by twice as much.
\r
6478 if ( adjustment >= 2*bufferBytes )
\r
6479 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6481 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6483 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6487 // In pre=roll time. Just do it.
\r
6488 nextReadPointer = safeReadPointer - bufferBytes;
\r
6489 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6491 endRead = nextReadPointer + bufferBytes;
\r
6494 else { // mode == INPUT
\r
6495 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6496 // See comments for playback.
\r
6497 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6498 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6499 if ( millis < 1.0 ) millis = 1.0;
\r
6500 Sleep( (DWORD) millis );
\r
6502 // Wake up and find out where we are now.
\r
6503 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6504 if ( FAILED( result ) ) {
\r
6505 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6506 errorText_ = errorStream_.str();
\r
6507 error( RtAudioError::SYSTEM_ERROR );
\r
6511 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6515 // Lock free space in the buffer
\r
6516 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6517 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6518 if ( FAILED( result ) ) {
\r
6519 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6520 errorText_ = errorStream_.str();
\r
6521 error( RtAudioError::SYSTEM_ERROR );
\r
6525 if ( duplexPrerollBytes <= 0 ) {
\r
6526 // Copy our buffer into the DS buffer
\r
6527 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6528 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6531 memset( buffer, 0, bufferSize1 );
\r
6532 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6533 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6536 // Update our buffer offset and unlock sound buffer
\r
6537 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6538 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6539 if ( FAILED( result ) ) {
\r
6540 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6541 errorText_ = errorStream_.str();
\r
6542 error( RtAudioError::SYSTEM_ERROR );
\r
6545 handle->bufferPointer[1] = nextReadPointer;
\r
6547 // No byte swapping necessary in DirectSound implementation.
\r
6549 // If necessary, convert 8-bit data from unsigned to signed.
\r
6550 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6551 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6553 // Do buffer conversion if necessary.
\r
6554 if ( stream_.doConvertBuffer[1] )
\r
6555 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6559 MUTEX_UNLOCK( &stream_.mutex );
\r
6560 RtApi::tickStreamTime();
\r
6563 // Definitions for utility functions and callbacks
\r
6564 // specific to the DirectSound implementation.
\r
6566 static unsigned __stdcall callbackHandler( void *ptr )
\r
6568 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6569 RtApiDs *object = (RtApiDs *) info->object;
\r
6570 bool* isRunning = &info->isRunning;
\r
6572 while ( *isRunning == true ) {
\r
6573 object->callbackEvent();
\r
6576 _endthreadex( 0 );
\r
6580 #include "tchar.h"
\r
6582 static std::string convertTChar( LPCTSTR name )
\r
6584 #if defined( UNICODE ) || defined( _UNICODE )
\r
6585 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6586 std::string s( length-1, '\0' );
\r
6587 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6589 std::string s( name );
\r
6595 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6596 LPCTSTR description,
\r
6597 LPCTSTR /*module*/,
\r
6598 LPVOID lpContext )
\r
6600 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6601 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6604 bool validDevice = false;
\r
6605 if ( probeInfo.isInput == true ) {
\r
6607 LPDIRECTSOUNDCAPTURE object;
\r
6609 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6610 if ( hr != DS_OK ) return TRUE;
\r
6612 caps.dwSize = sizeof(caps);
\r
6613 hr = object->GetCaps( &caps );
\r
6614 if ( hr == DS_OK ) {
\r
6615 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6616 validDevice = true;
\r
6618 object->Release();
\r
6622 LPDIRECTSOUND object;
\r
6623 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6624 if ( hr != DS_OK ) return TRUE;
\r
6626 caps.dwSize = sizeof(caps);
\r
6627 hr = object->GetCaps( &caps );
\r
6628 if ( hr == DS_OK ) {
\r
6629 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6630 validDevice = true;
\r
6632 object->Release();
\r
6635 // If good device, then save its name and guid.
\r
6636 std::string name = convertTChar( description );
\r
6637 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6638 if ( lpguid == NULL )
\r
6639 name = "Default Device";
\r
6640 if ( validDevice ) {
\r
6641 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6642 if ( dsDevices[i].name == name ) {
\r
6643 dsDevices[i].found = true;
\r
6644 if ( probeInfo.isInput ) {
\r
6645 dsDevices[i].id[1] = lpguid;
\r
6646 dsDevices[i].validId[1] = true;
\r
6649 dsDevices[i].id[0] = lpguid;
\r
6650 dsDevices[i].validId[0] = true;
\r
6657 device.name = name;
\r
6658 device.found = true;
\r
6659 if ( probeInfo.isInput ) {
\r
6660 device.id[1] = lpguid;
\r
6661 device.validId[1] = true;
\r
6664 device.id[0] = lpguid;
\r
6665 device.validId[0] = true;
\r
6667 dsDevices.push_back( device );
\r
6673 static const char* getErrorString( int code )
\r
6677 case DSERR_ALLOCATED:
\r
6678 return "Already allocated";
\r
6680 case DSERR_CONTROLUNAVAIL:
\r
6681 return "Control unavailable";
\r
6683 case DSERR_INVALIDPARAM:
\r
6684 return "Invalid parameter";
\r
6686 case DSERR_INVALIDCALL:
\r
6687 return "Invalid call";
\r
6689 case DSERR_GENERIC:
\r
6690 return "Generic error";
\r
6692 case DSERR_PRIOLEVELNEEDED:
\r
6693 return "Priority level needed";
\r
6695 case DSERR_OUTOFMEMORY:
\r
6696 return "Out of memory";
\r
6698 case DSERR_BADFORMAT:
\r
6699 return "The sample rate or the channel format is not supported";
\r
6701 case DSERR_UNSUPPORTED:
\r
6702 return "Not supported";
\r
6704 case DSERR_NODRIVER:
\r
6705 return "No driver";
\r
6707 case DSERR_ALREADYINITIALIZED:
\r
6708 return "Already initialized";
\r
6710 case DSERR_NOAGGREGATION:
\r
6711 return "No aggregation";
\r
6713 case DSERR_BUFFERLOST:
\r
6714 return "Buffer lost";
\r
6716 case DSERR_OTHERAPPHASPRIO:
\r
6717 return "Another application already has priority";
\r
6719 case DSERR_UNINITIALIZED:
\r
6720 return "Uninitialized";
\r
6723 return "DirectSound unknown error";
\r
6726 //******************** End of __WINDOWS_DS__ *********************//
\r
6730 #if defined(__LINUX_ALSA__)
\r
6732 #include <alsa/asoundlib.h>
\r
6733 #include <unistd.h>
\r
6735 // A structure to hold various information related to the ALSA API
\r
6736 // implementation.
\r
6737 struct AlsaHandle {
\r
6738 snd_pcm_t *handles[2];
\r
6739 bool synchronized;
\r
6741 pthread_cond_t runnable_cv;
\r
6745 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6748 static void *alsaCallbackHandler( void * ptr );
\r
6750 RtApiAlsa :: RtApiAlsa()
\r
6752 // Nothing to do here.
\r
6755 RtApiAlsa :: ~RtApiAlsa()
\r
6757 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6760 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6762 unsigned nDevices = 0;
\r
6763 int result, subdevice, card;
\r
6765 snd_ctl_t *handle;
\r
6767 // Count cards and devices
\r
6769 snd_card_next( &card );
\r
6770 while ( card >= 0 ) {
\r
6771 sprintf( name, "hw:%d", card );
\r
6772 result = snd_ctl_open( &handle, name, 0 );
\r
6773 if ( result < 0 ) {
\r
6774 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6775 errorText_ = errorStream_.str();
\r
6776 error( RtAudioError::WARNING );
\r
6781 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6782 if ( result < 0 ) {
\r
6783 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6784 errorText_ = errorStream_.str();
\r
6785 error( RtAudioError::WARNING );
\r
6788 if ( subdevice < 0 )
\r
6793 snd_ctl_close( handle );
\r
6794 snd_card_next( &card );
\r
6797 result = snd_ctl_open( &handle, "default", 0 );
\r
6798 if (result == 0) {
\r
6800 snd_ctl_close( handle );
\r
6806 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6808 RtAudio::DeviceInfo info;
\r
6809 info.probed = false;
\r
6811 unsigned nDevices = 0;
\r
6812 int result, subdevice, card;
\r
6814 snd_ctl_t *chandle;
\r
6816 // Count cards and devices
\r
6819 snd_card_next( &card );
\r
6820 while ( card >= 0 ) {
\r
6821 sprintf( name, "hw:%d", card );
\r
6822 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6823 if ( result < 0 ) {
\r
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6825 errorText_ = errorStream_.str();
\r
6826 error( RtAudioError::WARNING );
\r
6831 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6832 if ( result < 0 ) {
\r
6833 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6834 errorText_ = errorStream_.str();
\r
6835 error( RtAudioError::WARNING );
\r
6838 if ( subdevice < 0 ) break;
\r
6839 if ( nDevices == device ) {
\r
6840 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6846 snd_ctl_close( chandle );
\r
6847 snd_card_next( &card );
\r
6850 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6851 if ( result == 0 ) {
\r
6852 if ( nDevices == device ) {
\r
6853 strcpy( name, "default" );
\r
6859 if ( nDevices == 0 ) {
\r
6860 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6861 error( RtAudioError::INVALID_USE );
\r
6865 if ( device >= nDevices ) {
\r
6866 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6867 error( RtAudioError::INVALID_USE );
\r
6873 // If a stream is already open, we cannot probe the stream devices.
\r
6874 // Thus, use the saved results.
\r
6875 if ( stream_.state != STREAM_CLOSED &&
\r
6876 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6877 snd_ctl_close( chandle );
\r
6878 if ( device >= devices_.size() ) {
\r
6879 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6880 error( RtAudioError::WARNING );
\r
6883 return devices_[ device ];
\r
6886 int openMode = SND_PCM_ASYNC;
\r
6887 snd_pcm_stream_t stream;
\r
6888 snd_pcm_info_t *pcminfo;
\r
6889 snd_pcm_info_alloca( &pcminfo );
\r
6890 snd_pcm_t *phandle;
\r
6891 snd_pcm_hw_params_t *params;
\r
6892 snd_pcm_hw_params_alloca( ¶ms );
\r
6894 // First try for playback unless default device (which has subdev -1)
\r
6895 stream = SND_PCM_STREAM_PLAYBACK;
\r
6896 snd_pcm_info_set_stream( pcminfo, stream );
\r
6897 if ( subdevice != -1 ) {
\r
6898 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6899 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6901 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6902 if ( result < 0 ) {
\r
6903 // Device probably doesn't support playback.
\r
6904 goto captureProbe;
\r
6908 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6909 if ( result < 0 ) {
\r
6910 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6911 errorText_ = errorStream_.str();
\r
6912 error( RtAudioError::WARNING );
\r
6913 goto captureProbe;
\r
6916 // The device is open ... fill the parameter structure.
\r
6917 result = snd_pcm_hw_params_any( phandle, params );
\r
6918 if ( result < 0 ) {
\r
6919 snd_pcm_close( phandle );
\r
6920 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6921 errorText_ = errorStream_.str();
\r
6922 error( RtAudioError::WARNING );
\r
6923 goto captureProbe;
\r
6926 // Get output channel information.
\r
6927 unsigned int value;
\r
6928 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6929 if ( result < 0 ) {
\r
6930 snd_pcm_close( phandle );
\r
6931 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6932 errorText_ = errorStream_.str();
\r
6933 error( RtAudioError::WARNING );
\r
6934 goto captureProbe;
\r
6936 info.outputChannels = value;
\r
6937 snd_pcm_close( phandle );
\r
6940 stream = SND_PCM_STREAM_CAPTURE;
\r
6941 snd_pcm_info_set_stream( pcminfo, stream );
\r
6943 // Now try for capture unless default device (with subdev = -1)
\r
6944 if ( subdevice != -1 ) {
\r
6945 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6946 snd_ctl_close( chandle );
\r
6947 if ( result < 0 ) {
\r
6948 // Device probably doesn't support capture.
\r
6949 if ( info.outputChannels == 0 ) return info;
\r
6950 goto probeParameters;
\r
6954 snd_ctl_close( chandle );
\r
6956 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6957 if ( result < 0 ) {
\r
6958 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6959 errorText_ = errorStream_.str();
\r
6960 error( RtAudioError::WARNING );
\r
6961 if ( info.outputChannels == 0 ) return info;
\r
6962 goto probeParameters;
\r
6965 // The device is open ... fill the parameter structure.
\r
6966 result = snd_pcm_hw_params_any( phandle, params );
\r
6967 if ( result < 0 ) {
\r
6968 snd_pcm_close( phandle );
\r
6969 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6970 errorText_ = errorStream_.str();
\r
6971 error( RtAudioError::WARNING );
\r
6972 if ( info.outputChannels == 0 ) return info;
\r
6973 goto probeParameters;
\r
6976 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6977 if ( result < 0 ) {
\r
6978 snd_pcm_close( phandle );
\r
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6980 errorText_ = errorStream_.str();
\r
6981 error( RtAudioError::WARNING );
\r
6982 if ( info.outputChannels == 0 ) return info;
\r
6983 goto probeParameters;
\r
6985 info.inputChannels = value;
\r
6986 snd_pcm_close( phandle );
\r
6988 // If device opens for both playback and capture, we determine the channels.
\r
6989 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6990 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6992 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6993 if ( device == 0 && info.outputChannels > 0 )
\r
6994 info.isDefaultOutput = true;
\r
6995 if ( device == 0 && info.inputChannels > 0 )
\r
6996 info.isDefaultInput = true;
\r
6999 // At this point, we just need to figure out the supported data
\r
7000 // formats and sample rates. We'll proceed by opening the device in
\r
7001 // the direction with the maximum number of channels, or playback if
\r
7002 // they are equal. This might limit our sample rate options, but so
\r
7005 if ( info.outputChannels >= info.inputChannels )
\r
7006 stream = SND_PCM_STREAM_PLAYBACK;
\r
7008 stream = SND_PCM_STREAM_CAPTURE;
\r
7009 snd_pcm_info_set_stream( pcminfo, stream );
\r
7011 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7012 if ( result < 0 ) {
\r
7013 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7014 errorText_ = errorStream_.str();
\r
7015 error( RtAudioError::WARNING );
\r
7019 // The device is open ... fill the parameter structure.
\r
7020 result = snd_pcm_hw_params_any( phandle, params );
\r
7021 if ( result < 0 ) {
\r
7022 snd_pcm_close( phandle );
\r
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7024 errorText_ = errorStream_.str();
\r
7025 error( RtAudioError::WARNING );
\r
7029 // Test our discrete set of sample rate values.
\r
7030 info.sampleRates.clear();
\r
7031 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7032 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7033 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7035 if ( info.sampleRates.size() == 0 ) {
\r
7036 snd_pcm_close( phandle );
\r
7037 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7038 errorText_ = errorStream_.str();
\r
7039 error( RtAudioError::WARNING );
\r
7043 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7044 snd_pcm_format_t format;
\r
7045 info.nativeFormats = 0;
\r
7046 format = SND_PCM_FORMAT_S8;
\r
7047 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7048 info.nativeFormats |= RTAUDIO_SINT8;
\r
7049 format = SND_PCM_FORMAT_S16;
\r
7050 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7051 info.nativeFormats |= RTAUDIO_SINT16;
\r
7052 format = SND_PCM_FORMAT_S24;
\r
7053 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7054 info.nativeFormats |= RTAUDIO_SINT24;
\r
7055 format = SND_PCM_FORMAT_S32;
\r
7056 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7057 info.nativeFormats |= RTAUDIO_SINT32;
\r
7058 format = SND_PCM_FORMAT_FLOAT;
\r
7059 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7060 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7061 format = SND_PCM_FORMAT_FLOAT64;
\r
7062 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7063 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7065 // Check that we have at least one supported format
\r
7066 if ( info.nativeFormats == 0 ) {
\r
7067 snd_pcm_close( phandle );
\r
7068 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7069 errorText_ = errorStream_.str();
\r
7070 error( RtAudioError::WARNING );
\r
7074 // Get the device name
\r
7076 result = snd_card_get_name( card, &cardname );
\r
7077 if ( result >= 0 ) {
\r
7078 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7083 // That's all ... close the device and return
\r
7084 snd_pcm_close( phandle );
\r
7085 info.probed = true;
\r
7089 void RtApiAlsa :: saveDeviceInfo( void )
\r
7093 unsigned int nDevices = getDeviceCount();
\r
7094 devices_.resize( nDevices );
\r
7095 for ( unsigned int i=0; i<nDevices; i++ )
\r
7096 devices_[i] = getDeviceInfo( i );
\r
7099 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7100 unsigned int firstChannel, unsigned int sampleRate,
\r
7101 RtAudioFormat format, unsigned int *bufferSize,
\r
7102 RtAudio::StreamOptions *options )
\r
7105 #if defined(__RTAUDIO_DEBUG__)
\r
7106 snd_output_t *out;
\r
7107 snd_output_stdio_attach(&out, stderr, 0);
\r
7110 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7112 unsigned nDevices = 0;
\r
7113 int result, subdevice, card;
\r
7115 snd_ctl_t *chandle;
\r
7117 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7118 snprintf(name, sizeof(name), "%s", "default");
\r
7120 // Count cards and devices
\r
7122 snd_card_next( &card );
\r
7123 while ( card >= 0 ) {
\r
7124 sprintf( name, "hw:%d", card );
\r
7125 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7126 if ( result < 0 ) {
\r
7127 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7128 errorText_ = errorStream_.str();
\r
7133 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7134 if ( result < 0 ) break;
\r
7135 if ( subdevice < 0 ) break;
\r
7136 if ( nDevices == device ) {
\r
7137 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7138 snd_ctl_close( chandle );
\r
7143 snd_ctl_close( chandle );
\r
7144 snd_card_next( &card );
\r
7147 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7148 if ( result == 0 ) {
\r
7149 if ( nDevices == device ) {
\r
7150 strcpy( name, "default" );
\r
7156 if ( nDevices == 0 ) {
\r
7157 // This should not happen because a check is made before this function is called.
\r
7158 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7162 if ( device >= nDevices ) {
\r
7163 // This should not happen because a check is made before this function is called.
\r
7164 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7171 // The getDeviceInfo() function will not work for a device that is
\r
7172 // already open. Thus, we'll probe the system before opening a
\r
7173 // stream and save the results for use by getDeviceInfo().
\r
7174 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7175 this->saveDeviceInfo();
\r
7177 snd_pcm_stream_t stream;
\r
7178 if ( mode == OUTPUT )
\r
7179 stream = SND_PCM_STREAM_PLAYBACK;
\r
7181 stream = SND_PCM_STREAM_CAPTURE;
\r
7183 snd_pcm_t *phandle;
\r
7184 int openMode = SND_PCM_ASYNC;
\r
7185 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7186 if ( result < 0 ) {
\r
7187 if ( mode == OUTPUT )
\r
7188 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7190 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7191 errorText_ = errorStream_.str();
\r
7195 // Fill the parameter structure.
\r
7196 snd_pcm_hw_params_t *hw_params;
\r
7197 snd_pcm_hw_params_alloca( &hw_params );
\r
7198 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7199 if ( result < 0 ) {
\r
7200 snd_pcm_close( phandle );
\r
7201 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7202 errorText_ = errorStream_.str();
\r
7206 #if defined(__RTAUDIO_DEBUG__)
\r
7207 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7208 snd_pcm_hw_params_dump( hw_params, out );
\r
7211 // Set access ... check user preference.
\r
7212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7213 stream_.userInterleaved = false;
\r
7214 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7215 if ( result < 0 ) {
\r
7216 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7217 stream_.deviceInterleaved[mode] = true;
\r
7220 stream_.deviceInterleaved[mode] = false;
\r
7223 stream_.userInterleaved = true;
\r
7224 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7225 if ( result < 0 ) {
\r
7226 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7227 stream_.deviceInterleaved[mode] = false;
\r
7230 stream_.deviceInterleaved[mode] = true;
\r
7233 if ( result < 0 ) {
\r
7234 snd_pcm_close( phandle );
\r
7235 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7236 errorText_ = errorStream_.str();
\r
7240 // Determine how to set the device format.
\r
7241 stream_.userFormat = format;
\r
7242 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7244 if ( format == RTAUDIO_SINT8 )
\r
7245 deviceFormat = SND_PCM_FORMAT_S8;
\r
7246 else if ( format == RTAUDIO_SINT16 )
\r
7247 deviceFormat = SND_PCM_FORMAT_S16;
\r
7248 else if ( format == RTAUDIO_SINT24 )
\r
7249 deviceFormat = SND_PCM_FORMAT_S24;
\r
7250 else if ( format == RTAUDIO_SINT32 )
\r
7251 deviceFormat = SND_PCM_FORMAT_S32;
\r
7252 else if ( format == RTAUDIO_FLOAT32 )
\r
7253 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7254 else if ( format == RTAUDIO_FLOAT64 )
\r
7255 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7257 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7258 stream_.deviceFormat[mode] = format;
\r
7262 // The user requested format is not natively supported by the device.
\r
7263 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7264 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7265 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7269 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7270 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7271 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7275 deviceFormat = SND_PCM_FORMAT_S32;
\r
7276 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7277 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7281 deviceFormat = SND_PCM_FORMAT_S24;
\r
7282 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7283 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7287 deviceFormat = SND_PCM_FORMAT_S16;
\r
7288 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7289 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7293 deviceFormat = SND_PCM_FORMAT_S8;
\r
7294 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7295 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7299 // If we get here, no supported format was found.
\r
7300 snd_pcm_close( phandle );
\r
7301 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7302 errorText_ = errorStream_.str();
\r
7306 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7307 if ( result < 0 ) {
\r
7308 snd_pcm_close( phandle );
\r
7309 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7310 errorText_ = errorStream_.str();
\r
7314 // Determine whether byte-swaping is necessary.
\r
7315 stream_.doByteSwap[mode] = false;
\r
7316 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7317 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7318 if ( result == 0 )
\r
7319 stream_.doByteSwap[mode] = true;
\r
7320 else if (result < 0) {
\r
7321 snd_pcm_close( phandle );
\r
7322 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7323 errorText_ = errorStream_.str();
\r
7328 // Set the sample rate.
\r
7329 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7330 if ( result < 0 ) {
\r
7331 snd_pcm_close( phandle );
\r
7332 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7333 errorText_ = errorStream_.str();
\r
7337 // Determine the number of channels for this device. We support a possible
\r
7338 // minimum device channel number > than the value requested by the user.
\r
7339 stream_.nUserChannels[mode] = channels;
\r
7340 unsigned int value;
\r
7341 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7342 unsigned int deviceChannels = value;
\r
7343 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7344 snd_pcm_close( phandle );
\r
7345 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7346 errorText_ = errorStream_.str();
\r
7350 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7351 if ( result < 0 ) {
\r
7352 snd_pcm_close( phandle );
\r
7353 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7354 errorText_ = errorStream_.str();
\r
7357 deviceChannels = value;
\r
7358 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7359 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7361 // Set the device channels.
\r
7362 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7363 if ( result < 0 ) {
\r
7364 snd_pcm_close( phandle );
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7366 errorText_ = errorStream_.str();
\r
7370 // Set the buffer (or period) size.
\r
7372 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7373 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7374 if ( result < 0 ) {
\r
7375 snd_pcm_close( phandle );
\r
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7377 errorText_ = errorStream_.str();
\r
7380 *bufferSize = periodSize;
\r
7382 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7383 unsigned int periods = 0;
\r
7384 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7385 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7386 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7387 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7388 if ( result < 0 ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7391 errorText_ = errorStream_.str();
\r
7395 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7396 // MUST be the same in both directions!
\r
7397 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7398 snd_pcm_close( phandle );
\r
7399 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7400 errorText_ = errorStream_.str();
\r
7404 stream_.bufferSize = *bufferSize;
\r
7406 // Install the hardware configuration
\r
7407 result = snd_pcm_hw_params( phandle, hw_params );
\r
7408 if ( result < 0 ) {
\r
7409 snd_pcm_close( phandle );
\r
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7411 errorText_ = errorStream_.str();
\r
7415 #if defined(__RTAUDIO_DEBUG__)
\r
7416 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7417 snd_pcm_hw_params_dump( hw_params, out );
\r
7420 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7421 snd_pcm_sw_params_t *sw_params = NULL;
\r
7422 snd_pcm_sw_params_alloca( &sw_params );
\r
7423 snd_pcm_sw_params_current( phandle, sw_params );
\r
7424 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7425 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7426 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7428 // The following two settings were suggested by Theo Veenker
\r
7429 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7430 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7432 // here are two options for a fix
\r
7433 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7434 snd_pcm_uframes_t val;
\r
7435 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7436 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7438 result = snd_pcm_sw_params( phandle, sw_params );
\r
7439 if ( result < 0 ) {
\r
7440 snd_pcm_close( phandle );
\r
7441 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7442 errorText_ = errorStream_.str();
\r
7446 #if defined(__RTAUDIO_DEBUG__)
\r
7447 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7448 snd_pcm_sw_params_dump( sw_params, out );
\r
7451 // Set flags for buffer conversion
\r
7452 stream_.doConvertBuffer[mode] = false;
\r
7453 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7454 stream_.doConvertBuffer[mode] = true;
\r
7455 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7456 stream_.doConvertBuffer[mode] = true;
\r
7457 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7458 stream_.nUserChannels[mode] > 1 )
\r
7459 stream_.doConvertBuffer[mode] = true;
\r
7461 // Allocate the ApiHandle if necessary and then save.
\r
7462 AlsaHandle *apiInfo = 0;
\r
7463 if ( stream_.apiHandle == 0 ) {
\r
7465 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7467 catch ( std::bad_alloc& ) {
\r
7468 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7472 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7473 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7477 stream_.apiHandle = (void *) apiInfo;
\r
7478 apiInfo->handles[0] = 0;
\r
7479 apiInfo->handles[1] = 0;
\r
7482 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7484 apiInfo->handles[mode] = phandle;
\r
7487 // Allocate necessary internal buffers.
\r
7488 unsigned long bufferBytes;
\r
7489 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7490 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7491 if ( stream_.userBuffer[mode] == NULL ) {
\r
7492 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7496 if ( stream_.doConvertBuffer[mode] ) {
\r
7498 bool makeBuffer = true;
\r
7499 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7500 if ( mode == INPUT ) {
\r
7501 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7502 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7503 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7507 if ( makeBuffer ) {
\r
7508 bufferBytes *= *bufferSize;
\r
7509 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7510 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7511 if ( stream_.deviceBuffer == NULL ) {
\r
7512 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7518 stream_.sampleRate = sampleRate;
\r
7519 stream_.nBuffers = periods;
\r
7520 stream_.device[mode] = device;
\r
7521 stream_.state = STREAM_STOPPED;
\r
7523 // Setup the buffer conversion information structure.
\r
7524 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7526 // Setup thread if necessary.
\r
7527 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7528 // We had already set up an output stream.
\r
7529 stream_.mode = DUPLEX;
\r
7530 // Link the streams if possible.
\r
7531 apiInfo->synchronized = false;
\r
7532 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7533 apiInfo->synchronized = true;
\r
7535 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7536 error( RtAudioError::WARNING );
\r
7540 stream_.mode = mode;
\r
7542 // Setup callback thread.
\r
7543 stream_.callbackInfo.object = (void *) this;
\r
7545 // Set the thread attributes for joinable and realtime scheduling
\r
7546 // priority (optional). The higher priority will only take affect
\r
7547 // if the program is run as root or suid. Note, under Linux
\r
7548 // processes with CAP_SYS_NICE privilege, a user can change
\r
7549 // scheduling policy and priority (thus need not be root). See
\r
7550 // POSIX "capabilities".
\r
7551 pthread_attr_t attr;
\r
7552 pthread_attr_init( &attr );
\r
7553 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7555 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7556 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7557 // We previously attempted to increase the audio callback priority
\r
7558 // to SCHED_RR here via the attributes. However, while no errors
\r
7559 // were reported in doing so, it did not work. So, now this is
\r
7560 // done in the alsaCallbackHandler function.
\r
7561 stream_.callbackInfo.doRealtime = true;
\r
7562 int priority = options->priority;
\r
7563 int min = sched_get_priority_min( SCHED_RR );
\r
7564 int max = sched_get_priority_max( SCHED_RR );
\r
7565 if ( priority < min ) priority = min;
\r
7566 else if ( priority > max ) priority = max;
\r
7567 stream_.callbackInfo.priority = priority;
\r
7571 stream_.callbackInfo.isRunning = true;
\r
7572 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7573 pthread_attr_destroy( &attr );
\r
7575 stream_.callbackInfo.isRunning = false;
\r
7576 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7585 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7586 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7587 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7589 stream_.apiHandle = 0;
\r
7592 if ( phandle) snd_pcm_close( phandle );
\r
7594 for ( int i=0; i<2; i++ ) {
\r
7595 if ( stream_.userBuffer[i] ) {
\r
7596 free( stream_.userBuffer[i] );
\r
7597 stream_.userBuffer[i] = 0;
\r
7601 if ( stream_.deviceBuffer ) {
\r
7602 free( stream_.deviceBuffer );
\r
7603 stream_.deviceBuffer = 0;
\r
7606 stream_.state = STREAM_CLOSED;
\r
// Close the ALSA stream: stop the callback thread, drop any active pcm
// handles, free the AlsaHandle and internal buffers, and mark the stream
// closed.  NOTE(review): several original source lines (returns, closing
// braces) are missing from this extracted listing — verify against the
// canonical RtAudio source before building.
7610 void RtApiAlsa :: closeStream()
7612 if ( stream_.state == STREAM_CLOSED ) {
7613 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7614 error( RtAudioError::WARNING );
// Tell the callback thread to exit; if it is parked on the runnable
// condition variable (stream stopped), wake it so pthread_join can return.
7618 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7619 stream_.callbackInfo.isRunning = false;
7620 MUTEX_LOCK( &stream_.mutex );
7621 if ( stream_.state == STREAM_STOPPED ) {
7622 apiInfo->runnable = true;
7623 pthread_cond_signal( &apiInfo->runnable_cv );
7625 MUTEX_UNLOCK( &stream_.mutex );
7626 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort any in-flight audio before the pcm handles are closed.
7628 if ( stream_.state == STREAM_RUNNING ) {
7629 stream_.state = STREAM_STOPPED;
7630 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7631 snd_pcm_drop( apiInfo->handles[0] );
7632 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7633 snd_pcm_drop( apiInfo->handles[1] );
// Release the per-API handle: condition variable plus both pcm handles
// (index 0 = output, index 1 = input).
7637 pthread_cond_destroy( &apiInfo->runnable_cv );
7638 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7639 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7641 stream_.apiHandle = 0;
// Free the user-side buffers for both directions, then the shared device
// (conversion) buffer.
7644 for ( int i=0; i<2; i++ ) {
7645 if ( stream_.userBuffer[i] ) {
7646 free( stream_.userBuffer[i] );
7647 stream_.userBuffer[i] = 0;
7651 if ( stream_.deviceBuffer ) {
7652 free( stream_.deviceBuffer );
7653 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so a subsequent open starts from a clean state.
7656 stream_.mode = UNINITIALIZED;
7657 stream_.state = STREAM_CLOSED;
\r
// Start the ALSA stream: prepare the output and/or input pcm devices if
// they are not already in SND_PCM_STATE_PREPARED, mark the stream running,
// and wake the callback thread.  NOTE(review): some original lines
// (closing braces / gotos) are missing from this extracted listing.
7660 void RtApiAlsa :: startStream()
7662 // This method calls snd_pcm_prepare if the device isn't already in that state.
7665 if ( stream_.state == STREAM_RUNNING ) {
7666 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7667 error( RtAudioError::WARNING );
7671 MUTEX_LOCK( &stream_.mutex );
7674 snd_pcm_state_t state;
7675 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7676 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Output side (handle[0]).
7677 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7678 state = snd_pcm_state( handle[0] );
7679 if ( state != SND_PCM_STATE_PREPARED ) {
7680 result = snd_pcm_prepare( handle[0] );
7681 if ( result < 0 ) {
7682 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7683 errorText_ = errorStream_.str();
// Input side (handle[1]) — only handled separately when the two devices
// are not linked (snd_pcm_link succeeded => synchronized == true).
7689 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7690 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7691 state = snd_pcm_state( handle[1] );
7692 if ( state != SND_PCM_STATE_PREPARED ) {
7693 result = snd_pcm_prepare( handle[1] );
7694 if ( result < 0 ) {
7695 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7696 errorText_ = errorStream_.str();
7702 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting on the runnable condition variable.
7705 apiInfo->runnable = true;
7706 pthread_cond_signal( &apiInfo->runnable_cv );
7707 MUTEX_UNLOCK( &stream_.mutex );
7709 if ( result >= 0 ) return;
7710 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the ALSA stream gracefully.  For output, linked (synchronized)
// streams are dropped immediately while unlinked streams are drained so
// queued audio finishes playing; input is always dropped.  NOTE(review):
// some original lines (braces / gotos) are missing from this listing.
7713 void RtApiAlsa :: stopStream()
7716 if ( stream_.state == STREAM_STOPPED ) {
7717 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7718 error( RtAudioError::WARNING );
// Flip state before taking the lock so the callback thread sees it.
7722 stream_.state = STREAM_STOPPED;
7723 MUTEX_LOCK( &stream_.mutex );
7726 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7727 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7728 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7729 if ( apiInfo->synchronized )
7730 result = snd_pcm_drop( handle[0] );
7732 result = snd_pcm_drain( handle[0] );
7733 if ( result < 0 ) {
7734 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7735 errorText_ = errorStream_.str();
7740 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7741 result = snd_pcm_drop( handle[1] );
7742 if ( result < 0 ) {
7743 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7744 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable instead of letting
// it spin while the stream is stopped.
7750 apiInfo->runnable = false; // fixes high CPU usage when stopped
7751 MUTEX_UNLOCK( &stream_.mutex );
7753 if ( result >= 0 ) return;
7754 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the ALSA stream immediately: unlike stopStream(), output is
// always dropped (queued audio is discarded rather than drained).
// NOTE(review): some original lines are missing from this listing.
7757 void RtApiAlsa :: abortStream()
7760 if ( stream_.state == STREAM_STOPPED ) {
7761 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7762 error( RtAudioError::WARNING );
7766 stream_.state = STREAM_STOPPED;
7767 MUTEX_LOCK( &stream_.mutex );
7770 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7771 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7772 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7773 result = snd_pcm_drop( handle[0] );
7774 if ( result < 0 ) {
7775 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7776 errorText_ = errorStream_.str();
// Input is dropped separately only when the devices are not linked.
7781 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7782 result = snd_pcm_drop( handle[1] );
7783 if ( result < 0 ) {
7784 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7785 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable.
7791 apiInfo->runnable = false; // fixes high CPU usage when stopped
7792 MUTEX_UNLOCK( &stream_.mutex );
7794 if ( result >= 0 ) return;
7795 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the ALSA callback thread: wait while stopped, invoke
// the user callback, then read input and/or write output for one buffer.
// NOTE(review): many original lines (braces, else branches, gotos, some
// local declarations such as `buffer`, `channels` and `result`) are
// missing from this extracted listing.
7798 void RtApiAlsa :: callbackEvent()
7800 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// If stopped, block on the condition variable until start/close wakes us.
7801 if ( stream_.state == STREAM_STOPPED ) {
7802 MUTEX_LOCK( &stream_.mutex );
7803 while ( !apiInfo->runnable )
7804 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7806 if ( stream_.state != STREAM_RUNNING ) {
7807 MUTEX_UNLOCK( &stream_.mutex );
7810 MUTEX_UNLOCK( &stream_.mutex );
7813 if ( stream_.state == STREAM_CLOSED ) {
7814 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7815 error( RtAudioError::WARNING );
// Report any xrun flagged by the async error handler to the user callback
// via the status word, clearing the flag (index 0 = output, 1 = input).
7819 int doStopStream = 0;
7820 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7821 double streamTime = getStreamTime();
7822 RtAudioStreamStatus status = 0;
7823 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7824 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7825 apiInfo->xrun[0] = false;
7827 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7828 status |= RTAUDIO_INPUT_OVERFLOW;
7829 apiInfo->xrun[1] = false;
7831 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7832 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 from the user callback requests an immediate abort.
7834 if ( doStopStream == 2 ) {
7839 MUTEX_LOCK( &stream_.mutex );
7841 // The state might change while waiting on a mutex.
7842 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7847 snd_pcm_t **handle;
7848 snd_pcm_sframes_t frames;
7849 RtAudioFormat format;
7850 handle = (snd_pcm_t **) apiInfo->handles;
// ----- Input side -----
7852 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7854 // Setup parameters.
7855 if ( stream_.doConvertBuffer[1] ) {
7856 buffer = stream_.deviceBuffer;
7857 channels = stream_.nDeviceChannels[1];
7858 format = stream_.deviceFormat[1];
7861 buffer = stream_.userBuffer[1];
7862 channels = stream_.nUserChannels[1];
7863 format = stream_.userFormat;
7866 // Read samples from device in interleaved/non-interleaved format.
7867 if ( stream_.deviceInterleaved[1] )
7868 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one channel pointer per plane into the buffer.
7870 void *bufs[channels];
7871 size_t offset = stream_.bufferSize * formatBytes( format );
7872 for ( int i=0; i<channels; i++ )
7873 bufs[i] = (void *) (buffer + (i * offset));
7874 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read means either an error or an overrun (-EPIPE); on overrun
// the device is re-prepared so streaming can continue.
7877 if ( result < (int) stream_.bufferSize ) {
7878 // Either an error or overrun occurred.
7879 if ( result == -EPIPE ) {
7880 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7881 if ( state == SND_PCM_STATE_XRUN ) {
7882 apiInfo->xrun[1] = true;
7883 result = snd_pcm_prepare( handle[1] );
7884 if ( result < 0 ) {
7885 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7886 errorText_ = errorStream_.str();
7890 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7891 errorText_ = errorStream_.str();
7895 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7896 errorText_ = errorStream_.str();
7898 error( RtAudioError::WARNING );
7902 // Do byte swapping if necessary.
7903 if ( stream_.doByteSwap[1] )
7904 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7906 // Do buffer conversion if necessary.
7907 if ( stream_.doConvertBuffer[1] )
7908 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7910 // Check stream latency
7911 result = snd_pcm_delay( handle[1], &frames );
7912 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ----- Output side -----
7917 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7919 // Setup parameters and do buffer conversion if necessary.
7920 if ( stream_.doConvertBuffer[0] ) {
7921 buffer = stream_.deviceBuffer;
7922 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
7923 channels = stream_.nDeviceChannels[0];
7924 format = stream_.deviceFormat[0];
7927 buffer = stream_.userBuffer[0];
7928 channels = stream_.nUserChannels[0];
7929 format = stream_.userFormat;
7932 // Do byte swapping if necessary.
7933 if ( stream_.doByteSwap[0] )
7934 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
7936 // Write samples to device in interleaved/non-interleaved format.
7937 if ( stream_.deviceInterleaved[0] )
7938 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
7940 void *bufs[channels];
7941 size_t offset = stream_.bufferSize * formatBytes( format );
7942 for ( int i=0; i<channels; i++ )
7943 bufs[i] = (void *) (buffer + (i * offset));
7944 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// A short write means either an error or an underrun (-EPIPE).
7947 if ( result < (int) stream_.bufferSize ) {
7948 // Either an error or underrun occurred.
7949 if ( result == -EPIPE ) {
7950 snd_pcm_state_t state = snd_pcm_state( handle[0] );
7951 if ( state == SND_PCM_STATE_XRUN ) {
7952 apiInfo->xrun[0] = true;
7953 result = snd_pcm_prepare( handle[0] );
7954 if ( result < 0 ) {
7955 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
7956 errorText_ = errorStream_.str();
7960 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7961 errorText_ = errorStream_.str();
7965 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
7966 errorText_ = errorStream_.str();
7968 error( RtAudioError::WARNING );
7972 // Check stream latency
7973 result = snd_pcm_delay( handle[0], &frames );
7974 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
7978 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a return of 1 from the user callback
// requests a graceful stop after this buffer.
7980 RtApi::tickStreamTime();
7981 if ( doStopStream == 1 ) this->stopStream();
\r
7984 static void *alsaCallbackHandler( void *ptr )
\r
7986 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7987 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7988 bool *isRunning = &info->isRunning;
\r
7990 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7991 if ( &info->doRealtime ) {
\r
7992 pthread_t tID = pthread_self(); // ID of this thread
\r
7993 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7994 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7998 while ( *isRunning == true ) {
\r
7999 pthread_testcancel();
\r
8000 object->callbackEvent();
\r
8003 pthread_exit( NULL );
\r
8006 //******************** End of __LINUX_ALSA__ *********************//
\r
8009 #if defined(__LINUX_PULSE__)
\r
8011 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8012 // and Tristan Matthews.
\r
8014 #include <pulse/error.h>
\r
8015 #include <pulse/simple.h>
\r
// Sample rates advertised by the PulseAudio backend; the trailing 0
// terminates the list (see the iteration in getDeviceInfo).
8018 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8019 44100, 48000, 96000, 0};
\r
// Associates an RtAudio sample format with its PulseAudio equivalent.
// NOTE(review): the closing "};" of this struct is missing from this
// extracted listing.
8021 struct rtaudio_pa_format_mapping_t {
8022 RtAudioFormat rtaudio_format;
8023 pa_sample_format_t pa_format;
\r
// Format pairs supported by the PulseAudio backend; the
// {0, PA_SAMPLE_INVALID} entry terminates the table.
8026 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8027 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8028 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8029 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8030 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream PulseAudio state: simple-API playback/record connections plus
// the condition variable used to park the callback thread while stopped.
// NOTE(review): the declarations of s_rec, the thread id, and the runnable
// flag (all referenced elsewhere, and s_rec/runnable initialized in the
// constructor below) are missing from this extracted listing.
8032 struct PulseAudioHandle {
8033 pa_simple *s_play;
8036 pthread_cond_t runnable_cv;
8038 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: ensure an open stream is shut down before the object dies.
// NOTE(review): the body statement (presumably closeStream()) is missing
// from this extracted listing — confirm against the canonical source.
8041 RtApiPulse::~RtApiPulse()
8043 if ( stream_.state != STREAM_CLOSED )
\r
// The PulseAudio backend exposes a single logical device (the server).
// NOTE(review): the function body is missing from this extracted listing.
8047 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Describe the single PulseAudio pseudo-device: stereo input/output/duplex,
// default for both directions, with the fixed sample-rate and format tables
// declared above.  The device index is ignored.
// NOTE(review): the trailing "return info;" is missing from this listing.
8052 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8054 RtAudio::DeviceInfo info;
8055 info.probed = true;
8056 info.name = "PulseAudio";
8057 info.outputChannels = 2;
8058 info.inputChannels = 2;
8059 info.duplexChannels = 2;
8060 info.isDefaultOutput = true;
8061 info.isDefaultInput = true;
// Copy every supported rate up to (excluding) the 0 terminator.
8063 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8064 info.sampleRates.push_back( *sr );
8066 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Entry point of the PulseAudio callback thread: dispatch callbackEvent()
// repeatedly until the stream clears isRunning.
8071 static void *pulseaudio_callback( void * user )
8073 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8074 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: isRunning is flipped from another thread (closeStream).
8075 volatile bool *isRunning = &cbi->isRunning;
8077 while ( *isRunning ) {
8078 pthread_testcancel();
8079 context->callbackEvent();
8082 pthread_exit( NULL );
\r
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free both simple-API connections, release the handle and the user
// buffers, and mark the stream closed.  NOTE(review): several original
// lines (braces, the s_rec null check, the delete of pah) are missing
// from this extracted listing.
8085 void RtApiPulse::closeStream( void )
8087 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8089 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on the runnable condition.
8091 MUTEX_LOCK( &stream_.mutex );
8092 if ( stream_.state == STREAM_STOPPED ) {
8093 pah->runnable = true;
8094 pthread_cond_signal( &pah->runnable_cv );
8096 MUTEX_UNLOCK( &stream_.mutex );
8098 pthread_join( pah->thread, 0 );
// Flush pending playback before freeing the playback connection.
8099 if ( pah->s_play ) {
8100 pa_simple_flush( pah->s_play, NULL );
8101 pa_simple_free( pah->s_play );
8104 pa_simple_free( pah->s_rec );
8106 pthread_cond_destroy( &pah->runnable_cv );
8108 stream_.apiHandle = 0;
// Free user-side buffers for both directions.
8111 if ( stream_.userBuffer[0] ) {
8112 free( stream_.userBuffer[0] );
8113 stream_.userBuffer[0] = 0;
8115 if ( stream_.userBuffer[1] ) {
8116 free( stream_.userBuffer[1] );
8117 stream_.userBuffer[1] = 0;
8120 stream_.state = STREAM_CLOSED;
8121 stream_.mode = UNINITIALIZED;
\r
// One iteration of the PulseAudio callback thread: wait while stopped,
// invoke the user callback, then write output to and/or read input from
// the pa_simple connections, converting formats as required.
// NOTE(review): several original lines (braces, gotos, and the local
// declarations of `bytes` and `pa_error`) are missing from this extracted
// listing.
8124 void RtApiPulse::callbackEvent( void )
8126 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// If stopped, block on the condition variable until start/close wakes us.
8128 if ( stream_.state == STREAM_STOPPED ) {
8129 MUTEX_LOCK( &stream_.mutex );
8130 while ( !pah->runnable )
8131 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8133 if ( stream_.state != STREAM_RUNNING ) {
8134 MUTEX_UNLOCK( &stream_.mutex );
8137 MUTEX_UNLOCK( &stream_.mutex );
8140 if ( stream_.state == STREAM_CLOSED ) {
8141 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8142 "this shouldn't happen!";
8143 error( RtAudioError::WARNING );
// Invoke the user callback with the current stream time; the status word
// is always 0 here (the simple API reports no xruns).
8147 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8148 double streamTime = getStreamTime();
8149 RtAudioStreamStatus status = 0;
8150 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8151 stream_.bufferSize, streamTime, status,
8152 stream_.callbackInfo.userData );
// A return of 2 from the user callback requests an immediate abort.
8154 if ( doStopStream == 2 ) {
8159 MUTEX_LOCK( &stream_.mutex );
// Choose the raw buffers handed to PulseAudio: the conversion buffer when
// format conversion is active, otherwise the user buffer directly.
8160 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8161 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// The state might have changed while waiting on the mutex.
8163 if ( stream_.state != STREAM_RUNNING )
// ----- Output side: convert then write -----
8168 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8169 if ( stream_.doConvertBuffer[OUTPUT] ) {
8170 convertBuffer( stream_.deviceBuffer,
8171 stream_.userBuffer[OUTPUT],
8172 stream_.convertInfo[OUTPUT] );
8173 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8174 formatBytes( stream_.deviceFormat[OUTPUT] );
8176 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8177 formatBytes( stream_.userFormat );
8179 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8180 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8181 pa_strerror( pa_error ) << ".";
8182 errorText_ = errorStream_.str();
8183 error( RtAudioError::WARNING );
// ----- Input side: read then convert -----
8187 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8188 if ( stream_.doConvertBuffer[INPUT] )
8189 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8190 formatBytes( stream_.deviceFormat[INPUT] );
8192 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8193 formatBytes( stream_.userFormat );
8195 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8196 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8197 pa_strerror( pa_error ) << ".";
8198 errorText_ = errorStream_.str();
8199 error( RtAudioError::WARNING );
8201 if ( stream_.doConvertBuffer[INPUT] ) {
8202 convertBuffer( stream_.userBuffer[INPUT],
8203 stream_.deviceBuffer,
8204 stream_.convertInfo[INPUT] );
8209 MUTEX_UNLOCK( &stream_.mutex );
8210 RtApi::tickStreamTime();
// A return of 1 requests a graceful stop after this buffer.
8212 if ( doStopStream == 1 )
\r
// Start the PulseAudio stream: mark it running and wake the callback
// thread parked on the runnable condition variable.
8216 void RtApiPulse::startStream( void )
8218 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8220 if ( stream_.state == STREAM_CLOSED ) {
8221 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8222 error( RtAudioError::INVALID_USE );
8225 if ( stream_.state == STREAM_RUNNING ) {
8226 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8227 error( RtAudioError::WARNING );
8231 MUTEX_LOCK( &stream_.mutex );
8233 stream_.state = STREAM_RUNNING;
8235 pah->runnable = true;
8236 pthread_cond_signal( &pah->runnable_cv );
8237 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the PulseAudio stream gracefully: drain queued playback so it
// finishes before the stream is marked stopped.
8240 void RtApiPulse::stopStream( void )
8242 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8244 if ( stream_.state == STREAM_CLOSED ) {
8245 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8246 error( RtAudioError::INVALID_USE );
8249 if ( stream_.state == STREAM_STOPPED ) {
8250 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8251 error( RtAudioError::WARNING );
8255 stream_.state = STREAM_STOPPED;
8256 MUTEX_LOCK( &stream_.mutex );
8258 if ( pah && pah->s_play ) {
// Drain blocks until all queued playback has been consumed.
8260 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8261 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8262 pa_strerror( pa_error ) << ".";
8263 errorText_ = errorStream_.str();
8264 MUTEX_UNLOCK( &stream_.mutex );
8265 error( RtAudioError::SYSTEM_ERROR );
8270 stream_.state = STREAM_STOPPED;
8271 MUTEX_UNLOCK( &stream_.mutex );
\r
8274 void RtApiPulse::abortStream( void )
\r
8276 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8278 if ( stream_.state == STREAM_CLOSED ) {
\r
8279 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8280 error( RtAudioError::INVALID_USE );
\r
8283 if ( stream_.state == STREAM_STOPPED ) {
\r
8284 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8285 error( RtAudioError::WARNING );
\r
8289 stream_.state = STREAM_STOPPED;
\r
8290 MUTEX_LOCK( &stream_.mutex );
\r
8292 if ( pah && pah->s_play ) {
\r
8294 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8295 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8296 pa_strerror( pa_error ) << ".";
\r
8297 errorText_ = errorStream_.str();
\r
8298 MUTEX_UNLOCK( &stream_.mutex );
\r
8299 error( RtAudioError::SYSTEM_ERROR );
\r
8304 stream_.state = STREAM_STOPPED;
\r
8305 MUTEX_UNLOCK( &stream_.mutex );
\r
// Validate the requested parameters against what the PulseAudio "simple"
// API supports, fill in stream_ for one direction (INPUT or OUTPUT),
// allocate user/device buffers, connect to the server, and spawn the
// callback thread.  Returns true on success.
8308 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8309 unsigned int channels, unsigned int firstChannel,
8310 unsigned int sampleRate, RtAudioFormat format,
8311 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8313 PulseAudioHandle *pah = 0;
8314 unsigned long bufferBytes = 0;
8315 pa_sample_spec ss;
// Only the single default device is exposed; each call sets up exactly
// one direction, and channel offsets are not supported.
8317 if ( device != 0 ) return false;
8318 if ( mode != INPUT && mode != OUTPUT ) return false;
8319 if ( channels != 1 && channels != 2 ) {
8320 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8323 ss.channels = channels;
8325 if ( firstChannel != 0 ) return false;
// Accept only rates listed in the SUPPORTED_SAMPLERATES table.
8327 bool sr_found = false;
8328 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8329 if ( sampleRate == *sr ) {
8331 stream_.sampleRate = sampleRate;
8332 ss.rate = sampleRate;
8336 if ( !sr_found ) {
8337 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look for a native PulseAudio sample format matching the user format.
8341 bool sf_found = 0;
8342 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8343 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8344 if ( format == sf->rtaudio_format ) {
8346 stream_.userFormat = sf->rtaudio_format;
8347 stream_.deviceFormat[mode] = stream_.userFormat;
8348 ss.format = sf->pa_format;
8352 if ( !sf_found ) { // Use internal data format conversion.
8353 stream_.userFormat = format;
8354 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8355 ss.format = PA_SAMPLE_FLOAT32LE;
8358 // Set other stream parameters.
8359 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8360 else stream_.userInterleaved = true;
8361 stream_.deviceInterleaved[mode] = true;
8362 stream_.nBuffers = 1;
8363 stream_.doByteSwap[mode] = false;
8364 stream_.nUserChannels[mode] = channels;
8365 stream_.nDeviceChannels[mode] = channels + firstChannel;
8366 stream_.channelOffset[mode] = 0;
8367 std::string streamName = "RtAudio";
8369 // Set flags for buffer conversion.
8370 stream_.doConvertBuffer[mode] = false;
8371 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8372 stream_.doConvertBuffer[mode] = true;
8373 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8374 stream_.doConvertBuffer[mode] = true;
8376 // Allocate necessary internal buffers.
8377 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8378 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8379 if ( stream_.userBuffer[mode] == NULL ) {
8380 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8383 stream_.bufferSize = *bufferSize;
// Device-side buffer is only needed when a format/channel conversion is
// required; reuse an existing (larger) buffer from the output setup when
// opening the input side of a duplex stream.
8385 if ( stream_.doConvertBuffer[mode] ) {
8387 bool makeBuffer = true;
8388 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8389 if ( mode == INPUT ) {
8390 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8391 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8392 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8396 if ( makeBuffer ) {
8397 bufferBytes *= *bufferSize;
8398 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8399 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8400 if ( stream_.deviceBuffer == NULL ) {
8401 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8407 stream_.device[mode] = device;
8409 // Setup the buffer conversion information structure.
8410 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the per-stream handle on first open; the condition variable is
// used to park the callback thread while the stream is stopped.
8412 if ( !stream_.apiHandle ) {
8413 PulseAudioHandle *pah = new PulseAudioHandle;
8415 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8419 stream_.apiHandle = pah;
8420 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8421 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8425 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8428 if ( !options->streamName.empty() ) streamName = options->streamName;
// Buffer attributes: fragsize is derived from bufferBytes; note they are
// passed to the record connection only.
8431 pa_buffer_attr buffer_attr;
8432 buffer_attr.fragsize = bufferBytes;
8433 buffer_attr.maxlength = -1;
8435 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8436 if ( !pah->s_rec ) {
8437 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
// NOTE(review): the playback connection passes the literal "RtAudio"
// instead of streamName.c_str() (used for the record connection above),
// so options->streamName is ignored for output — confirm and unify.
8442 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8443 if ( !pah->s_play ) {
8444 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// First direction opened sets the mode; opening the opposite direction
// afterwards promotes the stream to DUPLEX.
8452 if ( stream_.mode == UNINITIALIZED )
8453 stream_.mode = mode;
8454 else if ( stream_.mode == mode )
8457 stream_.mode = DUPLEX;
8459 if ( !stream_.callbackInfo.isRunning ) {
8460 stream_.callbackInfo.object = this;
8461 stream_.callbackInfo.isRunning = true;
8462 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8463 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8468 stream_.state = STREAM_STOPPED;
// Error path: undo the partial setup (condition variable, handle,
// user/device buffers) before failing.
8472 if ( pah && stream_.callbackInfo.isRunning ) {
8473 pthread_cond_destroy( &pah->runnable_cv );
8475 stream_.apiHandle = 0;
8478 for ( int i=0; i<2; i++ ) {
8479 if ( stream_.userBuffer[i] ) {
8480 free( stream_.userBuffer[i] );
8481 stream_.userBuffer[i] = 0;
8485 if ( stream_.deviceBuffer ) {
8486 free( stream_.deviceBuffer );
8487 stream_.deviceBuffer = 0;
\r
8493 //******************** End of __LINUX_PULSE__ *********************//
\r
8496 #if defined(__LINUX_OSS__)
\r
8498 #include <unistd.h>
\r
8499 #include <sys/ioctl.h>
\r
8500 #include <unistd.h>
\r
8501 #include <fcntl.h>
\r
8502 #include <sys/soundcard.h>
\r
8503 #include <errno.h>
\r
8506 static void *ossCallbackHandler(void * ptr);
\r
8508 // A structure to hold various information related to the OSS API
\r
8509 // implementation.
\r
// Per-stream state for the OSS backend: the open device file descriptors,
// xrun bookkeeping and the condition variable used to park the callback
// thread while the stream is stopped.
8510 struct OssHandle {
8511 int id[2]; // device ids
// Signalled by startStream()/closeStream() to wake the callback thread.
8514 pthread_cond_t runnable;
// Constructor initializer tail: no trigger pending, descriptors zeroed,
// no xruns recorded yet.
8517 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8520 RtApiOss :: RtApiOss()
\r
8522 // Nothing to do here.
\r
8525 RtApiOss :: ~RtApiOss()
\r
8527 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8530 unsigned int RtApiOss :: getDeviceCount( void )
\r
8532 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8533 if ( mixerfd == -1 ) {
\r
8534 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8535 error( RtAudioError::WARNING );
\r
8539 oss_sysinfo sysinfo;
\r
8540 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8542 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8543 error( RtAudioError::WARNING );
\r
8548 return sysinfo.numaudios;
\r
// Probe one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl and report
// its channel counts, native data formats and supported sample rates.
// info.probed stays false on every error path.
8551 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8553 RtAudio::DeviceInfo info;
8554 info.probed = false;
8556 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8557 if ( mixerfd == -1 ) {
8558 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8559 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO requires OSS v4.
8563 oss_sysinfo sysinfo;
8564 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8565 if ( result == -1 ) {
8567 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8568 error( RtAudioError::WARNING );
8572 unsigned nDevices = sysinfo.numaudios;
8573 if ( nDevices == 0 ) {
8575 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8576 error( RtAudioError::INVALID_USE );
8580 if ( device >= nDevices ) {
8582 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8583 error( RtAudioError::INVALID_USE );
8587 oss_audioinfo ainfo;
8588 ainfo.dev = device;
8589 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8591 if ( result == -1 ) {
8592 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8593 errorText_ = errorStream_.str();
8594 error( RtAudioError::WARNING );
// Channel capabilities.  Duplex channel count is the min of input/output.
// NOTE(review): PCM_CAP_DUPLEX is tested twice (outer if and inner
// condition) — the inner test is redundant.
8599 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8600 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8601 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8602 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8603 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map the OSS input-format mask onto RtAudio format flags.
8606 // Probe data formats ... do for input
8607 unsigned long mask = ainfo.iformats;
8608 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8609 info.nativeFormats |= RTAUDIO_SINT16;
8610 if ( mask & AFMT_S8 )
8611 info.nativeFormats |= RTAUDIO_SINT8;
8612 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8613 info.nativeFormats |= RTAUDIO_SINT32;
8614 if ( mask & AFMT_FLOAT )
8615 info.nativeFormats |= RTAUDIO_FLOAT32;
8616 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8617 info.nativeFormats |= RTAUDIO_SINT24;
8619 // Check that we have at least one supported format
8620 if ( info.nativeFormats == 0 ) {
8621 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8622 errorText_ = errorStream_.str();
8623 error( RtAudioError::WARNING );
// Either the device reports an explicit rate list (nrates > 0), or a
// min/max range which we intersect with our SAMPLE_RATES table.
8627 // Probe the supported sample rates.
8628 info.sampleRates.clear();
8629 if ( ainfo.nrates ) {
8630 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8631 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8632 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8633 info.sampleRates.push_back( SAMPLE_RATES[k] );
8640 // Check min and max rate values;
8641 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8642 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
8643 info.sampleRates.push_back( SAMPLE_RATES[k] );
8647 if ( info.sampleRates.size() == 0 ) {
8648 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8649 errorText_ = errorStream_.str();
8650 error( RtAudioError::WARNING );
// Success: mark the probe valid and record the device name.
8653 info.probed = true;
8654 info.name = ainfo.name;
\r
// Open and configure one direction (INPUT or OUTPUT) of an OSS stream:
// probe the device via the mixer, open the dsp node, negotiate channels,
// format, fragment size and sample rate, allocate buffers, and start the
// callback thread.  Returns true on success.
8661 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8662 unsigned int firstChannel, unsigned int sampleRate,
8663 RtAudioFormat format, unsigned int *bufferSize,
8664 RtAudio::StreamOptions *options )
8666 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8667 if ( mixerfd == -1 ) {
8668 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8672 oss_sysinfo sysinfo;
8673 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8674 if ( result == -1 ) {
8676 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8680 unsigned nDevices = sysinfo.numaudios;
8681 if ( nDevices == 0 ) {
8682 // This should not happen because a check is made before this function is called.
8684 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8688 if ( device >= nDevices ) {
8689 // This should not happen because a check is made before this function is called.
8691 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8695 oss_audioinfo ainfo;
8696 ainfo.dev = device;
8697 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8699 if ( result == -1 ) {
8700 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8701 errorText_ = errorStream_.str();
8705 // Check if device supports input or output
8706 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8707 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8708 if ( mode == OUTPUT )
8709 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8711 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8712 errorText_ = errorStream_.str();
// Build the open(2) flags.  Opening the input side of a device already
// opened for output on the same id forces a close-and-reopen in duplex
// mode (an OSS-specific quirk).
8717 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8718 if ( mode == OUTPUT )
8719 flags |= O_WRONLY;
8720 else { // mode == INPUT
8721 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8722 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8723 close( handle->id[0] );
8724 handle->id[0] = 0;
8725 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8726 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8727 errorText_ = errorStream_.str();
8730 // Check that the number previously set channels is the same.
8731 if ( stream_.nUserChannels[0] != channels ) {
8732 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8733 errorText_ = errorStream_.str();
8739 flags |= O_RDONLY;
8742 // Set exclusive access if specified.
8743 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8745 // Try to open the device.
8747 fd = open( ainfo.devnode, flags, 0 );
8749 if ( errno == EBUSY )
8750 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8752 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8753 errorText_ = errorStream_.str();
8757 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is a bitwise OR and is therefore always
// nonzero, so SNDCTL_DSP_SETDUPLEX is attempted unconditionally;
// 'flags == O_RDWR' (or '& O_RDWR') was very likely intended — confirm.
8759 if ( flags | O_RDWR ) {
8760 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8761 if ( result == -1) {
8762 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8763 errorText_ = errorStream_.str();
8769 // Check the device channel support.
8770 stream_.nUserChannels[mode] = channels;
8771 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8773 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8774 errorText_ = errorStream_.str();
8778 // Set the number of channels.
8779 int deviceChannels = channels + firstChannel;
8780 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8781 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8783 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8784 errorText_ = errorStream_.str();
8787 stream_.nDeviceChannels[mode] = deviceChannels;
8789 // Get the data format mask
8791 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8792 if ( result == -1 ) {
8794 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8795 errorText_ = errorStream_.str();
// Prefer the user's format natively; fall back to byte-swapped (OE)
// variants with doByteSwap set, then to any supported integer format.
8799 // Determine how to set the device format.
8800 stream_.userFormat = format;
8801 int deviceFormat = -1;
8802 stream_.doByteSwap[mode] = false;
8803 if ( format == RTAUDIO_SINT8 ) {
8804 if ( mask & AFMT_S8 ) {
8805 deviceFormat = AFMT_S8;
8806 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8809 else if ( format == RTAUDIO_SINT16 ) {
8810 if ( mask & AFMT_S16_NE ) {
8811 deviceFormat = AFMT_S16_NE;
8812 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8814 else if ( mask & AFMT_S16_OE ) {
8815 deviceFormat = AFMT_S16_OE;
8816 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8817 stream_.doByteSwap[mode] = true;
8820 else if ( format == RTAUDIO_SINT24 ) {
8821 if ( mask & AFMT_S24_NE ) {
8822 deviceFormat = AFMT_S24_NE;
8823 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8825 else if ( mask & AFMT_S24_OE ) {
8826 deviceFormat = AFMT_S24_OE;
8827 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8828 stream_.doByteSwap[mode] = true;
8831 else if ( format == RTAUDIO_SINT32 ) {
8832 if ( mask & AFMT_S32_NE ) {
8833 deviceFormat = AFMT_S32_NE;
8834 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8836 else if ( mask & AFMT_S32_OE ) {
8837 deviceFormat = AFMT_S32_OE;
8838 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8839 stream_.doByteSwap[mode] = true;
8843 if ( deviceFormat == -1 ) {
8844 // The user requested format is not natively supported by the device.
8845 if ( mask & AFMT_S16_NE ) {
8846 deviceFormat = AFMT_S16_NE;
8847 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8849 else if ( mask & AFMT_S32_NE ) {
8850 deviceFormat = AFMT_S32_NE;
8851 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8853 else if ( mask & AFMT_S24_NE ) {
8854 deviceFormat = AFMT_S24_NE;
8855 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8857 else if ( mask & AFMT_S16_OE ) {
8858 deviceFormat = AFMT_S16_OE;
8859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8860 stream_.doByteSwap[mode] = true;
8862 else if ( mask & AFMT_S32_OE ) {
8863 deviceFormat = AFMT_S32_OE;
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8865 stream_.doByteSwap[mode] = true;
8867 else if ( mask & AFMT_S24_OE ) {
8868 deviceFormat = AFMT_S24_OE;
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8870 stream_.doByteSwap[mode] = true;
8872 else if ( mask & AFMT_S8) {
8873 deviceFormat = AFMT_S8;
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8878 if ( stream_.deviceFormat[mode] == 0 ) {
8879 // This really shouldn't happen ...
8881 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8882 errorText_ = errorStream_.str();
8886 // Set the data format.
8887 int temp = deviceFormat;
8888 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8889 if ( result == -1 || deviceFormat != temp ) {
8891 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8892 errorText_ = errorStream_.str();
8896 // Attempt to set the buffer size.  According to OSS, the minimum
8897 // number of buffers is two.  The supposed minimum buffer size is 16
8898 // bytes, so that will be our lower bound.  The argument to this
8899 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8900 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8901 // We'll check the actual value used near the end of the setup
8903 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8904 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8906 if ( options ) buffers = options->numberOfBuffers;
8907 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8908 if ( buffers < 2 ) buffers = 3;
8909 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8910 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8911 if ( result == -1 ) {
8913 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8914 errorText_ = errorStream_.str();
8917 stream_.nBuffers = buffers;
8919 // Save buffer size (in sample frames).
8920 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8921 stream_.bufferSize = *bufferSize;
8923 // Set the sample rate.
8924 int srate = sampleRate;
8925 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8926 if ( result == -1 ) {
8928 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8929 errorText_ = errorStream_.str();
8933 // Verify the sample rate setup worked.
// NOTE(review): sampleRate is unsigned, so 'srate - sampleRate' is an
// unsigned subtraction before abs(); a large srate shortfall could wrap —
// confirm intent (a signed cast would be safer).
8934 if ( abs( srate - sampleRate ) > 100 ) {
8936 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8937 errorText_ = errorStream_.str();
8940 stream_.sampleRate = sampleRate;
// Duplex reopen on the same device: output side inherits the newly
// negotiated format/channel count.
8942 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8943 // We're doing duplex setup here.
8944 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8945 stream_.nDeviceChannels[0] = deviceChannels;
8948 // Set interleaving parameters.
8949 stream_.userInterleaved = true;
8950 stream_.deviceInterleaved[mode] = true;
8951 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8952 stream_.userInterleaved = false;
8954 // Set flags for buffer conversion
8955 stream_.doConvertBuffer[mode] = false;
8956 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8957 stream_.doConvertBuffer[mode] = true;
8958 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8959 stream_.doConvertBuffer[mode] = true;
8960 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8961 stream_.nUserChannels[mode] > 1 )
8962 stream_.doConvertBuffer[mode] = true;
8964 // Allocate the stream handles if necessary and then save.
8965 if ( stream_.apiHandle == 0 ) {
8967 handle = new OssHandle;
8969 catch ( std::bad_alloc& ) {
8970 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8974 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8975 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8979 stream_.apiHandle = (void *) handle;
8982 handle = (OssHandle *) stream_.apiHandle;
8984 handle->id[mode] = fd;
8986 // Allocate necessary internal buffers.
8987 unsigned long bufferBytes;
8988 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8989 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8990 if ( stream_.userBuffer[mode] == NULL ) {
8991 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device-side buffer only when conversion is needed; reuse a larger
// existing buffer from the output setup where possible.
8995 if ( stream_.doConvertBuffer[mode] ) {
8997 bool makeBuffer = true;
8998 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8999 if ( mode == INPUT ) {
9000 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9001 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9002 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9006 if ( makeBuffer ) {
9007 bufferBytes *= *bufferSize;
9008 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9009 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9010 if ( stream_.deviceBuffer == NULL ) {
9011 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9017 stream_.device[mode] = device;
9018 stream_.state = STREAM_STOPPED;
9020 // Setup the buffer conversion information structure.
9021 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9023 // Setup thread if necessary.
9024 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9025 // We had already set up an output stream.
9026 stream_.mode = DUPLEX;
9027 if ( stream_.device[0] == device ) handle->id[0] = fd;
9030 stream_.mode = mode;
9032 // Setup callback thread.
9033 stream_.callbackInfo.object = (void *) this;
9035 // Set the thread attributes for joinable and realtime scheduling
9036 // priority.  The higher priority will only take affect if the
9037 // program is run as root or suid.
9038 pthread_attr_t attr;
9039 pthread_attr_init( &attr );
9040 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9041 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Clamp the requested priority into the valid SCHED_RR range.
9042 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9043 struct sched_param param;
9044 int priority = options->priority;
9045 int min = sched_get_priority_min( SCHED_RR );
9046 int max = sched_get_priority_max( SCHED_RR );
9047 if ( priority < min ) priority = min;
9048 else if ( priority > max ) priority = max;
9049 param.sched_priority = priority;
// NOTE(review): "¶m" below is a mis-encoded "&param" — restore the
// original byte sequence before compiling.
9050 pthread_attr_setschedparam( &attr, ¶m );
9051 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9054 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9056 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9059 stream_.callbackInfo.isRunning = true;
9060 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9061 pthread_attr_destroy( &attr );
9063 stream_.callbackInfo.isRunning = false;
9064 errorText_ = "RtApiOss::error creating callback thread!";
// Error path: release condition variable, descriptors, handle and all
// allocated buffers before failing.
9073 pthread_cond_destroy( &handle->runnable );
9074 if ( handle->id[0] ) close( handle->id[0] );
9075 if ( handle->id[1] ) close( handle->id[1] );
9077 stream_.apiHandle = 0;
9080 for ( int i=0; i<2; i++ ) {
9081 if ( stream_.userBuffer[i] ) {
9082 free( stream_.userBuffer[i] );
9083 stream_.userBuffer[i] = 0;
9087 if ( stream_.deviceBuffer ) {
9088 free( stream_.deviceBuffer );
9089 stream_.deviceBuffer = 0;
\r
// Shut down the stream: stop the callback thread, halt the OSS devices,
// close descriptors, free buffers and reset the stream structure.
9095 void RtApiOss :: closeStream()
9097 if ( stream_.state == STREAM_CLOSED ) {
9098 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9099 error( RtAudioError::WARNING );
9103 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag, then wake a callback thread that may be parked on
// the condition variable so it can observe the flag and exit; join it.
9104 stream_.callbackInfo.isRunning = false;
9105 MUTEX_LOCK( &stream_.mutex );
9106 if ( stream_.state == STREAM_STOPPED )
9107 pthread_cond_signal( &handle->runnable );
9108 MUTEX_UNLOCK( &stream_.mutex );
9109 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt whichever directions are active.
9111 if ( stream_.state == STREAM_RUNNING ) {
9112 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9113 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9115 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9116 stream_.state = STREAM_STOPPED;
// Release OS resources held by the handle.
9120 pthread_cond_destroy( &handle->runnable );
9121 if ( handle->id[0] ) close( handle->id[0] );
9122 if ( handle->id[1] ) close( handle->id[1] );
9124 stream_.apiHandle = 0;
// Free user and device buffers for both directions.
9127 for ( int i=0; i<2; i++ ) {
9128 if ( stream_.userBuffer[i] ) {
9129 free( stream_.userBuffer[i] );
9130 stream_.userBuffer[i] = 0;
9134 if ( stream_.deviceBuffer ) {
9135 free( stream_.deviceBuffer );
9136 stream_.deviceBuffer = 0;
// Final bookkeeping: the stream object can be reused via a new open.
9139 stream_.mode = UNINITIALIZED;
9140 stream_.state = STREAM_CLOSED;
\r
9143 void RtApiOss :: startStream()
\r
9146 if ( stream_.state == STREAM_RUNNING ) {
\r
9147 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9148 error( RtAudioError::WARNING );
\r
9152 MUTEX_LOCK( &stream_.mutex );
\r
9154 stream_.state = STREAM_RUNNING;
\r
9156 // No need to do anything else here ... OSS automatically starts
\r
9157 // when fed samples.
\r
9159 MUTEX_UNLOCK( &stream_.mutex );
\r
9161 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9162 pthread_cond_signal( &handle->runnable );
\r
// Stop the stream, first padding the output with zeros so queued audio
// plays out cleanly, then halting the OSS device(s).
9165 void RtApiOss :: stopStream()
9168 if ( stream_.state == STREAM_STOPPED ) {
9169 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9170 error( RtAudioError::WARNING );
9174 MUTEX_LOCK( &stream_.mutex );
9176 // The state might change while waiting on a mutex.
9177 if ( stream_.state == STREAM_STOPPED ) {
9178 MUTEX_UNLOCK( &stream_.mutex );
9183 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9184 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9186 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted or user).
9189 RtAudioFormat format;
9191 if ( stream_.doConvertBuffer[0] ) {
9192 buffer = stream_.deviceBuffer;
9193 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9194 format = stream_.deviceFormat[0];
9197 buffer = stream_.userBuffer[0];
9198 samples = stream_.bufferSize * stream_.nUserChannels[0];
9199 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so everything queued is flushed.
9202 memset( buffer, 0, samples * formatBytes(format) );
9203 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9204 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9205 if ( result == -1 ) {
9206 errorText_ = "RtApiOss::stopStream: audio write error.";
9207 error( RtAudioError::WARNING );
// Halt the playback device and clear the duplex trigger flag.
9211 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9212 if ( result == -1 ) {
9213 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9214 errorText_ = errorStream_.str();
9217 handle->triggered = false;
// Halt the capture device too, unless duplex shares one descriptor.
9220 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9221 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9222 if ( result == -1 ) {
9223 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9224 errorText_ = errorStream_.str();
9230 stream_.state = STREAM_STOPPED;
9231 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ioctl/write calls failed.
9233 if ( result != -1 ) return;
9234 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream immediately: halt the OSS device(s) without flushing
// pending output (contrast stopStream, which pads with silence first).
9237 void RtApiOss :: abortStream()
9240 if ( stream_.state == STREAM_STOPPED ) {
9241 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9242 error( RtAudioError::WARNING );
9246 MUTEX_LOCK( &stream_.mutex );
9248 // The state might change while waiting on a mutex.
9249 if ( stream_.state == STREAM_STOPPED ) {
9250 MUTEX_UNLOCK( &stream_.mutex );
// Halt playback and clear the duplex trigger flag.
9255 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9256 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9257 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9258 if ( result == -1 ) {
9259 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9260 errorText_ = errorStream_.str();
9263 handle->triggered = false;
// Halt capture too, unless duplex shares a single descriptor.
9266 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9267 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9268 if ( result == -1 ) {
9269 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9270 errorText_ = errorStream_.str();
9276 stream_.state = STREAM_STOPPED;
9277 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ioctl calls failed.
9279 if ( result != -1 ) return;
9280 error( RtAudioError::SYSTEM_ERROR );
\r
9283 void RtApiOss :: callbackEvent()
\r
9285 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9286 if ( stream_.state == STREAM_STOPPED ) {
\r
9287 MUTEX_LOCK( &stream_.mutex );
\r
9288 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9289 if ( stream_.state != STREAM_RUNNING ) {
\r
9290 MUTEX_UNLOCK( &stream_.mutex );
\r
9293 MUTEX_UNLOCK( &stream_.mutex );
\r
9296 if ( stream_.state == STREAM_CLOSED ) {
\r
9297 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9298 error( RtAudioError::WARNING );
\r
9302 // Invoke user callback to get fresh output data.
\r
9303 int doStopStream = 0;
\r
9304 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9305 double streamTime = getStreamTime();
\r
9306 RtAudioStreamStatus status = 0;
\r
9307 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9308 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9309 handle->xrun[0] = false;
\r
9311 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9312 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9313 handle->xrun[1] = false;
\r
9315 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9316 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9317 if ( doStopStream == 2 ) {
\r
9318 this->abortStream();
\r
9322 MUTEX_LOCK( &stream_.mutex );
\r
9324 // The state might change while waiting on a mutex.
\r
9325 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9330 RtAudioFormat format;
\r
9332 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9334 // Setup parameters and do buffer conversion if necessary.
\r
9335 if ( stream_.doConvertBuffer[0] ) {
\r
9336 buffer = stream_.deviceBuffer;
\r
9337 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9338 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9339 format = stream_.deviceFormat[0];
\r
9342 buffer = stream_.userBuffer[0];
\r
9343 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9344 format = stream_.userFormat;
\r
9347 // Do byte swapping if necessary.
\r
9348 if ( stream_.doByteSwap[0] )
\r
9349 byteSwapBuffer( buffer, samples, format );
\r
9351 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9353 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9354 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9355 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9356 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9357 handle->triggered = true;
\r
9360 // Write samples to device.
\r
9361 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9363 if ( result == -1 ) {
\r
9364 // We'll assume this is an underrun, though there isn't a
\r
9365 // specific means for determining that.
\r
9366 handle->xrun[0] = true;
\r
9367 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9368 error( RtAudioError::WARNING );
\r
9369 // Continue on to input section.
\r
9373 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9375 // Setup parameters.
\r
9376 if ( stream_.doConvertBuffer[1] ) {
\r
9377 buffer = stream_.deviceBuffer;
\r
9378 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9379 format = stream_.deviceFormat[1];
\r
9382 buffer = stream_.userBuffer[1];
\r
9383 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9384 format = stream_.userFormat;
\r
9387 // Read samples from device.
\r
9388 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9390 if ( result == -1 ) {
\r
9391 // We'll assume this is an overrun, though there isn't a
\r
9392 // specific means for determining that.
\r
9393 handle->xrun[1] = true;
\r
9394 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9395 error( RtAudioError::WARNING );
\r
9399 // Do byte swapping if necessary.
\r
9400 if ( stream_.doByteSwap[1] )
\r
9401 byteSwapBuffer( buffer, samples, format );
\r
9403 // Do buffer conversion if necessary.
\r
9404 if ( stream_.doConvertBuffer[1] )
\r
9405 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9409 MUTEX_UNLOCK( &stream_.mutex );
\r
9411 RtApi::tickStreamTime();
\r
9412 if ( doStopStream == 1 ) this->stopStream();
\r
9415 static void *ossCallbackHandler( void *ptr )
\r
9417 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9418 RtApiOss *object = (RtApiOss *) info->object;
\r
9419 bool *isRunning = &info->isRunning;
\r
9421 while ( *isRunning == true ) {
\r
9422 pthread_testcancel();
\r
9423 object->callbackEvent();
\r
9426 pthread_exit( NULL );
\r
9429 //******************** End of __LINUX_OSS__ *********************//
\r
9433 // *************************************************** //
\r
9435 // Protected common (OS-independent) RtAudio methods.
\r
9437 // *************************************************** //
\r
9439 // This method can be modified to control the behavior of error
\r
9440 // message printing.
\r
9441 void RtApi :: error( RtAudioError::Type type )
\r
9443 errorStream_.str(""); // clear the ostringstream
\r
9445 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9446 if ( errorCallback ) {
\r
9447 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9449 if ( firstErrorOccurred_ )
\r
9452 firstErrorOccurred_ = true;
\r
9453 const std::string errorMessage = errorText_;
\r
9455 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9456 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9460 errorCallback( type, errorMessage );
\r
9461 firstErrorOccurred_ = false;
\r
9465 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9466 std::cerr << '\n' << errorText_ << "\n\n";
\r
9467 else if ( type != RtAudioError::WARNING )
\r
9468 throw( RtAudioError( errorText_, type ) );
\r
9471 void RtApi :: verifyStream()
\r
9473 if ( stream_.state == STREAM_CLOSED ) {
\r
9474 errorText_ = "RtApi:: a stream is not open!";
\r
9475 error( RtAudioError::INVALID_USE );
\r
9479 void RtApi :: clearStreamInfo()
\r
9481 stream_.mode = UNINITIALIZED;
\r
9482 stream_.state = STREAM_CLOSED;
\r
9483 stream_.sampleRate = 0;
\r
9484 stream_.bufferSize = 0;
\r
9485 stream_.nBuffers = 0;
\r
9486 stream_.userFormat = 0;
\r
9487 stream_.userInterleaved = true;
\r
9488 stream_.streamTime = 0.0;
\r
9489 stream_.apiHandle = 0;
\r
9490 stream_.deviceBuffer = 0;
\r
9491 stream_.callbackInfo.callback = 0;
\r
9492 stream_.callbackInfo.userData = 0;
\r
9493 stream_.callbackInfo.isRunning = false;
\r
9494 stream_.callbackInfo.errorCallback = 0;
\r
9495 for ( int i=0; i<2; i++ ) {
\r
9496 stream_.device[i] = 11111;
\r
9497 stream_.doConvertBuffer[i] = false;
\r
9498 stream_.deviceInterleaved[i] = true;
\r
9499 stream_.doByteSwap[i] = false;
\r
9500 stream_.nUserChannels[i] = 0;
\r
9501 stream_.nDeviceChannels[i] = 0;
\r
9502 stream_.channelOffset[i] = 0;
\r
9503 stream_.deviceFormat[i] = 0;
\r
9504 stream_.latency[i] = 0;
\r
9505 stream_.userBuffer[i] = 0;
\r
9506 stream_.convertInfo[i].channels = 0;
\r
9507 stream_.convertInfo[i].inJump = 0;
\r
9508 stream_.convertInfo[i].outJump = 0;
\r
9509 stream_.convertInfo[i].inFormat = 0;
\r
9510 stream_.convertInfo[i].outFormat = 0;
\r
9511 stream_.convertInfo[i].inOffset.clear();
\r
9512 stream_.convertInfo[i].outOffset.clear();
\r
9516 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9518 if ( format == RTAUDIO_SINT16 )
\r
9520 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9522 else if ( format == RTAUDIO_FLOAT64 )
\r
9524 else if ( format == RTAUDIO_SINT24 )
\r
9526 else if ( format == RTAUDIO_SINT8 )
\r
9529 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9530 error( RtAudioError::WARNING );
\r
9535 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9537 if ( mode == INPUT ) { // convert device to user buffer
\r
9538 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9539 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9540 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9541 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9543 else { // convert user to device buffer
\r
9544 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9545 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9546 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9547 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9550 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9551 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9553 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9555 // Set up the interleave/deinterleave offsets.
\r
9556 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9557 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9558 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9559 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9560 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9561 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9562 stream_.convertInfo[mode].inJump = 1;
\r
9566 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9567 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9568 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9569 stream_.convertInfo[mode].outJump = 1;
\r
9573 else { // no (de)interleaving
\r
9574 if ( stream_.userInterleaved ) {
\r
9575 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9576 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9577 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9581 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9582 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9583 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9584 stream_.convertInfo[mode].inJump = 1;
\r
9585 stream_.convertInfo[mode].outJump = 1;
\r
9590 // Add channel offset.
\r
9591 if ( firstChannel > 0 ) {
\r
9592 if ( stream_.deviceInterleaved[mode] ) {
\r
9593 if ( mode == OUTPUT ) {
\r
9594 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9595 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9598 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9599 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9603 if ( mode == OUTPUT ) {
\r
9604 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9605 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9608 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9609 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9615 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9617 // This function does format conversion, input/output channel compensation, and
\r
9618 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9619 // the lower three bytes of a 32-bit integer.
\r
9621 // Clear our device buffer when in/out duplex device channels are different
\r
9622 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9623 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9624 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9627 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9629 Float64 *out = (Float64 *)outBuffer;
\r
9631 if (info.inFormat == RTAUDIO_SINT8) {
\r
9632 signed char *in = (signed char *)inBuffer;
\r
9633 scale = 1.0 / 127.5;
\r
9634 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9635 for (j=0; j<info.channels; j++) {
\r
9636 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9637 out[info.outOffset[j]] += 0.5;
\r
9638 out[info.outOffset[j]] *= scale;
\r
9640 in += info.inJump;
\r
9641 out += info.outJump;
\r
9644 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9645 Int16 *in = (Int16 *)inBuffer;
\r
9646 scale = 1.0 / 32767.5;
\r
9647 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9648 for (j=0; j<info.channels; j++) {
\r
9649 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9650 out[info.outOffset[j]] += 0.5;
\r
9651 out[info.outOffset[j]] *= scale;
\r
9653 in += info.inJump;
\r
9654 out += info.outJump;
\r
9657 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9658 Int24 *in = (Int24 *)inBuffer;
\r
9659 scale = 1.0 / 8388607.5;
\r
9660 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9661 for (j=0; j<info.channels; j++) {
\r
9662 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9663 out[info.outOffset[j]] += 0.5;
\r
9664 out[info.outOffset[j]] *= scale;
\r
9666 in += info.inJump;
\r
9667 out += info.outJump;
\r
9670 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9671 Int32 *in = (Int32 *)inBuffer;
\r
9672 scale = 1.0 / 2147483647.5;
\r
9673 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9674 for (j=0; j<info.channels; j++) {
\r
9675 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9676 out[info.outOffset[j]] += 0.5;
\r
9677 out[info.outOffset[j]] *= scale;
\r
9679 in += info.inJump;
\r
9680 out += info.outJump;
\r
9683 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9684 Float32 *in = (Float32 *)inBuffer;
\r
9685 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9686 for (j=0; j<info.channels; j++) {
\r
9687 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9689 in += info.inJump;
\r
9690 out += info.outJump;
\r
9693 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9694 // Channel compensation and/or (de)interleaving only.
\r
9695 Float64 *in = (Float64 *)inBuffer;
\r
9696 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9697 for (j=0; j<info.channels; j++) {
\r
9698 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9700 in += info.inJump;
\r
9701 out += info.outJump;
\r
9705 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9707 Float32 *out = (Float32 *)outBuffer;
\r
9709 if (info.inFormat == RTAUDIO_SINT8) {
\r
9710 signed char *in = (signed char *)inBuffer;
\r
9711 scale = (Float32) ( 1.0 / 127.5 );
\r
9712 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9713 for (j=0; j<info.channels; j++) {
\r
9714 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9715 out[info.outOffset[j]] += 0.5;
\r
9716 out[info.outOffset[j]] *= scale;
\r
9718 in += info.inJump;
\r
9719 out += info.outJump;
\r
9722 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9723 Int16 *in = (Int16 *)inBuffer;
\r
9724 scale = (Float32) ( 1.0 / 32767.5 );
\r
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9726 for (j=0; j<info.channels; j++) {
\r
9727 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9728 out[info.outOffset[j]] += 0.5;
\r
9729 out[info.outOffset[j]] *= scale;
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9736 Int24 *in = (Int24 *)inBuffer;
\r
9737 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9739 for (j=0; j<info.channels; j++) {
\r
9740 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9741 out[info.outOffset[j]] += 0.5;
\r
9742 out[info.outOffset[j]] *= scale;
\r
9744 in += info.inJump;
\r
9745 out += info.outJump;
\r
9748 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9749 Int32 *in = (Int32 *)inBuffer;
\r
9750 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9751 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9752 for (j=0; j<info.channels; j++) {
\r
9753 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9754 out[info.outOffset[j]] += 0.5;
\r
9755 out[info.outOffset[j]] *= scale;
\r
9757 in += info.inJump;
\r
9758 out += info.outJump;
\r
9761 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9762 // Channel compensation and/or (de)interleaving only.
\r
9763 Float32 *in = (Float32 *)inBuffer;
\r
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9765 for (j=0; j<info.channels; j++) {
\r
9766 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9768 in += info.inJump;
\r
9769 out += info.outJump;
\r
9772 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9773 Float64 *in = (Float64 *)inBuffer;
\r
9774 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9775 for (j=0; j<info.channels; j++) {
\r
9776 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9778 in += info.inJump;
\r
9779 out += info.outJump;
\r
9783 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9784 Int32 *out = (Int32 *)outBuffer;
\r
9785 if (info.inFormat == RTAUDIO_SINT8) {
\r
9786 signed char *in = (signed char *)inBuffer;
\r
9787 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9788 for (j=0; j<info.channels; j++) {
\r
9789 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9790 out[info.outOffset[j]] <<= 24;
\r
9792 in += info.inJump;
\r
9793 out += info.outJump;
\r
9796 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9797 Int16 *in = (Int16 *)inBuffer;
\r
9798 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9799 for (j=0; j<info.channels; j++) {
\r
9800 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9801 out[info.outOffset[j]] <<= 16;
\r
9803 in += info.inJump;
\r
9804 out += info.outJump;
\r
9807 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9808 Int24 *in = (Int24 *)inBuffer;
\r
9809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9810 for (j=0; j<info.channels; j++) {
\r
9811 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9812 out[info.outOffset[j]] <<= 8;
\r
9814 in += info.inJump;
\r
9815 out += info.outJump;
\r
9818 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9819 // Channel compensation and/or (de)interleaving only.
\r
9820 Int32 *in = (Int32 *)inBuffer;
\r
9821 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9822 for (j=0; j<info.channels; j++) {
\r
9823 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9825 in += info.inJump;
\r
9826 out += info.outJump;
\r
9829 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9830 Float32 *in = (Float32 *)inBuffer;
\r
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9832 for (j=0; j<info.channels; j++) {
\r
9833 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9835 in += info.inJump;
\r
9836 out += info.outJump;
\r
9839 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9840 Float64 *in = (Float64 *)inBuffer;
\r
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9842 for (j=0; j<info.channels; j++) {
\r
9843 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9845 in += info.inJump;
\r
9846 out += info.outJump;
\r
9850 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9851 Int24 *out = (Int24 *)outBuffer;
\r
9852 if (info.inFormat == RTAUDIO_SINT8) {
\r
9853 signed char *in = (signed char *)inBuffer;
\r
9854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9855 for (j=0; j<info.channels; j++) {
\r
9856 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9857 //out[info.outOffset[j]] <<= 16;
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9864 Int16 *in = (Int16 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9868 //out[info.outOffset[j]] <<= 8;
\r
9870 in += info.inJump;
\r
9871 out += info.outJump;
\r
9874 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9875 // Channel compensation and/or (de)interleaving only.
\r
9876 Int24 *in = (Int24 *)inBuffer;
\r
9877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9878 for (j=0; j<info.channels; j++) {
\r
9879 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9881 in += info.inJump;
\r
9882 out += info.outJump;
\r
9885 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9886 Int32 *in = (Int32 *)inBuffer;
\r
9887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9888 for (j=0; j<info.channels; j++) {
\r
9889 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9890 //out[info.outOffset[j]] >>= 8;
\r
9892 in += info.inJump;
\r
9893 out += info.outJump;
\r
9896 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9897 Float32 *in = (Float32 *)inBuffer;
\r
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9899 for (j=0; j<info.channels; j++) {
\r
9900 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9902 in += info.inJump;
\r
9903 out += info.outJump;
\r
9906 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9907 Float64 *in = (Float64 *)inBuffer;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9912 in += info.inJump;
\r
9913 out += info.outJump;
\r
9917 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9918 Int16 *out = (Int16 *)outBuffer;
\r
9919 if (info.inFormat == RTAUDIO_SINT8) {
\r
9920 signed char *in = (signed char *)inBuffer;
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9924 out[info.outOffset[j]] <<= 8;
\r
9926 in += info.inJump;
\r
9927 out += info.outJump;
\r
9930 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9931 // Channel compensation and/or (de)interleaving only.
\r
9932 Int16 *in = (Int16 *)inBuffer;
\r
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9934 for (j=0; j<info.channels; j++) {
\r
9935 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9941 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9942 Int24 *in = (Int24 *)inBuffer;
\r
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9944 for (j=0; j<info.channels; j++) {
\r
9945 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9947 in += info.inJump;
\r
9948 out += info.outJump;
\r
9951 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9952 Int32 *in = (Int32 *)inBuffer;
\r
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9954 for (j=0; j<info.channels; j++) {
\r
9955 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9957 in += info.inJump;
\r
9958 out += info.outJump;
\r
9961 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9962 Float32 *in = (Float32 *)inBuffer;
\r
9963 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9964 for (j=0; j<info.channels; j++) {
\r
9965 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9967 in += info.inJump;
\r
9968 out += info.outJump;
\r
9971 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9972 Float64 *in = (Float64 *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9977 in += info.inJump;
\r
9978 out += info.outJump;
\r
9982 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9983 signed char *out = (signed char *)outBuffer;
\r
9984 if (info.inFormat == RTAUDIO_SINT8) {
\r
9985 // Channel compensation and/or (de)interleaving only.
\r
9986 signed char *in = (signed char *)inBuffer;
\r
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9988 for (j=0; j<info.channels; j++) {
\r
9989 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 if (info.inFormat == RTAUDIO_SINT16) {
\r
9996 Int16 *in = (Int16 *)inBuffer;
\r
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9998 for (j=0; j<info.channels; j++) {
\r
9999 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10001 in += info.inJump;
\r
10002 out += info.outJump;
\r
10005 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10006 Int24 *in = (Int24 *)inBuffer;
\r
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10008 for (j=0; j<info.channels; j++) {
\r
10009 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10011 in += info.inJump;
\r
10012 out += info.outJump;
\r
10015 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10016 Int32 *in = (Int32 *)inBuffer;
\r
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10018 for (j=0; j<info.channels; j++) {
\r
10019 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10021 in += info.inJump;
\r
10022 out += info.outJump;
\r
10025 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10026 Float32 *in = (Float32 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10035 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10036 Float64 *in = (Float64 *)inBuffer;
\r
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10038 for (j=0; j<info.channels; j++) {
\r
10039 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10041 in += info.inJump;
\r
10042 out += info.outJump;
\r
10048 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10049 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10050 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10052 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10054 register char val;
\r
10055 register char *ptr;
\r
10058 if ( format == RTAUDIO_SINT16 ) {
\r
10059 for ( unsigned int i=0; i<samples; i++ ) {
\r
10060 // Swap 1st and 2nd bytes.
\r
10062 *(ptr) = *(ptr+1);
\r
10065 // Increment 2 bytes.
\r
10069 else if ( format == RTAUDIO_SINT32 ||
\r
10070 format == RTAUDIO_FLOAT32 ) {
\r
10071 for ( unsigned int i=0; i<samples; i++ ) {
\r
10072 // Swap 1st and 4th bytes.
\r
10074 *(ptr) = *(ptr+3);
\r
10077 // Swap 2nd and 3rd bytes.
\r
10080 *(ptr) = *(ptr+1);
\r
10083 // Increment 3 more bytes.
\r
10087 else if ( format == RTAUDIO_SINT24 ) {
\r
10088 for ( unsigned int i=0; i<samples; i++ ) {
\r
10089 // Swap 1st and 3rd bytes.
\r
10091 *(ptr) = *(ptr+2);
\r
10094 // Increment 2 more bytes.
\r
10098 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10099 for ( unsigned int i=0; i<samples; i++ ) {
\r
10100 // Swap 1st and 8th bytes
\r
10102 *(ptr) = *(ptr+7);
\r
10105 // Swap 2nd and 7th bytes
\r
10108 *(ptr) = *(ptr+5);
\r
10111 // Swap 3rd and 6th bytes
\r
10114 *(ptr) = *(ptr+3);
\r
10117 // Swap 4th and 5th bytes
\r
10120 *(ptr) = *(ptr+1);
\r
10123 // Increment 5 more bytes.
\r
10129 // Indentation settings for Vim and Emacs
\r
10131 // Local Variables:
\r
10132 // c-basic-offset: 2
\r
10133 // indent-tabs-mode: nil
\r
10136 // vim: et sts=2 sw=2
\r