1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1pre
\r
#include "RtAudio.h"

#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
// Close the currently open CoreAudio stream: stop any running IOProcs,
// unregister them, free user/device buffers, destroy the handle's pthread
// condition variable, and reset the stream bookkeeping to CLOSED.
// NOTE(review): this listing is incomplete -- several original lines (opening
// brace, the early return after the warning, #else/#endif pairs and closing
// braces) are absent from this dump; recover them from the canonical source
// before compiling.
1365 void RtApiCore :: closeStream( void )

// Nothing to do when no stream is open; warn (the early return on the missing
// following line presumably exits here).
1367 if ( stream_.state == STREAM_CLOSED ) {

1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1369 error( RtAudioError::WARNING );

1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Tear down the output-side IOProc (handle slot 0).
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1375 if ( stream_.state == STREAM_RUNNING )

1376 AudioDeviceStop( handle->id[0], callbackHandler );

// On 10.5+ the IOProc was registered via AudioDeviceCreateIOProcID, so it is
// destroyed with the stored procId; older systems use the deprecated API.
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1380 // deprecated in favor of AudioDeviceDestroyIOProcID()

1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Tear down the input-side IOProc (slot 1), but only when it is a distinct
// device -- a same-device duplex stream shares one IOProc.
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1386 if ( stream_.state == STREAM_RUNNING )

1387 AudioDeviceStop( handle->id[1], callbackHandler );

1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1391 // deprecated in favor of AudioDeviceDestroyIOProcID()

1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

// Release the per-direction user buffers (index 0 = output, 1 = input).
1396 for ( int i=0; i<2; i++ ) {

1397 if ( stream_.userBuffer[i] ) {

1398 free( stream_.userBuffer[i] );

1399 stream_.userBuffer[i] = 0;

1403 if ( stream_.deviceBuffer ) {

1404 free( stream_.deviceBuffer );

1405 stream_.deviceBuffer = 0;

1408 // Destroy pthread condition variable.

1409 pthread_cond_destroy( &handle->condition );

// NOTE(review): the `delete handle` that should accompany this reset appears
// to be on a line missing from this dump -- confirm against the canonical
// source, otherwise the CoreHandle leaks.
1411 stream_.apiHandle = 0;

1413 stream_.mode = UNINITIALIZED;

1414 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: kick off the output and (if on a different device)
// input IOProcs via AudioDeviceStart, reset the drain bookkeeping, and mark
// the stream RUNNING. On any CoreAudio failure, falls through to raise a
// SYSTEM_ERROR. NOTE(review): listing is incomplete -- braces, early returns
// and the "unlock:" style epilogue lines are missing from this dump.
1417 void RtApiCore :: startStream( void )

1420 if ( stream_.state == STREAM_RUNNING ) {

1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1422 error( RtAudioError::WARNING );

1426 OSStatus result = noErr;

1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Start the output-side callback procedure (handle slot 0).
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1430 result = AudioDeviceStart( handle->id[0], callbackHandler );

1431 if ( result != noErr ) {

1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1433 errorText_ = errorStream_.str();

// Start the input-side callback only when input runs on a separate device;
// same-device duplex shares the output IOProc started above.
1438 if ( stream_.mode == INPUT ||

1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1441 result = AudioDeviceStart( handle->id[1], callbackHandler );

1442 if ( result != noErr ) {

// NOTE(review): unlike the output branch above, this message omits
// getErrorCode( result ) -- consider making the two diagnostics consistent.
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1444 errorText_ = errorStream_.str();

// Fresh start: no pending drain and the stream is now live.
1449 handle->drainCounter = 0;

1450 handle->internalDrain = false;

1451 stream_.state = STREAM_RUNNING;

// Error epilogue (reached via the missing goto/label lines): only raise when
// a CoreAudio call actually failed.
1454 if ( result == noErr ) return;

1455 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully: for output, request a drain (write zeros)
// and wait on the handle's condition variable until the callback signals the
// drain finished, then AudioDeviceStop both IOProcs as appropriate and mark
// the stream STOPPED. NOTE(review): listing is incomplete -- braces, early
// returns and the error-label lines are missing from this dump.
1458 void RtApiCore :: stopStream( void )

1461 if ( stream_.state == STREAM_STOPPED ) {

1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1463 error( RtAudioError::WARNING );

1467 OSStatus result = noErr;

1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain is in progress yet: request one (value 2
// tells callbackEvent to output silence) and block until it signals us.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be locked by this
// thread -- the lock acquisition is presumably on a line missing from this
// dump; confirm against the canonical source.
1471 if ( handle->drainCounter == 0 ) {

1472 handle->drainCounter = 2;

1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1476 result = AudioDeviceStop( handle->id[0], callbackHandler );

1477 if ( result != noErr ) {

1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1479 errorText_ = errorStream_.str();

// Stop the input-side IOProc only when input has its own device.
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1486 result = AudioDeviceStop( handle->id[1], callbackHandler );

1487 if ( result != noErr ) {

1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1489 errorText_ = errorStream_.str();

1494 stream_.state = STREAM_STOPPED;

// Error epilogue: raise only if a CoreAudio call failed.
1497 if ( result == noErr ) return;

1498 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort (immediately stop) a running stream. Setting drainCounter to 2
// without waiting tells the audio callback to output silence; the actual stop
// presumably follows on lines missing from this dump (a stopStream() call in
// the canonical source) -- confirm before relying on this listing.
1501 void RtApiCore :: abortStream( void )

1504 if ( stream_.state == STREAM_STOPPED ) {

1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1506 error( RtAudioError::WARNING );

1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// 2 = "drain requested externally": callbackEvent writes zeros and counts up.
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user

1517 // callback function signals that the stream should be stopped or

1518 // aborted. It is better to handle it this way because the

1519 // callbackEvent() function probably should return before the AudioDeviceStop()

1520 // function is called.

// Thread entry point (pthread signature): ptr is the stream's CallbackInfo,
// whose `object` field is the owning RtApiCore instance.
1521 static void *coreStopStream( void *ptr )

1523 CallbackInfo *info = (CallbackInfo *) ptr;

1524 RtApiCore *object = (RtApiCore *) info->object;

// Perform the stop from this detached context, then end the thread.
1526 object->stopStream();

1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1686 if ( handle->drainCounter ) {
\r
1687 handle->drainCounter++;
\r
1692 AudioDeviceID inputDevice;
\r
1693 inputDevice = handle->id[1];
\r
1694 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1696 if ( handle->nStreams[1] == 1 ) {
\r
1697 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1698 convertBuffer( stream_.userBuffer[1],
\r
1699 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1700 stream_.convertInfo[1] );
\r
1702 else { // copy to user buffer
\r
1703 memcpy( stream_.userBuffer[1],
\r
1704 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1708 else { // read from multiple streams
\r
1709 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1710 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1712 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1713 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1714 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1715 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1716 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1719 else { // read from multiple multi-channel streams
\r
1720 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1721 Float32 *out, *in;
\r
1723 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1724 UInt32 outChannels = stream_.nUserChannels[1];
\r
1725 if ( stream_.doConvertBuffer[1] ) {
\r
1726 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1727 outChannels = stream_.nDeviceChannels[1];
\r
1730 if ( outInterleaved ) outOffset = 1;
\r
1731 else outOffset = stream_.bufferSize;
\r
1733 channelsLeft = outChannels;
\r
1734 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1736 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1737 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1740 // Account for possible channel offset in first stream
\r
1741 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1742 streamChannels -= stream_.channelOffset[1];
\r
1743 inJump = stream_.channelOffset[1];
\r
1747 // Account for possible unread channels at end of the last stream
\r
1748 if ( streamChannels > channelsLeft ) {
\r
1749 inJump = streamChannels - channelsLeft;
\r
1750 streamChannels = channelsLeft;
\r
1753 // Determine output buffer offsets and skips
\r
1754 if ( outInterleaved ) {
\r
1755 outJump = outChannels;
\r
1756 out += outChannels - channelsLeft;
\r
1760 out += (outChannels - channelsLeft) * outOffset;
\r
1763 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1764 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1765 out[j*outOffset] = *in++;
\r
1770 channelsLeft -= streamChannels;
\r
1774 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1775 convertBuffer( stream_.userBuffer[1],
\r
1776 stream_.deviceBuffer,
\r
1777 stream_.convertInfo[1] );
\r
1783 //MUTEX_UNLOCK( &stream_.mutex );
\r
1785 RtApi::tickStreamTime();
\r
1789 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1793 case kAudioHardwareNotRunningError:
\r
1794 return "kAudioHardwareNotRunningError";
\r
1796 case kAudioHardwareUnspecifiedError:
\r
1797 return "kAudioHardwareUnspecifiedError";
\r
1799 case kAudioHardwareUnknownPropertyError:
\r
1800 return "kAudioHardwareUnknownPropertyError";
\r
1802 case kAudioHardwareBadPropertySizeError:
\r
1803 return "kAudioHardwareBadPropertySizeError";
\r
1805 case kAudioHardwareIllegalOperationError:
\r
1806 return "kAudioHardwareIllegalOperationError";
\r
1808 case kAudioHardwareBadObjectError:
\r
1809 return "kAudioHardwareBadObjectError";
\r
1811 case kAudioHardwareBadDeviceError:
\r
1812 return "kAudioHardwareBadDeviceError";
\r
1814 case kAudioHardwareBadStreamError:
\r
1815 return "kAudioHardwareBadStreamError";
\r
1817 case kAudioHardwareUnsupportedOperationError:
\r
1818 return "kAudioHardwareUnsupportedOperationError";
\r
1820 case kAudioDeviceUnsupportedFormatError:
\r
1821 return "kAudioDeviceUnsupportedFormatError";
\r
1823 case kAudioDevicePermissionsError:
\r
1824 return "kAudioDevicePermissionsError";
\r
1827 return "CoreAudio unknown error";
\r
1831 //******************** End of __MACOSX_CORE__ *********************//
\r
1834 #if defined(__UNIX_JACK__)
\r
1836 // JACK is a low-latency audio server, originally written for the
\r
1837 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1838 // connect a number of different applications to an audio device, as
\r
1839 // well as allowing them to share audio between themselves.
\r
1841 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1842 // have ports connected to the server. The JACK server is typically
\r
1843 // started in a terminal as follows:
\r
1845 // .jackd -d alsa -d hw:0
\r
1847 // or through an interface program such as qjackctl. Many of the
\r
1848 // parameters normally set for a stream are fixed by the JACK server
\r
1849 // and can be specified when the JACK server is started. In
\r
1852 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1854 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1855 // frames, and number of buffers = 4. Once the server is running, it
\r
1856 // is not possible to override these values. If the values are not
\r
1857 // specified in the command-line, the JACK server uses default values.
\r
1859 // The JACK server does not have to be running when an instance of
\r
1860 // RtApiJack is created, though the function getDeviceCount() will
\r
1861 // report 0 devices found until JACK has been started. When no
\r
1862 // devices are available (i.e., the JACK server is not running), a
\r
1863 // stream cannot be opened.
\r
1865 #include <jack/jack.h>
\r
1866 #include <unistd.h>
\r
1869 // A structure to hold various information related to the Jack API
\r
1870 // implementation.
\r
1871 struct JackHandle {
\r
1872 jack_client_t *client;
\r
1873 jack_port_t **ports[2];
\r
1874 std::string deviceName[2];
\r
1876 pthread_cond_t condition;
\r
1877 int drainCounter; // Tracks callback counts when draining
\r
1878 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1881 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler: installed via jack_set_error_function() in
// non-debug builds to suppress JACK's internal error printing.
// (Stray trailing semicolon after the function body removed.)
static void jackSilentError( const char * ) {}
\r
1886 RtApiJack :: RtApiJack()
\r
1888 // Nothing to do here.
\r
1889 #if !defined(__RTAUDIO_DEBUG__)
\r
1890 // Turn off Jack's internal error reporting.
\r
1891 jack_set_error_function( &jackSilentError );
\r
1895 RtApiJack :: ~RtApiJack()
\r
1897 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1900 unsigned int RtApiJack :: getDeviceCount( void )
\r
1902 // See if we can become a jack client.
\r
1903 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1904 jack_status_t *status = NULL;
\r
1905 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1906 if ( client == 0 ) return 0;
\r
1908 const char **ports;
\r
1909 std::string port, previousPort;
\r
1910 unsigned int nChannels = 0, nDevices = 0;
\r
1911 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1913 // Parse the port names up to the first colon (:).
\r
1914 size_t iColon = 0;
\r
1916 port = (char *) ports[ nChannels ];
\r
1917 iColon = port.find(":");
\r
1918 if ( iColon != std::string::npos ) {
\r
1919 port = port.substr( 0, iColon + 1 );
\r
1920 if ( port != previousPort ) {
\r
1922 previousPort = port;
\r
1925 } while ( ports[++nChannels] );
\r
1929 jack_client_close( client );
\r
1933 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1935 RtAudio::DeviceInfo info;
\r
1936 info.probed = false;
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1941 if ( client == 0 ) {
\r
1942 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1943 error( RtAudioError::WARNING );
\r
1947 const char **ports;
\r
1948 std::string port, previousPort;
\r
1949 unsigned int nPorts = 0, nDevices = 0;
\r
1950 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1952 // Parse the port names up to the first colon (:).
\r
1953 size_t iColon = 0;
\r
1955 port = (char *) ports[ nPorts ];
\r
1956 iColon = port.find(":");
\r
1957 if ( iColon != std::string::npos ) {
\r
1958 port = port.substr( 0, iColon );
\r
1959 if ( port != previousPort ) {
\r
1960 if ( nDevices == device ) info.name = port;
\r
1962 previousPort = port;
\r
1965 } while ( ports[++nPorts] );
\r
1969 if ( device >= nDevices ) {
\r
1970 jack_client_close( client );
\r
1971 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1972 error( RtAudioError::INVALID_USE );
\r
1976 // Get the current jack server sample rate.
\r
1977 info.sampleRates.clear();
\r
1978 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1980 // Count the available ports containing the client name as device
\r
1981 // channels. Jack "input ports" equal RtAudio output channels.
\r
1982 unsigned int nChannels = 0;
\r
1983 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1985 while ( ports[ nChannels ] ) nChannels++;
\r
1987 info.outputChannels = nChannels;
\r
1990 // Jack "output ports" equal RtAudio input channels.
\r
1992 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1994 while ( ports[ nChannels ] ) nChannels++;
\r
1996 info.inputChannels = nChannels;
\r
1999 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2000 jack_client_close(client);
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 // If device opens for both playback and capture, we determine the channels.
\r
2007 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2008 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2010 // Jack always uses 32-bit floats.
\r
2011 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2013 // Jack doesn't provide default devices so we'll use the first available one.
\r
2014 if ( device == 0 && info.outputChannels > 0 )
\r
2015 info.isDefaultOutput = true;
\r
2016 if ( device == 0 && info.inputChannels > 0 )
\r
2017 info.isDefaultInput = true;
\r
2019 jack_client_close(client);
\r
2020 info.probed = true;
\r
2024 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2026 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2028 RtApiJack *object = (RtApiJack *) info->object;
\r
2029 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2034 // This function will be called by a spawned thread when the Jack
\r
2035 // server signals that it is shutting down. It is necessary to handle
\r
2036 // it this way because the jackShutdown() function must return before
\r
2037 // the jack_deactivate() function (in closeStream()) will return.
\r
2038 static void *jackCloseStream( void *ptr )
\r
2040 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2041 RtApiJack *object = (RtApiJack *) info->object;
\r
2043 object->closeStream();
\r
2045 pthread_exit( NULL );
\r
2047 static void jackShutdown( void *infoPointer )
\r
2049 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2050 RtApiJack *object = (RtApiJack *) info->object;
\r
2052 // Check current stream state. If stopped, then we'll assume this
\r
2053 // was called as a result of a call to RtApiJack::stopStream (the
\r
2054 // deactivation of a client handle causes this function to be called).
\r
2055 // If not, we'll assume the Jack server is shutting down or some
\r
2056 // other problem occurred and we should close the stream.
\r
2057 if ( object->isStreamRunning() == false ) return;
\r
2059 ThreadHandle threadId;
\r
2060 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2061 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2064 static int jackXrun( void *infoPointer )
\r
2066 JackHandle *handle = (JackHandle *) infoPointer;
\r
2068 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2069 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2074 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2075 unsigned int firstChannel, unsigned int sampleRate,
\r
2076 RtAudioFormat format, unsigned int *bufferSize,
\r
2077 RtAudio::StreamOptions *options )
\r
2079 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2081 // Look for jack server and try to become a client (only do once per stream).
\r
2082 jack_client_t *client = 0;
\r
2083 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2084 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2085 jack_status_t *status = NULL;
\r
2086 if ( options && !options->streamName.empty() )
\r
2087 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2089 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2090 if ( client == 0 ) {
\r
2091 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2092 error( RtAudioError::WARNING );
\r
2097 // The handle must have been created on an earlier pass.
\r
2098 client = handle->client;
\r
2101 const char **ports;
\r
2102 std::string port, previousPort, deviceName;
\r
2103 unsigned int nPorts = 0, nDevices = 0;
\r
2104 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2106 // Parse the port names up to the first colon (:).
\r
2107 size_t iColon = 0;
\r
2109 port = (char *) ports[ nPorts ];
\r
2110 iColon = port.find(":");
\r
2111 if ( iColon != std::string::npos ) {
\r
2112 port = port.substr( 0, iColon );
\r
2113 if ( port != previousPort ) {
\r
2114 if ( nDevices == device ) deviceName = port;
\r
2116 previousPort = port;
\r
2119 } while ( ports[++nPorts] );
\r
2123 if ( device >= nDevices ) {
\r
2124 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2128 // Count the available ports containing the client name as device
\r
2129 // channels. Jack "input ports" equal RtAudio output channels.
\r
2130 unsigned int nChannels = 0;
\r
2131 unsigned long flag = JackPortIsInput;
\r
2132 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2133 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2135 while ( ports[ nChannels ] ) nChannels++;
\r
2139 // Compare the jack ports for specified client to the requested number of channels.
\r
2140 if ( nChannels < (channels + firstChannel) ) {
\r
2141 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2142 errorText_ = errorStream_.str();
\r
2146 // Check the jack server sample rate.
\r
2147 unsigned int jackRate = jack_get_sample_rate( client );
\r
2148 if ( sampleRate != jackRate ) {
\r
2149 jack_client_close( client );
\r
2150 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2151 errorText_ = errorStream_.str();
\r
2154 stream_.sampleRate = jackRate;
\r
2156 // Get the latency of the JACK port.
\r
2157 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2158 if ( ports[ firstChannel ] ) {
\r
2159 // Added by Ge Wang
\r
2160 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2161 // the range (usually the min and max are equal)
\r
2162 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2163 // get the latency range
\r
2164 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2165 // be optimistic, use the min!
\r
2166 stream_.latency[mode] = latrange.min;
\r
2167 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2171 // The jack server always uses 32-bit floating-point data.
\r
2172 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2173 stream_.userFormat = format;
\r
2175 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2176 else stream_.userInterleaved = true;
\r
2178 // Jack always uses non-interleaved buffers.
\r
2179 stream_.deviceInterleaved[mode] = false;
\r
2181 // Jack always provides host byte-ordered data.
\r
2182 stream_.doByteSwap[mode] = false;
\r
2184 // Get the buffer size. The buffer size and number of buffers
\r
2185 // (periods) is set when the jack server is started.
\r
2186 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2187 *bufferSize = stream_.bufferSize;
\r
2189 stream_.nDeviceChannels[mode] = channels;
\r
2190 stream_.nUserChannels[mode] = channels;
\r
2192 // Set flags for buffer conversion.
\r
2193 stream_.doConvertBuffer[mode] = false;
\r
2194 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2195 stream_.doConvertBuffer[mode] = true;
\r
2196 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2197 stream_.nUserChannels[mode] > 1 )
\r
2198 stream_.doConvertBuffer[mode] = true;
\r
2200 // Allocate our JackHandle structure for the stream.
\r
2201 if ( handle == 0 ) {
\r
2203 handle = new JackHandle;
\r
2205 catch ( std::bad_alloc& ) {
\r
2206 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2210 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2211 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2214 stream_.apiHandle = (void *) handle;
\r
2215 handle->client = client;
\r
2217 handle->deviceName[mode] = deviceName;
\r
2219 // Allocate necessary internal buffers.
\r
2220 unsigned long bufferBytes;
\r
2221 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2222 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2223 if ( stream_.userBuffer[mode] == NULL ) {
\r
2224 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2228 if ( stream_.doConvertBuffer[mode] ) {
\r
2230 bool makeBuffer = true;
\r
2231 if ( mode == OUTPUT )
\r
2232 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2233 else { // mode == INPUT
\r
2234 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2235 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2236 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2237 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2241 if ( makeBuffer ) {
\r
2242 bufferBytes *= *bufferSize;
\r
2243 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2244 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2245 if ( stream_.deviceBuffer == NULL ) {
\r
2246 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2252 // Allocate memory for the Jack ports (channels) identifiers.
\r
2253 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2254 if ( handle->ports[mode] == NULL ) {
\r
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2259 stream_.device[mode] = device;
\r
2260 stream_.channelOffset[mode] = firstChannel;
\r
2261 stream_.state = STREAM_STOPPED;
\r
2262 stream_.callbackInfo.object = (void *) this;
\r
2264 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2265 // We had already set up the stream for output.
\r
2266 stream_.mode = DUPLEX;
\r
2268 stream_.mode = mode;
\r
2269 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2270 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2271 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2274 // Register our ports.
\r
2276 if ( mode == OUTPUT ) {
\r
2277 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2278 snprintf( label, 64, "outport %d", i );
\r
2279 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2280 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2284 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2285 snprintf( label, 64, "inport %d", i );
\r
2286 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2287 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2291 // Setup the buffer conversion information structure. We don't use
\r
2292 // buffers to do channel offsets, so we override that parameter
\r
2294 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2300 pthread_cond_destroy( &handle->condition );
\r
2301 jack_client_close( handle->client );
\r
2303 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2304 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2307 stream_.apiHandle = 0;
\r
2310 for ( int i=0; i<2; i++ ) {
\r
2311 if ( stream_.userBuffer[i] ) {
\r
2312 free( stream_.userBuffer[i] );
\r
2313 stream_.userBuffer[i] = 0;
\r
2317 if ( stream_.deviceBuffer ) {
\r
2318 free( stream_.deviceBuffer );
\r
2319 stream_.deviceBuffer = 0;
\r
2325 void RtApiJack :: closeStream( void )
\r
2327 if ( stream_.state == STREAM_CLOSED ) {
\r
2328 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2329 error( RtAudioError::WARNING );
\r
2333 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2336 if ( stream_.state == STREAM_RUNNING )
\r
2337 jack_deactivate( handle->client );
\r
2339 jack_client_close( handle->client );
\r
2343 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2344 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2345 pthread_cond_destroy( &handle->condition );
\r
2347 stream_.apiHandle = 0;
\r
2350 for ( int i=0; i<2; i++ ) {
\r
2351 if ( stream_.userBuffer[i] ) {
\r
2352 free( stream_.userBuffer[i] );
\r
2353 stream_.userBuffer[i] = 0;
\r
2357 if ( stream_.deviceBuffer ) {
\r
2358 free( stream_.deviceBuffer );
\r
2359 stream_.deviceBuffer = 0;
\r
2362 stream_.mode = UNINITIALIZED;
\r
2363 stream_.state = STREAM_CLOSED;
\r
2366 void RtApiJack :: startStream( void )
\r
2369 if ( stream_.state == STREAM_RUNNING ) {
\r
2370 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2371 error( RtAudioError::WARNING );
\r
2375 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2376 int result = jack_activate( handle->client );
\r
2378 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2382 const char **ports;
\r
2384 // Get the list of available ports.
\r
2385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2387 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2388 if ( ports == NULL) {
\r
2389 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2393 // Now make the port connections. Since RtAudio wasn't designed to
\r
2394 // allow the user to select particular channels of a device, we'll
\r
2395 // just open the first "nChannels" ports with offset.
\r
2396 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2398 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2399 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2402 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2409 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2411 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2412 if ( ports == NULL) {
\r
2413 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2417 // Now make the port connections. See note above.
\r
2418 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2420 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2421 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2424 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2431 handle->drainCounter = 0;
\r
2432 handle->internalDrain = false;
\r
2433 stream_.state = STREAM_RUNNING;
\r
2436 if ( result == 0 ) return;
\r
2437 error( RtAudioError::SYSTEM_ERROR );
\r
2440 void RtApiJack :: stopStream( void )
\r
2443 if ( stream_.state == STREAM_STOPPED ) {
\r
2444 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2445 error( RtAudioError::WARNING );
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 if ( handle->drainCounter == 0 ) {
\r
2453 handle->drainCounter = 2;
\r
2454 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2458 jack_deactivate( handle->client );
\r
2459 stream_.state = STREAM_STOPPED;
\r
2462 void RtApiJack :: abortStream( void )
\r
2465 if ( stream_.state == STREAM_STOPPED ) {
\r
2466 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2467 error( RtAudioError::WARNING );
\r
2471 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2472 handle->drainCounter = 2;
\r
2477 // This function will be called by a spawned thread when the user
\r
2478 // callback function signals that the stream should be stopped or
\r
2479 // aborted. It is necessary to handle it this way because the
\r
2480 // callbackEvent() function must return before the jack_deactivate()
\r
2481 // function will return.
\r
2482 static void *jackStopStream( void *ptr )
\r
2484 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2485 RtApiJack *object = (RtApiJack *) info->object;
\r
2487 object->stopStream();
\r
2488 pthread_exit( NULL );
\r
2491 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2493 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2494 if ( stream_.state == STREAM_CLOSED ) {
\r
2495 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2496 error( RtAudioError::WARNING );
\r
2499 if ( stream_.bufferSize != nframes ) {
\r
2500 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2501 error( RtAudioError::WARNING );
\r
2505 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2506 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2508 // Check if we were draining the stream and signal is finished.
\r
2509 if ( handle->drainCounter > 3 ) {
\r
2510 ThreadHandle threadId;
\r
2512 stream_.state = STREAM_STOPPING;
\r
2513 if ( handle->internalDrain == true )
\r
2514 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2516 pthread_cond_signal( &handle->condition );
\r
2520 // Invoke user callback first, to get fresh output data.
\r
2521 if ( handle->drainCounter == 0 ) {
\r
2522 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2523 double streamTime = getStreamTime();
\r
2524 RtAudioStreamStatus status = 0;
\r
2525 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2526 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2527 handle->xrun[0] = false;
\r
2529 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2530 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2531 handle->xrun[1] = false;
\r
2533 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2534 stream_.bufferSize, streamTime, status, info->userData );
\r
2535 if ( cbReturnValue == 2 ) {
\r
2536 stream_.state = STREAM_STOPPING;
\r
2537 handle->drainCounter = 2;
\r
2539 pthread_create( &id, NULL, jackStopStream, info );
\r
2542 else if ( cbReturnValue == 1 ) {
\r
2543 handle->drainCounter = 1;
\r
2544 handle->internalDrain = true;
\r
2548 jack_default_audio_sample_t *jackbuffer;
\r
2549 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2552 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2554 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2555 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2556 memset( jackbuffer, 0, bufferBytes );
\r
2560 else if ( stream_.doConvertBuffer[0] ) {
\r
2562 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2569 else { // no buffer conversion
\r
2570 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2571 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2572 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2576 if ( handle->drainCounter ) {
\r
2577 handle->drainCounter++;
\r
2582 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2584 if ( stream_.doConvertBuffer[1] ) {
\r
2585 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2587 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2589 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2591 else { // no buffer conversion
\r
2592 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2593 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2594 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 RtApi::tickStreamTime();
\r
2603 //******************** End of __UNIX_JACK__ *********************//
\r
2606 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2608 // The ASIO API is designed around a callback scheme, so this
\r
2609 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2610 // Jack. The primary constraint with ASIO is that it only allows
\r
2611 // access to a single driver at a time. Thus, it is not possible to
\r
2612 // have more than one simultaneous RtAudio stream.
\r
2614 // This implementation also requires a number of external ASIO files
\r
2615 // and a few global variables. The ASIO callback scheme does not
\r
2616 // allow for the passing of user data, so we must create a global
\r
2617 // pointer to our callbackInfo structure.
\r
2619 // On unix systems, we make use of a pthread condition variable.
\r
2620 // Since there is no equivalent in Windows, I hacked something based
\r
2621 // on information found in
\r
2622 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2624 #include "asiosys.h"
\r
2626 #include "iasiothiscallresolver.h"
\r
2627 #include "asiodrivers.h"
\r
2630 static AsioDrivers drivers;
\r
2631 static ASIOCallbacks asioCallbacks;
\r
2632 static ASIODriverInfo driverInfo;
\r
2633 static CallbackInfo *asioCallbackInfo;
\r
2634 static bool asioXRun;
\r
2636 struct AsioHandle {
\r
2637 int drainCounter; // Tracks callback counts when draining
\r
2638 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2639 ASIOBufferInfo *bufferInfos;
\r
2643 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2646 // Function declarations (definitions at end of section)
\r
2647 static const char* getAsioErrorString( ASIOError result );
\r
2648 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2649 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2651 RtApiAsio :: RtApiAsio()
\r
2653 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2654 // CoInitialize beforehand, but it must be for appartment threading
\r
2655 // (in which case, CoInitilialize will return S_FALSE here).
\r
2656 coInitialized_ = false;
\r
2657 HRESULT hr = CoInitialize( NULL );
\r
2658 if ( FAILED(hr) ) {
\r
2659 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2660 error( RtAudioError::WARNING );
\r
2662 coInitialized_ = true;
\r
2664 drivers.removeCurrentDriver();
\r
2665 driverInfo.asioVersion = 2;
\r
2667 // See note in DirectSound implementation about GetDesktopWindow().
\r
2668 driverInfo.sysRef = GetForegroundWindow();
\r
2671 RtApiAsio :: ~RtApiAsio()
\r
2673 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2674 if ( coInitialized_ ) CoUninitialize();
\r
2677 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2679 return (unsigned int) drivers.asioGetNumDev();
\r
2682 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2684 RtAudio::DeviceInfo info;
\r
2685 info.probed = false;
\r
2688 unsigned int nDevices = getDeviceCount();
\r
2689 if ( nDevices == 0 ) {
\r
2690 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2691 error( RtAudioError::INVALID_USE );
\r
2695 if ( device >= nDevices ) {
\r
2696 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2697 error( RtAudioError::INVALID_USE );
\r
2701 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2702 if ( stream_.state != STREAM_CLOSED ) {
\r
2703 if ( device >= devices_.size() ) {
\r
2704 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2705 error( RtAudioError::WARNING );
\r
2708 return devices_[ device ];
\r
2711 char driverName[32];
\r
2712 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2713 if ( result != ASE_OK ) {
\r
2714 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2715 errorText_ = errorStream_.str();
\r
2716 error( RtAudioError::WARNING );
\r
2720 info.name = driverName;
\r
2722 if ( !drivers.loadDriver( driverName ) ) {
\r
2723 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2724 errorText_ = errorStream_.str();
\r
2725 error( RtAudioError::WARNING );
\r
2729 result = ASIOInit( &driverInfo );
\r
2730 if ( result != ASE_OK ) {
\r
2731 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2732 errorText_ = errorStream_.str();
\r
2733 error( RtAudioError::WARNING );
\r
2737 // Determine the device channel information.
\r
2738 long inputChannels, outputChannels;
\r
2739 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2740 if ( result != ASE_OK ) {
\r
2741 drivers.removeCurrentDriver();
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 info.outputChannels = outputChannels;
\r
2749 info.inputChannels = inputChannels;
\r
2750 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2751 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2753 // Determine the supported sample rates.
\r
2754 info.sampleRates.clear();
\r
2755 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2756 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2757 if ( result == ASE_OK )
\r
2758 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2761 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2762 ASIOChannelInfo channelInfo;
\r
2763 channelInfo.channel = 0;
\r
2764 channelInfo.isInput = true;
\r
2765 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2766 result = ASIOGetChannelInfo( &channelInfo );
\r
2767 if ( result != ASE_OK ) {
\r
2768 drivers.removeCurrentDriver();
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 info.nativeFormats = 0;
\r
2776 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2777 info.nativeFormats |= RTAUDIO_SINT16;
\r
2778 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT32;
\r
2780 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2784 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2785 info.nativeFormats |= RTAUDIO_SINT24;
\r
2787 if ( info.outputChannels > 0 )
\r
2788 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2789 if ( info.inputChannels > 0 )
\r
2790 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2792 info.probed = true;
\r
2793 drivers.removeCurrentDriver();
\r
2797 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2799 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2800 object->callbackEvent( index );
\r
2803 void RtApiAsio :: saveDeviceInfo( void )
\r
2807 unsigned int nDevices = getDeviceCount();
\r
2808 devices_.resize( nDevices );
\r
2809 for ( unsigned int i=0; i<nDevices; i++ )
\r
2810 devices_[i] = getDeviceInfo( i );
\r
2813 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2814 unsigned int firstChannel, unsigned int sampleRate,
\r
2815 RtAudioFormat format, unsigned int *bufferSize,
\r
2816 RtAudio::StreamOptions *options )
\r
2818 // For ASIO, a duplex stream MUST use the same driver.
\r
2819 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2820 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2824 char driverName[32];
\r
2825 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2826 if ( result != ASE_OK ) {
\r
2827 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2828 errorText_ = errorStream_.str();
\r
2832 // Only load the driver once for duplex stream.
\r
2833 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2834 // The getDeviceInfo() function will not work when a stream is open
\r
2835 // because ASIO does not allow multiple devices to run at the same
\r
2836 // time. Thus, we'll probe the system before opening a stream and
\r
2837 // save the results for use by getDeviceInfo().
\r
2838 this->saveDeviceInfo();
\r
2840 if ( !drivers.loadDriver( driverName ) ) {
\r
2841 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2842 errorText_ = errorStream_.str();
\r
2846 result = ASIOInit( &driverInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2854 // Check the device channel count.
\r
2855 long inputChannels, outputChannels;
\r
2856 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2857 if ( result != ASE_OK ) {
\r
2858 drivers.removeCurrentDriver();
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2864 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2865 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2866 drivers.removeCurrentDriver();
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2871 stream_.nDeviceChannels[mode] = channels;
\r
2872 stream_.nUserChannels[mode] = channels;
\r
2873 stream_.channelOffset[mode] = firstChannel;
\r
2875 // Verify the sample rate is supported.
\r
2876 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2877 if ( result != ASE_OK ) {
\r
2878 drivers.removeCurrentDriver();
\r
2879 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2880 errorText_ = errorStream_.str();
\r
2884 // Get the current sample rate
\r
2885 ASIOSampleRate currentRate;
\r
2886 result = ASIOGetSampleRate( ¤tRate );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 // Set the sample rate only if necessary
\r
2895 if ( currentRate != sampleRate ) {
\r
2896 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2897 if ( result != ASE_OK ) {
\r
2898 drivers.removeCurrentDriver();
\r
2899 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2900 errorText_ = errorStream_.str();
\r
2905 // Determine the driver data type.
\r
2906 ASIOChannelInfo channelInfo;
\r
2907 channelInfo.channel = 0;
\r
2908 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2909 else channelInfo.isInput = true;
\r
2910 result = ASIOGetChannelInfo( &channelInfo );
\r
2911 if ( result != ASE_OK ) {
\r
2912 drivers.removeCurrentDriver();
\r
2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2914 errorText_ = errorStream_.str();
\r
2918 // Assuming WINDOWS host is always little-endian.
\r
2919 stream_.doByteSwap[mode] = false;
\r
2920 stream_.userFormat = format;
\r
2921 stream_.deviceFormat[mode] = 0;
\r
2922 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2923 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2926 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2927 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2928 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2930 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2931 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2932 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2934 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2935 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2936 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2938 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2939 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2940 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2943 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2944 drivers.removeCurrentDriver();
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2946 errorText_ = errorStream_.str();
\r
2950 // Set the buffer size. For a duplex stream, this will end up
\r
2951 // setting the buffer size based on the input constraints, which
\r
2953 long minSize, maxSize, preferSize, granularity;
\r
2954 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2955 if ( result != ASE_OK ) {
\r
2956 drivers.removeCurrentDriver();
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2958 errorText_ = errorStream_.str();
\r
2962 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2963 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2964 else if ( granularity == -1 ) {
\r
2965 // Make sure bufferSize is a power of two.
\r
2966 int log2_of_min_size = 0;
\r
2967 int log2_of_max_size = 0;
\r
2969 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2970 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2971 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2974 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2975 int min_delta_num = log2_of_min_size;
\r
2977 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2978 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2979 if (current_delta < min_delta) {
\r
2980 min_delta = current_delta;
\r
2981 min_delta_num = i;
\r
2985 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2986 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2987 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2989 else if ( granularity != 0 ) {
\r
2990 // Set to an even multiple of granularity, rounding up.
\r
2991 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2994 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2995 drivers.removeCurrentDriver();
\r
2996 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3000 stream_.bufferSize = *bufferSize;
\r
3001 stream_.nBuffers = 2;
\r
3003 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3004 else stream_.userInterleaved = true;
\r
3006 // ASIO always uses non-interleaved buffers.
\r
3007 stream_.deviceInterleaved[mode] = false;
\r
3009 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3010 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3011 if ( handle == 0 ) {
\r
3013 handle = new AsioHandle;
\r
3015 catch ( std::bad_alloc& ) {
\r
3016 //if ( handle == NULL ) {
\r
3017 drivers.removeCurrentDriver();
\r
3018 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3021 handle->bufferInfos = 0;
\r
3023 // Create a manual-reset event.
\r
3024 handle->condition = CreateEvent( NULL, // no security
\r
3025 TRUE, // manual-reset
\r
3026 FALSE, // non-signaled initially
\r
3027 NULL ); // unnamed
\r
3028 stream_.apiHandle = (void *) handle;
\r
3031 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3032 // and output separately, we'll have to dispose of previously
\r
3033 // created output buffers for a duplex stream.
\r
3034 long inputLatency, outputLatency;
\r
3035 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3036 ASIODisposeBuffers();
\r
3037 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3040 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3041 bool buffersAllocated = false;
\r
3042 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3043 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3044 if ( handle->bufferInfos == NULL ) {
\r
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3046 errorText_ = errorStream_.str();
\r
3050 ASIOBufferInfo *infos;
\r
3051 infos = handle->bufferInfos;
\r
3052 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3053 infos->isInput = ASIOFalse;
\r
3054 infos->channelNum = i + stream_.channelOffset[0];
\r
3055 infos->buffers[0] = infos->buffers[1] = 0;
\r
3057 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3058 infos->isInput = ASIOTrue;
\r
3059 infos->channelNum = i + stream_.channelOffset[1];
\r
3060 infos->buffers[0] = infos->buffers[1] = 0;
\r
3063 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3064 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3065 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3066 asioCallbacks.asioMessage = &asioMessages;
\r
3067 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3068 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3069 if ( result != ASE_OK ) {
\r
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3071 errorText_ = errorStream_.str();
\r
3074 buffersAllocated = true;
\r
3076 // Set flags for buffer conversion.
\r
3077 stream_.doConvertBuffer[mode] = false;
\r
3078 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3079 stream_.doConvertBuffer[mode] = true;
\r
3080 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3081 stream_.nUserChannels[mode] > 1 )
\r
3082 stream_.doConvertBuffer[mode] = true;
\r
3084 // Allocate necessary internal buffers
\r
3085 unsigned long bufferBytes;
\r
3086 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3087 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3088 if ( stream_.userBuffer[mode] == NULL ) {
\r
3089 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3093 if ( stream_.doConvertBuffer[mode] ) {
\r
3095 bool makeBuffer = true;
\r
3096 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3097 if ( mode == INPUT ) {
\r
3098 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3099 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3100 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3104 if ( makeBuffer ) {
\r
3105 bufferBytes *= *bufferSize;
\r
3106 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3107 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3108 if ( stream_.deviceBuffer == NULL ) {
\r
3109 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3115 stream_.sampleRate = sampleRate;
\r
3116 stream_.device[mode] = device;
\r
3117 stream_.state = STREAM_STOPPED;
\r
3118 asioCallbackInfo = &stream_.callbackInfo;
\r
3119 stream_.callbackInfo.object = (void *) this;
\r
3120 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3121 // We had already set up an output stream.
\r
3122 stream_.mode = DUPLEX;
\r
3124 stream_.mode = mode;
\r
3126 // Determine device latencies
\r
3127 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3128 if ( result != ASE_OK ) {
\r
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3130 errorText_ = errorStream_.str();
\r
3131 error( RtAudioError::WARNING); // warn but don't fail
\r
3134 stream_.latency[0] = outputLatency;
\r
3135 stream_.latency[1] = inputLatency;
\r
3138 // Setup the buffer conversion information structure. We don't use
\r
3139 // buffers to do channel offsets, so we override that parameter
\r
3141 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3146 if ( buffersAllocated )
\r
3147 ASIODisposeBuffers();
\r
3148 drivers.removeCurrentDriver();
\r
3151 CloseHandle( handle->condition );
\r
3152 if ( handle->bufferInfos )
\r
3153 free( handle->bufferInfos );
\r
3155 stream_.apiHandle = 0;
\r
3158 for ( int i=0; i<2; i++ ) {
\r
3159 if ( stream_.userBuffer[i] ) {
\r
3160 free( stream_.userBuffer[i] );
\r
3161 stream_.userBuffer[i] = 0;
\r
3165 if ( stream_.deviceBuffer ) {
\r
3166 free( stream_.deviceBuffer );
\r
3167 stream_.deviceBuffer = 0;
\r
3173 void RtApiAsio :: closeStream()
\r
3175 if ( stream_.state == STREAM_CLOSED ) {
\r
3176 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 if ( stream_.state == STREAM_RUNNING ) {
\r
3182 stream_.state = STREAM_STOPPED;
\r
3185 ASIODisposeBuffers();
\r
3186 drivers.removeCurrentDriver();
\r
3188 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3190 CloseHandle( handle->condition );
\r
3191 if ( handle->bufferInfos )
\r
3192 free( handle->bufferInfos );
\r
3194 stream_.apiHandle = 0;
\r
3197 for ( int i=0; i<2; i++ ) {
\r
3198 if ( stream_.userBuffer[i] ) {
\r
3199 free( stream_.userBuffer[i] );
\r
3200 stream_.userBuffer[i] = 0;
\r
3204 if ( stream_.deviceBuffer ) {
\r
3205 free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = 0;
\r
3209 stream_.mode = UNINITIALIZED;
\r
3210 stream_.state = STREAM_CLOSED;
\r
// Cleared by startStream(); NOTE(review): the setting side is outside
// this section — presumably set when a stop is requested asynchronously.
bool stopThreadCalled = false;
\r
3215 void RtApiAsio :: startStream()
\r
3218 if ( stream_.state == STREAM_RUNNING ) {
\r
3219 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3220 error( RtAudioError::WARNING );
\r
3224 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3225 ASIOError result = ASIOStart();
\r
3226 if ( result != ASE_OK ) {
\r
3227 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3228 errorText_ = errorStream_.str();
\r
3232 handle->drainCounter = 0;
\r
3233 handle->internalDrain = false;
\r
3234 ResetEvent( handle->condition );
\r
3235 stream_.state = STREAM_RUNNING;
\r
3239 stopThreadCalled = false;
\r
3241 if ( result == ASE_OK ) return;
\r
3242 error( RtAudioError::SYSTEM_ERROR );
\r
3245 void RtApiAsio :: stopStream()
\r
3248 if ( stream_.state == STREAM_STOPPED ) {
\r
3249 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3250 error( RtAudioError::WARNING );
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3256 if ( handle->drainCounter == 0 ) {
\r
3257 handle->drainCounter = 2;
\r
3258 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3262 stream_.state = STREAM_STOPPED;
\r
3264 ASIOError result = ASIOStop();
\r
3265 if ( result != ASE_OK ) {
\r
3266 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3267 errorText_ = errorStream_.str();
\r
3270 if ( result == ASE_OK ) return;
\r
3271 error( RtAudioError::SYSTEM_ERROR );
\r
3274 void RtApiAsio :: abortStream()
\r
3277 if ( stream_.state == STREAM_STOPPED ) {
\r
3278 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3279 error( RtAudioError::WARNING );
\r
3283 // The following lines were commented-out because some behavior was
\r
3284 // noted where the device buffers need to be zeroed to avoid
\r
3285 // continuing sound, even when the device buffers are completely
\r
3286 // disposed. So now, calling abort is the same as calling stop.
\r
3287 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3288 // handle->drainCounter = 2;
\r
3292 // This function will be called by a spawned thread when the user
\r
3293 // callback function signals that the stream should be stopped or
\r
3294 // aborted. It is necessary to handle it this way because the
\r
3295 // callbackEvent() function must return before the ASIOStop()
\r
3296 // function will return.
\r
3297 static unsigned __stdcall asioStopStream( void *ptr )
\r
3299 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3300 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3302 object->stopStream();
\r
3303 _endthreadex( 0 );
\r
3307 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3309 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3310 if ( stream_.state == STREAM_CLOSED ) {
\r
3311 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3319 // Check if we were draining the stream and signal if finished.
\r
3320 if ( handle->drainCounter > 3 ) {
\r
3322 stream_.state = STREAM_STOPPING;
\r
3323 if ( handle->internalDrain == false )
\r
3324 SetEvent( handle->condition );
\r
3325 else { // spawn a thread to stop the stream
\r
3326 unsigned threadId;
\r
3327 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3328 &stream_.callbackInfo, 0, &threadId );
\r
3333 // Invoke user callback to get fresh output data UNLESS we are
\r
3334 // draining stream.
\r
3335 if ( handle->drainCounter == 0 ) {
\r
3336 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3337 double streamTime = getStreamTime();
\r
3338 RtAudioStreamStatus status = 0;
\r
3339 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3340 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3343 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3344 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3347 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3348 stream_.bufferSize, streamTime, status, info->userData );
\r
3349 if ( cbReturnValue == 2 ) {
\r
3350 stream_.state = STREAM_STOPPING;
\r
3351 handle->drainCounter = 2;
\r
3352 unsigned threadId;
\r
3353 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3354 &stream_.callbackInfo, 0, &threadId );
\r
3357 else if ( cbReturnValue == 1 ) {
\r
3358 handle->drainCounter = 1;
\r
3359 handle->internalDrain = true;
\r
3363 unsigned int nChannels, bufferBytes, i, j;
\r
3364 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3367 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3369 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3371 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3372 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3373 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3377 else if ( stream_.doConvertBuffer[0] ) {
\r
3379 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3380 if ( stream_.doByteSwap[0] )
\r
3381 byteSwapBuffer( stream_.deviceBuffer,
\r
3382 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3383 stream_.deviceFormat[0] );
\r
3385 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3386 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3387 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3388 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3394 if ( stream_.doByteSwap[0] )
\r
3395 byteSwapBuffer( stream_.userBuffer[0],
\r
3396 stream_.bufferSize * stream_.nUserChannels[0],
\r
3397 stream_.userFormat );
\r
3399 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3400 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3401 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3402 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3407 if ( handle->drainCounter ) {
\r
3408 handle->drainCounter++;
\r
3413 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3415 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3417 if (stream_.doConvertBuffer[1]) {
\r
3419 // Always interleave ASIO input data.
\r
3420 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3421 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3422 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3423 handle->bufferInfos[i].buffers[bufferIndex],
\r
3427 if ( stream_.doByteSwap[1] )
\r
3428 byteSwapBuffer( stream_.deviceBuffer,
\r
3429 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3430 stream_.deviceFormat[1] );
\r
3431 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3435 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3436 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3437 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3438 handle->bufferInfos[i].buffers[bufferIndex],
\r
3443 if ( stream_.doByteSwap[1] )
\r
3444 byteSwapBuffer( stream_.userBuffer[1],
\r
3445 stream_.bufferSize * stream_.nUserChannels[1],
\r
3446 stream_.userFormat );
\r
3451 // The following call was suggested by Malte Clasen. While the API
\r
3452 // documentation indicates it should not be required, some device
\r
3453 // drivers apparently do not function correctly without it.
\r
3454 ASIOOutputReady();
\r
3456 RtApi::tickStreamTime();
\r
3460 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3462 // The ASIO documentation says that this usually only happens during
\r
3463 // external sync. Audio processing is not stopped by the driver,
\r
3464 // actual sample rate might not have even changed, maybe only the
\r
3465 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3468 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3470 object->stopStream();
\r
3472 catch ( RtAudioError &exception ) {
\r
3473 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3477 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3480 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3484 switch( selector ) {
\r
3485 case kAsioSelectorSupported:
\r
3486 if ( value == kAsioResetRequest
\r
3487 || value == kAsioEngineVersion
\r
3488 || value == kAsioResyncRequest
\r
3489 || value == kAsioLatenciesChanged
\r
3490 // The following three were added for ASIO 2.0, you don't
\r
3491 // necessarily have to support them.
\r
3492 || value == kAsioSupportsTimeInfo
\r
3493 || value == kAsioSupportsTimeCode
\r
3494 || value == kAsioSupportsInputMonitor)
\r
3497 case kAsioResetRequest:
\r
3498 // Defer the task and perform the reset of the driver during the
\r
3499 // next "safe" situation. You cannot reset the driver right now,
\r
3500 // as this code is called from the driver. Reset the driver is
\r
3501 // done by completely destruct is. I.e. ASIOStop(),
\r
3502 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3504 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3507 case kAsioResyncRequest:
\r
3508 // This informs the application that the driver encountered some
\r
3509 // non-fatal data loss. It is used for synchronization purposes
\r
3510 // of different media. Added mainly to work around the Win16Mutex
\r
3511 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3512 // which could lose data because the Mutex was held too long by
\r
3513 // another thread. However a driver can issue it in other
\r
3514 // situations, too.
\r
3515 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3519 case kAsioLatenciesChanged:
\r
3520 // This will inform the host application that the drivers were
\r
3521 // latencies changed. Beware, it this does not mean that the
\r
3522 // buffer sizes have changed! You might need to update internal
\r
3524 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3527 case kAsioEngineVersion:
\r
3528 // Return the supported ASIO version of the host application. If
\r
3529 // a host application does not implement this selector, ASIO 1.0
\r
3530 // is assumed by the driver.
\r
3533 case kAsioSupportsTimeInfo:
\r
3534 // Informs the driver whether the
\r
3535 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3536 // For compatibility with ASIO 1.0 drivers the host application
\r
3537 // should always support the "old" bufferSwitch method, too.
\r
3540 case kAsioSupportsTimeCode:
\r
3541 // Informs the driver whether application is interested in time
\r
3542 // code info. If an application does not need to know about time
\r
3543 // code, the driver has less work to do.
\r
3550 static const char* getAsioErrorString( ASIOError result )
\r
3555 const char*message;
\r
3558 static const Messages m[] =
\r
3560 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3561 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3562 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3563 { ASE_InvalidMode, "Invalid mode." },
\r
3564 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3565 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3566 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3569 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3570 if ( m[i].value == result ) return m[i].message;
\r
3572 return "Unknown error.";
\r
3575 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3579 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 #include <audioclient.h>
\r
3586 #include <mmdeviceapi.h>
\r
3587 #include <functiondiscoverykeys_devpkey.h>
\r
3589 //=============================================================================
\r
// Release a COM interface pointer (if non-NULL) and reset it to NULL so a
// later SAFE_RELEASE / use of the same pointer is harmless.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3598 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3600 //-----------------------------------------------------------------------------
\r
// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
// provide intermediate storage for read / write synchronization.
\r
3606 class WasapiBuffer
\r
3610 : buffer_( NULL ),
\r
3619 // sets the length of the internal ring buffer
\r
3620 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3623 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3625 bufferSize_ = bufferSize;
\r
3630 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3631 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3633 if ( !buffer || // incoming buffer is NULL
\r
3634 bufferSize == 0 || // incoming buffer has no data
\r
3635 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3640 unsigned int relOutIndex = outIndex_;
\r
3641 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3642 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3643 relOutIndex += bufferSize_;
\r
3646 // "in" index can end on the "out" index but cannot begin at it
\r
3647 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3648 return false; // not enough space between "in" index and "out" index
\r
3651 // copy buffer from external to internal
\r
3652 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3653 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3654 int fromInSize = bufferSize - fromZeroSize;
\r
3658 case RTAUDIO_SINT8:
\r
3659 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3660 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3662 case RTAUDIO_SINT16:
\r
3663 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3664 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3666 case RTAUDIO_SINT24:
\r
3667 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3668 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3670 case RTAUDIO_SINT32:
\r
3671 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3672 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3674 case RTAUDIO_FLOAT32:
\r
3675 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3676 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3678 case RTAUDIO_FLOAT64:
\r
3679 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3680 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3684 // update "in" index
\r
3685 inIndex_ += bufferSize;
\r
3686 inIndex_ %= bufferSize_;
\r
3691 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3692 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3694 if ( !buffer || // incoming buffer is NULL
\r
3695 bufferSize == 0 || // incoming buffer has no data
\r
3696 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3701 unsigned int relInIndex = inIndex_;
\r
3702 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3703 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3704 relInIndex += bufferSize_;
\r
3707 // "out" index can begin at and end on the "in" index
\r
3708 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3709 return false; // not enough space between "out" index and "in" index
\r
3712 // copy buffer from internal to external
\r
3713 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3714 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3715 int fromOutSize = bufferSize - fromZeroSize;
\r
3719 case RTAUDIO_SINT8:
\r
3720 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3721 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3723 case RTAUDIO_SINT16:
\r
3724 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3725 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3727 case RTAUDIO_SINT24:
\r
3728 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3729 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3731 case RTAUDIO_SINT32:
\r
3732 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3733 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3735 case RTAUDIO_FLOAT32:
\r
3736 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3737 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3739 case RTAUDIO_FLOAT64:
\r
3740 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3741 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3745 // update "out" index
\r
3746 outIndex_ += bufferSize;
\r
3747 outIndex_ %= bufferSize_;
\r
3754 unsigned int bufferSize_;
\r
3755 unsigned int inIndex_;
\r
3756 unsigned int outIndex_;
\r
3759 //-----------------------------------------------------------------------------
\r
// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
// channel counts between HW and the user. The convertBufferWasapi function is used to perform
// these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// This sample rate converter favors speed over quality, and works best with conversions between
// one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates
// that may cause artifacts via this conversion.
\r
3767 void convertBufferWasapi( char* outBuffer,
\r
3768 const char* inBuffer,
\r
3769 const unsigned int& inChannelCount,
\r
3770 const unsigned int& outChannelCount,
\r
3771 const unsigned int& inSampleRate,
\r
3772 const unsigned int& outSampleRate,
\r
3773 const unsigned int& inSampleCount,
\r
3774 unsigned int& outSampleCount,
\r
3775 const RtAudioFormat& format )
\r
3777 // calculate the new outSampleCount and relative sampleStep
\r
3778 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3779 float sampleStep = 1.0f / sampleRatio;
\r
3780 float inSampleFraction = 0.0f;
\r
3781 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3783 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3785 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3786 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3788 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3792 case RTAUDIO_SINT8:
\r
3793 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3795 case RTAUDIO_SINT16:
\r
3796 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3798 case RTAUDIO_SINT24:
\r
3799 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3801 case RTAUDIO_SINT32:
\r
3802 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3804 case RTAUDIO_FLOAT32:
\r
3805 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3807 case RTAUDIO_FLOAT64:
\r
3808 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3812 // jump to next in sample
\r
3813 inSampleFraction += sampleStep;
\r
3817 //-----------------------------------------------------------------------------
\r
// A structure to hold various information related to the WASAPI implementation.
\r
3820 struct WasapiHandle
\r
3822 IAudioClient* captureAudioClient;
\r
3823 IAudioClient* renderAudioClient;
\r
3824 IAudioCaptureClient* captureClient;
\r
3825 IAudioRenderClient* renderClient;
\r
3826 HANDLE captureEvent;
\r
3827 HANDLE renderEvent;
\r
3830 : captureAudioClient( NULL ),
\r
3831 renderAudioClient( NULL ),
\r
3832 captureClient( NULL ),
\r
3833 renderClient( NULL ),
\r
3834 captureEvent( NULL ),
\r
3835 renderEvent( NULL ) {}
\r
3838 //=============================================================================
\r
3840 RtApiWasapi::RtApiWasapi()
\r
3841 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3843 // WASAPI can run either apartment or multi-threaded
\r
3844 HRESULT hr = CoInitialize( NULL );
\r
3846 if ( !FAILED( hr ) )
\r
3847 coInitialized_ = true;
\r
3849 // Instantiate device enumerator
\r
3850 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3851 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3852 ( void** ) &deviceEnumerator_ );
\r
3854 if ( FAILED( hr ) ) {
\r
3855 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3856 error( RtAudioError::DRIVER_ERROR );
\r
3860 //-----------------------------------------------------------------------------
\r
3862 RtApiWasapi::~RtApiWasapi()
\r
3864 // if this object previously called CoInitialize()
\r
3865 if ( coInitialized_ ) {
\r
3869 if ( stream_.state != STREAM_CLOSED ) {
\r
3873 SAFE_RELEASE( deviceEnumerator_ );
\r
3876 //=============================================================================
\r
3878 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3880 unsigned int captureDeviceCount = 0;
\r
3881 unsigned int renderDeviceCount = 0;
\r
3883 IMMDeviceCollection* captureDevices = NULL;
\r
3884 IMMDeviceCollection* renderDevices = NULL;
\r
3886 // Count capture devices
\r
3887 errorText_.clear();
\r
3888 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3889 if ( FAILED( hr ) ) {
\r
3890 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3894 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3895 if ( FAILED( hr ) ) {
\r
3896 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3900 // Count render devices
\r
3901 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3902 if ( FAILED( hr ) ) {
\r
3903 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3907 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3908 if ( FAILED( hr ) ) {
\r
3909 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3914 // release all references
\r
3915 SAFE_RELEASE( captureDevices );
\r
3916 SAFE_RELEASE( renderDevices );
\r
3918 if ( errorText_.empty() )
\r
3919 return captureDeviceCount + renderDeviceCount;
\r
3921 error( RtAudioError::DRIVER_ERROR );
\r
3925 //-----------------------------------------------------------------------------
\r
3927 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3929 RtAudio::DeviceInfo info;
\r
3930 unsigned int captureDeviceCount = 0;
\r
3931 unsigned int renderDeviceCount = 0;
\r
3932 std::wstring deviceName;
\r
3933 std::string defaultDeviceName;
\r
3934 bool isCaptureDevice = false;
\r
3936 PROPVARIANT deviceNameProp;
\r
3937 PROPVARIANT defaultDeviceNameProp;
\r
3939 IMMDeviceCollection* captureDevices = NULL;
\r
3940 IMMDeviceCollection* renderDevices = NULL;
\r
3941 IMMDevice* devicePtr = NULL;
\r
3942 IMMDevice* defaultDevicePtr = NULL;
\r
3943 IAudioClient* audioClient = NULL;
\r
3944 IPropertyStore* devicePropStore = NULL;
\r
3945 IPropertyStore* defaultDevicePropStore = NULL;
\r
3947 WAVEFORMATEX* deviceFormat = NULL;
\r
3948 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3951 info.probed = false;
\r
3953 // Count capture devices
\r
3954 errorText_.clear();
\r
3955 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3956 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3957 if ( FAILED( hr ) ) {
\r
3958 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3962 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3963 if ( FAILED( hr ) ) {
\r
3964 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3968 // Count render devices
\r
3969 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3970 if ( FAILED( hr ) ) {
\r
3971 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3975 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3976 if ( FAILED( hr ) ) {
\r
3977 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3981 // validate device index
\r
3982 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3984 errorType = RtAudioError::INVALID_USE;
\r
3988 // determine whether index falls within capture or render devices
\r
3989 if ( device >= renderDeviceCount ) {
\r
3990 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3991 if ( FAILED( hr ) ) {
\r
3992 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
3995 isCaptureDevice = true;
\r
3998 hr = renderDevices->Item( device, &devicePtr );
\r
3999 if ( FAILED( hr ) ) {
\r
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4003 isCaptureDevice = false;
\r
4006 // get default device name
\r
4007 if ( isCaptureDevice ) {
\r
4008 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4009 if ( FAILED( hr ) ) {
\r
4010 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4015 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4016 if ( FAILED( hr ) ) {
\r
4017 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4022 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4023 if ( FAILED( hr ) ) {
\r
4024 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4027 PropVariantInit( &defaultDeviceNameProp );
\r
4029 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4030 if ( FAILED( hr ) ) {
\r
4031 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4035 deviceName = defaultDeviceNameProp.pwszVal;
\r
4036 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4039 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4040 if ( FAILED( hr ) ) {
\r
4041 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4045 PropVariantInit( &deviceNameProp );
\r
4047 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4048 if ( FAILED( hr ) ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4053 deviceName = deviceNameProp.pwszVal;
\r
4054 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4057 if ( isCaptureDevice ) {
\r
4058 info.isDefaultInput = info.name == defaultDeviceName;
\r
4059 info.isDefaultOutput = false;
\r
4062 info.isDefaultInput = false;
\r
4063 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4067 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4068 if ( FAILED( hr ) ) {
\r
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4073 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4074 if ( FAILED( hr ) ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4079 if ( isCaptureDevice ) {
\r
4080 info.inputChannels = deviceFormat->nChannels;
\r
4081 info.outputChannels = 0;
\r
4082 info.duplexChannels = 0;
\r
4085 info.inputChannels = 0;
\r
4086 info.outputChannels = deviceFormat->nChannels;
\r
4087 info.duplexChannels = 0;
\r
4091 info.sampleRates.clear();
\r
4093 // allow support for sample rates that are multiples of the base rate
\r
4094 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4095 if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) {
\r
4096 if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) {
\r
4097 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4101 if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) {
\r
4102 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4108 info.nativeFormats = 0;
\r
4110 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4111 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4112 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4114 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4115 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4117 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4118 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4121 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4122 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4123 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4125 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4126 info.nativeFormats |= RTAUDIO_SINT8;
\r
4128 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT16;
\r
4131 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4132 info.nativeFormats |= RTAUDIO_SINT24;
\r
4134 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4135 info.nativeFormats |= RTAUDIO_SINT32;
\r
4140 info.probed = true;
\r
4143 // release all references
\r
4144 PropVariantClear( &deviceNameProp );
\r
4145 PropVariantClear( &defaultDeviceNameProp );
\r
4147 SAFE_RELEASE( captureDevices );
\r
4148 SAFE_RELEASE( renderDevices );
\r
4149 SAFE_RELEASE( devicePtr );
\r
4150 SAFE_RELEASE( defaultDevicePtr );
\r
4151 SAFE_RELEASE( audioClient );
\r
4152 SAFE_RELEASE( devicePropStore );
\r
4153 SAFE_RELEASE( defaultDevicePropStore );
\r
4155 CoTaskMemFree( deviceFormat );
\r
4156 CoTaskMemFree( closestMatchFormat );
\r
4158 if ( !errorText_.empty() )
\r
4159 error( errorType );
\r
4163 //-----------------------------------------------------------------------------
\r
4165 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4167 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4168 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4176 //-----------------------------------------------------------------------------
\r
4178 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4180 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4181 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4189 //-----------------------------------------------------------------------------
\r
4191 void RtApiWasapi::closeStream( void )
\r
4193 if ( stream_.state == STREAM_CLOSED ) {
\r
4194 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4195 error( RtAudioError::WARNING );
\r
4199 if ( stream_.state != STREAM_STOPPED )
\r
4202 // clean up stream memory
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4204 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4207 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4209 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4210 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4212 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4213 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4215 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4216 stream_.apiHandle = NULL;
\r
4218 for ( int i = 0; i < 2; i++ ) {
\r
4219 if ( stream_.userBuffer[i] ) {
\r
4220 free( stream_.userBuffer[i] );
\r
4221 stream_.userBuffer[i] = 0;
\r
4225 if ( stream_.deviceBuffer ) {
\r
4226 free( stream_.deviceBuffer );
\r
4227 stream_.deviceBuffer = 0;
\r
4230 // update stream state
\r
4231 stream_.state = STREAM_CLOSED;
\r
4234 //-----------------------------------------------------------------------------
\r
4236 void RtApiWasapi::startStream( void )
\r
4240 if ( stream_.state == STREAM_RUNNING ) {
\r
4241 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4242 error( RtAudioError::WARNING );
\r
4246 // update stream state
\r
4247 stream_.state = STREAM_RUNNING;
\r
4249 // create WASAPI stream thread
\r
4250 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4252 if ( !stream_.callbackInfo.thread ) {
\r
4253 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4254 error( RtAudioError::THREAD_ERROR );
\r
4257 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4258 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4262 //-----------------------------------------------------------------------------
\r
4264 void RtApiWasapi::stopStream( void )
\r
4268 if ( stream_.state == STREAM_STOPPED ) {
\r
4269 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4270 error( RtAudioError::WARNING );
\r
4274 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4275 stream_.state = STREAM_STOPPING;
\r
4277 // wait until stream thread is stopped
\r
4278 while( stream_.state != STREAM_STOPPED ) {
\r
4282 // Wait for the last buffer to play before stopping.
\r
4283 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4285 // stop capture client if applicable
\r
4286 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4287 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4288 if ( FAILED( hr ) ) {
\r
4289 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4290 error( RtAudioError::DRIVER_ERROR );
\r
4295 // stop render client if applicable
\r
4296 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4297 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4298 if ( FAILED( hr ) ) {
\r
4299 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4300 error( RtAudioError::DRIVER_ERROR );
\r
4305 // close thread handle
\r
4306 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4307 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4308 error( RtAudioError::THREAD_ERROR );
\r
4312 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4315 //-----------------------------------------------------------------------------
\r
4317 void RtApiWasapi::abortStream( void )
\r
4321 if ( stream_.state == STREAM_STOPPED ) {
\r
4322 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4323 error( RtAudioError::WARNING );
\r
4327 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4328 stream_.state = STREAM_STOPPING;
\r
4330 // wait until stream thread is stopped
\r
4331 while ( stream_.state != STREAM_STOPPED ) {
\r
4335 // stop capture client if applicable
\r
4336 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4337 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4338 if ( FAILED( hr ) ) {
\r
4339 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4340 error( RtAudioError::DRIVER_ERROR );
\r
4345 // stop render client if applicable
\r
4346 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4347 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4348 if ( FAILED( hr ) ) {
\r
4349 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4350 error( RtAudioError::DRIVER_ERROR );
\r
4355 // close thread handle
\r
4356 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4357 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4358 error( RtAudioError::THREAD_ERROR );
\r
4362 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4365 //-----------------------------------------------------------------------------
\r
4367 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4368 unsigned int firstChannel, unsigned int sampleRate,
\r
4369 RtAudioFormat format, unsigned int* bufferSize,
\r
4370 RtAudio::StreamOptions* options )
\r
4372 bool methodResult = FAILURE;
\r
4373 unsigned int captureDeviceCount = 0;
\r
4374 unsigned int renderDeviceCount = 0;
\r
4376 IMMDeviceCollection* captureDevices = NULL;
\r
4377 IMMDeviceCollection* renderDevices = NULL;
\r
4378 IMMDevice* devicePtr = NULL;
\r
4379 WAVEFORMATEX* deviceFormat = NULL;
\r
4380 unsigned int bufferBytes;
\r
4381 stream_.state = STREAM_STOPPED;
\r
4383 // create API Handle if not already created
\r
4384 if ( !stream_.apiHandle )
\r
4385 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4387 // Count capture devices
\r
4388 errorText_.clear();
\r
4389 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4390 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4391 if ( FAILED( hr ) ) {
\r
4392 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4396 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4397 if ( FAILED( hr ) ) {
\r
4398 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4402 // Count render devices
\r
4403 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4404 if ( FAILED( hr ) ) {
\r
4405 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4409 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4410 if ( FAILED( hr ) ) {
\r
4411 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4415 // validate device index
\r
4416 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4417 errorType = RtAudioError::INVALID_USE;
\r
4418 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4422 // determine whether index falls within capture or render devices
\r
4423 if ( device >= renderDeviceCount ) {
\r
4424 if ( mode != INPUT ) {
\r
4425 errorType = RtAudioError::INVALID_USE;
\r
4426 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4430 // retrieve captureAudioClient from devicePtr
\r
4431 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4433 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4434 if ( FAILED( hr ) ) {
\r
4435 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4439 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4440 NULL, ( void** ) &captureAudioClient );
\r
4441 if ( FAILED( hr ) ) {
\r
4442 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4446 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4447 if ( FAILED( hr ) ) {
\r
4448 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4452 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4453 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4456 if ( mode != OUTPUT ) {
\r
4457 errorType = RtAudioError::INVALID_USE;
\r
4458 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4462 // retrieve renderAudioClient from devicePtr
\r
4463 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4465 hr = renderDevices->Item( device, &devicePtr );
\r
4466 if ( FAILED( hr ) ) {
\r
4467 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4471 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4472 NULL, ( void** ) &renderAudioClient );
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4478 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4479 if ( FAILED( hr ) ) {
\r
4480 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4484 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4485 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4488 // fill stream data
\r
4489 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4490 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4491 stream_.mode = DUPLEX;
\r
4494 stream_.mode = mode;
\r
4497 stream_.device[mode] = device;
\r
4498 stream_.doByteSwap[mode] = false;
\r
4499 stream_.sampleRate = sampleRate;
\r
4500 stream_.bufferSize = *bufferSize;
\r
4501 stream_.nBuffers = 1;
\r
4502 stream_.nUserChannels[mode] = channels;
\r
4503 stream_.channelOffset[mode] = firstChannel;
\r
4504 stream_.userFormat = format;
\r
4505 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4507 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4508 stream_.userInterleaved = false;
\r
4510 stream_.userInterleaved = true;
\r
4511 stream_.deviceInterleaved[mode] = true;
\r
4513 // Set flags for buffer conversion.
\r
4514 stream_.doConvertBuffer[mode] = false;
\r
4515 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4516 stream_.doConvertBuffer[mode] = true;
\r
4517 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4518 stream_.nUserChannels[mode] > 1 )
\r
4519 stream_.doConvertBuffer[mode] = true;
\r
4521 if ( stream_.doConvertBuffer[mode] )
\r
4522 setConvertInfo( mode, 0 );
\r
4524 // Allocate necessary internal buffers
\r
4525 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4527 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4528 if ( !stream_.userBuffer[mode] ) {
\r
4529 errorType = RtAudioError::MEMORY_ERROR;
\r
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4534 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4535 stream_.callbackInfo.priority = 15;
\r
4537 stream_.callbackInfo.priority = 0;
\r
4539 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4540 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4542 methodResult = SUCCESS;
\r
4546 SAFE_RELEASE( captureDevices );
\r
4547 SAFE_RELEASE( renderDevices );
\r
4548 SAFE_RELEASE( devicePtr );
\r
4549 CoTaskMemFree( deviceFormat );
\r
4551 // if method failed, close the stream
\r
4552 if ( methodResult == FAILURE )
\r
4555 if ( !errorText_.empty() )
\r
4556 error( errorType );
\r
4557 return methodResult;
\r
4560 //=============================================================================
\r
4562 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4565 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4570 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4573 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4578 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4581 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4586 //-----------------------------------------------------------------------------
\r
4588 void RtApiWasapi::wasapiThread()
\r
4590 // as this is a new thread, we must CoInitialize it
\r
4591 CoInitialize( NULL );
\r
4595 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4596 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4597 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4598 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4599 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4600 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4602 WAVEFORMATEX* captureFormat = NULL;
\r
4603 WAVEFORMATEX* renderFormat = NULL;
\r
4604 float captureSrRatio = 0.0f;
\r
4605 float renderSrRatio = 0.0f;
\r
4606 WasapiBuffer captureBuffer;
\r
4607 WasapiBuffer renderBuffer;
\r
4609 // declare local stream variables
\r
4610 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4611 BYTE* streamBuffer = NULL;
\r
4612 unsigned long captureFlags = 0;
\r
4613 unsigned int bufferFrameCount = 0;
\r
4614 unsigned int numFramesPadding = 0;
\r
4615 unsigned int convBufferSize = 0;
\r
4616 bool callbackPushed = false;
\r
4617 bool callbackPulled = false;
\r
4618 bool callbackStopped = false;
\r
4619 int callbackResult = 0;
\r
4621 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4622 char* convBuffer = NULL;
\r
4623 unsigned int deviceBufferSize = 0;
\r
4625 errorText_.clear();
\r
4626 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4628 // Attempt to assign "Pro Audio" characteristic to thread
\r
4629 HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
\r
4631 DWORD taskIndex = 0;
\r
4632 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4633 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4634 FreeLibrary( AvrtDll );
\r
4637 // start capture stream if applicable
\r
4638 if ( captureAudioClient ) {
\r
4639 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4640 if ( FAILED( hr ) ) {
\r
4641 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4645 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4647 // initialize capture stream according to desire buffer size
\r
4648 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4649 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4651 if ( !captureClient ) {
\r
4652 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4653 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4654 desiredBufferPeriod,
\r
4655 desiredBufferPeriod,
\r
4658 if ( FAILED( hr ) ) {
\r
4659 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4663 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4664 ( void** ) &captureClient );
\r
4665 if ( FAILED( hr ) ) {
\r
4666 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4670 // configure captureEvent to trigger on every available capture buffer
\r
4671 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4672 if ( !captureEvent ) {
\r
4673 errorType = RtAudioError::SYSTEM_ERROR;
\r
4674 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4678 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4679 if ( FAILED( hr ) ) {
\r
4680 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4684 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4685 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4688 unsigned int inBufferSize = 0;
\r
4689 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4690 if ( FAILED( hr ) ) {
\r
4691 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4695 // scale outBufferSize according to stream->user sample rate ratio
\r
4696 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4697 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4699 // set captureBuffer size
\r
4700 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4702 // reset the capture stream
\r
4703 hr = captureAudioClient->Reset();
\r
4704 if ( FAILED( hr ) ) {
\r
4705 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4709 // start the capture stream
\r
4710 hr = captureAudioClient->Start();
\r
4711 if ( FAILED( hr ) ) {
\r
4712 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4717 // start render stream if applicable
\r
4718 if ( renderAudioClient ) {
\r
4719 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4720 if ( FAILED( hr ) ) {
\r
4721 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4725 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4727 // initialize render stream according to desire buffer size
\r
4728 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4729 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4731 if ( !renderClient ) {
\r
4732 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4733 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4734 desiredBufferPeriod,
\r
4735 desiredBufferPeriod,
\r
4738 if ( FAILED( hr ) ) {
\r
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4743 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4744 ( void** ) &renderClient );
\r
4745 if ( FAILED( hr ) ) {
\r
4746 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4750 // configure renderEvent to trigger on every available render buffer
\r
4751 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4752 if ( !renderEvent ) {
\r
4753 errorType = RtAudioError::SYSTEM_ERROR;
\r
4754 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4758 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4759 if ( FAILED( hr ) ) {
\r
4760 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4764 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4765 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4768 unsigned int outBufferSize = 0;
\r
4769 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4770 if ( FAILED( hr ) ) {
\r
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4775 // scale inBufferSize according to user->stream sample rate ratio
\r
4776 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4777 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4779 // set renderBuffer size
\r
4780 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4782 // reset the render stream
\r
4783 hr = renderAudioClient->Reset();
\r
4784 if ( FAILED( hr ) ) {
\r
4785 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4789 // start the render stream
\r
4790 hr = renderAudioClient->Start();
\r
4791 if ( FAILED( hr ) ) {
\r
4792 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4797 if ( stream_.mode == INPUT ) {
\r
4798 deviceBufferSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4800 else if ( stream_.mode == OUTPUT ) {
\r
4801 deviceBufferSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4803 else if ( stream_.mode == DUPLEX ) {
\r
4804 deviceBufferSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4805 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4808 convBuffer = ( char* ) malloc( deviceBufferSize );
\r
4809 stream_.deviceBuffer = ( char* ) malloc( deviceBufferSize );
\r
4810 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4811 errorType = RtAudioError::MEMORY_ERROR;
\r
4812 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4816 // stream process loop
\r
4817 while ( stream_.state != STREAM_STOPPING ) {
\r
4818 if ( !callbackPulled ) {
\r
4821 // 1. Pull callback buffer from inputBuffer
\r
4822 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4823 // Convert callback buffer to user format
\r
4825 if ( captureAudioClient ) {
\r
4826 // Pull callback buffer from inputBuffer
\r
4827 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4828 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4829 stream_.deviceFormat[INPUT] );
\r
4831 if ( callbackPulled ) {
\r
4832 // Convert callback buffer to user sample rate and channel count
\r
4833 convertBufferWasapi( stream_.deviceBuffer,
\r
4835 stream_.nDeviceChannels[INPUT],
\r
4836 stream_.nUserChannels[INPUT],
\r
4837 captureFormat->nSamplesPerSec,
\r
4838 stream_.sampleRate,
\r
4839 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4841 stream_.deviceFormat[INPUT] );
\r
4843 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4844 // Convert callback buffer to user format
\r
4845 convertBuffer( stream_.userBuffer[INPUT],
\r
4846 stream_.deviceBuffer,
\r
4847 stream_.convertInfo[INPUT] );
\r
4850 // no conversion, simple copy deviceBuffer to userBuffer
\r
4851 memcpy( stream_.userBuffer[INPUT],
\r
4852 stream_.deviceBuffer,
\r
4853 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4858 // if there is no capture stream, set callbackPulled flag
\r
4859 callbackPulled = true;
\r
4862 // Execute Callback
\r
4863 // ================
\r
4864 // 1. Execute user callback method
\r
4865 // 2. Handle return value from callback
\r
4867 // if callback has not requested the stream to stop
\r
4868 if ( callbackPulled && !callbackStopped ) {
\r
4869 // Execute user callback method
\r
4870 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4871 stream_.userBuffer[INPUT],
\r
4872 stream_.bufferSize,
\r
4874 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4875 stream_.callbackInfo.userData );
\r
4877 // Handle return value from callback
\r
4878 if ( callbackResult == 1 ) {
\r
4879 // instantiate a thread to stop this thread
\r
4880 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4881 if ( !threadHandle ) {
\r
4882 errorType = RtAudioError::THREAD_ERROR;
\r
4883 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4886 else if ( !CloseHandle( threadHandle ) ) {
\r
4887 errorType = RtAudioError::THREAD_ERROR;
\r
4888 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4892 callbackStopped = true;
\r
4894 else if ( callbackResult == 2 ) {
\r
4895 // instantiate a thread to stop this thread
\r
4896 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4897 if ( !threadHandle ) {
\r
4898 errorType = RtAudioError::THREAD_ERROR;
\r
4899 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4902 else if ( !CloseHandle( threadHandle ) ) {
\r
4903 errorType = RtAudioError::THREAD_ERROR;
\r
4904 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4908 callbackStopped = true;
\r
4913 // Callback Output
\r
4914 // ===============
\r
4915 // 1. Convert callback buffer to stream format
\r
4916 // 2. Convert callback buffer to stream sample rate and channel count
\r
4917 // 3. Push callback buffer into outputBuffer
\r
4919 if ( renderAudioClient && callbackPulled ) {
\r
4920 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4921 // Convert callback buffer to stream format
\r
4922 convertBuffer( stream_.deviceBuffer,
\r
4923 stream_.userBuffer[OUTPUT],
\r
4924 stream_.convertInfo[OUTPUT] );
\r
4926 // Convert callback buffer to stream sample rate and channel count
\r
4927 convertBufferWasapi( convBuffer,
\r
4928 stream_.deviceBuffer,
\r
4929 stream_.nUserChannels[OUTPUT],
\r
4930 stream_.nDeviceChannels[OUTPUT],
\r
4931 stream_.sampleRate,
\r
4932 renderFormat->nSamplesPerSec,
\r
4933 stream_.bufferSize,
\r
4935 stream_.deviceFormat[OUTPUT] );
\r
4938 // Convert callback buffer to stream sample rate and channel count
\r
4939 convertBufferWasapi( convBuffer,
\r
4940 stream_.userBuffer[OUTPUT],
\r
4941 stream_.nUserChannels[OUTPUT],
\r
4942 stream_.nDeviceChannels[OUTPUT],
\r
4943 stream_.sampleRate,
\r
4944 renderFormat->nSamplesPerSec,
\r
4945 stream_.bufferSize,
\r
4947 stream_.deviceFormat[OUTPUT] );
\r
4950 // Push callback buffer into outputBuffer
\r
4951 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4952 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4953 stream_.deviceFormat[OUTPUT] );
\r
4958 // 1. Get capture buffer from stream
\r
4959 // 2. Push capture buffer into inputBuffer
\r
4960 // 3. If 2. was successful: Release capture buffer
\r
4962 if ( captureAudioClient ) {
\r
4963 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4964 if ( !callbackPulled ) {
\r
4965 WaitForSingleObject( captureEvent, INFINITE );
\r
4968 // Get capture buffer from stream
\r
4969 hr = captureClient->GetBuffer( &streamBuffer,
\r
4970 &bufferFrameCount,
\r
4971 &captureFlags, NULL, NULL );
\r
4972 if ( FAILED( hr ) ) {
\r
4973 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4977 if ( bufferFrameCount != 0 ) {
\r
4978 // Push capture buffer into inputBuffer
\r
4979 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4980 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4981 stream_.deviceFormat[INPUT] ) )
\r
4983 // Release capture buffer
\r
4984 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4985 if ( FAILED( hr ) ) {
\r
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4992 // Inform WASAPI that capture was unsuccessful
\r
4993 hr = captureClient->ReleaseBuffer( 0 );
\r
4994 if ( FAILED( hr ) ) {
\r
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5002 // Inform WASAPI that capture was unsuccessful
\r
5003 hr = captureClient->ReleaseBuffer( 0 );
\r
5004 if ( FAILED( hr ) ) {
\r
5005 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5013 // 1. Get render buffer from stream
\r
5014 // 2. Pull next buffer from outputBuffer
\r
5015 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5016 // Release render buffer
\r
5018 if ( renderAudioClient ) {
\r
5019 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5020 if ( callbackPulled && !callbackPushed ) {
\r
5021 WaitForSingleObject( renderEvent, INFINITE );
\r
5024 // Get render buffer from stream
\r
5025 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5026 if ( FAILED( hr ) ) {
\r
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5031 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5032 if ( FAILED( hr ) ) {
\r
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5037 bufferFrameCount -= numFramesPadding;
\r
5039 if ( bufferFrameCount != 0 ) {
\r
5040 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5041 if ( FAILED( hr ) ) {
\r
5042 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5046 // Pull next buffer from outputBuffer
\r
5047 // Fill render buffer with next buffer
\r
5048 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5049 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5050 stream_.deviceFormat[OUTPUT] ) )
\r
5052 // Release render buffer
\r
5053 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5054 if ( FAILED( hr ) ) {
\r
5055 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5061 // Inform WASAPI that render was unsuccessful
\r
5062 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5063 if ( FAILED( hr ) ) {
\r
5064 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5071 // Inform WASAPI that render was unsuccessful
\r
5072 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5073 if ( FAILED( hr ) ) {
\r
5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5080 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5081 if ( callbackPushed ) {
\r
5082 callbackPulled = false;
\r
5085 // tick stream time
\r
5086 RtApi::tickStreamTime();
\r
5091 CoTaskMemFree( captureFormat );
\r
5092 CoTaskMemFree( renderFormat );
\r
5094 //delete convBuffer;
\r
5095 free ( convBuffer );
\r
5099 // update stream state
\r
5100 stream_.state = STREAM_STOPPED;
\r
5102 if ( errorText_.empty() )
\r
5105 error( errorType );
\r
5108 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5112 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5114 // Modified by Robin Davies, October 2005
\r
5115 // - Improvements to DirectX pointer chasing.
\r
5116 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5117 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5118 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5119 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5121 #include <dsound.h>
\r
5122 #include <assert.h>
\r
5123 #include <algorithm>
\r
5125 #if defined(__MINGW32__)
\r
5126 // missing from latest mingw winapi
\r
5127 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5128 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5129 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5130 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5133 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5135 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5136 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5139 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5141 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5142 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5143 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5144 return pointer >= earlierPointer && pointer < laterPointer;
\r
5147 // A structure to hold various information related to the DirectSound
\r
5148 // API implementation.
\r
5150 unsigned int drainCounter; // Tracks callback counts when draining
\r
5151 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5155 UINT bufferPointer[2];
\r
5156 DWORD dsBufferSize[2];
\r
5157 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5161 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5164 // Declarations for utility functions, callbacks, and structures
\r
5165 // specific to the DirectSound implementation.
\r
5166 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5167 LPCTSTR description,
\r
5169 LPVOID lpContext );
\r
5171 static const char* getErrorString( int code );
\r
5173 static unsigned __stdcall callbackHandler( void *ptr );
\r
5182 : found(false) { validId[0] = false; validId[1] = false; }
\r
5185 struct DsProbeData {
\r
5187 std::vector<struct DsDevice>* dsDevices;
\r
5190 RtApiDs :: RtApiDs()
\r
5192 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5193 // accept whatever the mainline chose for a threading model.
\r
5194 coInitialized_ = false;
\r
5195 HRESULT hr = CoInitialize( NULL );
\r
5196 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5199 RtApiDs :: ~RtApiDs()
\r
5201 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5202 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5205 // The DirectSound default output is always the first device.
\r
5206 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5211 // The DirectSound default input is always the first input device,
\r
5212 // which is the first capture device enumerated.
\r
5213 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5218 unsigned int RtApiDs :: getDeviceCount( void )
\r
5220 // Set query flag for previously found devices to false, so that we
\r
5221 // can check for any devices that have disappeared.
\r
5222 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5223 dsDevices[i].found = false;
\r
5225 // Query DirectSound devices.
\r
5226 struct DsProbeData probeInfo;
\r
5227 probeInfo.isInput = false;
\r
5228 probeInfo.dsDevices = &dsDevices;
\r
5229 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5230 if ( FAILED( result ) ) {
\r
5231 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5232 errorText_ = errorStream_.str();
\r
5233 error( RtAudioError::WARNING );
\r
5236 // Query DirectSoundCapture devices.
\r
5237 probeInfo.isInput = true;
\r
5238 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5239 if ( FAILED( result ) ) {
\r
5240 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5241 errorText_ = errorStream_.str();
\r
5242 error( RtAudioError::WARNING );
\r
5245 // Clean out any devices that may have disappeared.
\r
5246 std::vector< int > indices;
\r
5247 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5248 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5249 //unsigned int nErased = 0;
\r
5250 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5251 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5252 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5254 return static_cast<unsigned int>(dsDevices.size());
\r
5257 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5259 RtAudio::DeviceInfo info;
\r
5260 info.probed = false;
\r
5262 if ( dsDevices.size() == 0 ) {
\r
5263 // Force a query of all devices
\r
5265 if ( dsDevices.size() == 0 ) {
\r
5266 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5267 error( RtAudioError::INVALID_USE );
\r
5272 if ( device >= dsDevices.size() ) {
\r
5273 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5274 error( RtAudioError::INVALID_USE );
\r
5279 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5281 LPDIRECTSOUND output;
\r
5283 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5284 if ( FAILED( result ) ) {
\r
5285 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5286 errorText_ = errorStream_.str();
\r
5287 error( RtAudioError::WARNING );
\r
5291 outCaps.dwSize = sizeof( outCaps );
\r
5292 result = output->GetCaps( &outCaps );
\r
5293 if ( FAILED( result ) ) {
\r
5294 output->Release();
\r
5295 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5296 errorText_ = errorStream_.str();
\r
5297 error( RtAudioError::WARNING );
\r
5301 // Get output channel information.
\r
5302 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5304 // Get sample rate information.
\r
5305 info.sampleRates.clear();
\r
5306 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5307 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5308 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5309 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5312 // Get format information.
\r
5313 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5314 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5316 output->Release();
\r
5318 if ( getDefaultOutputDevice() == device )
\r
5319 info.isDefaultOutput = true;
\r
5321 if ( dsDevices[ device ].validId[1] == false ) {
\r
5322 info.name = dsDevices[ device ].name;
\r
5323 info.probed = true;
\r
5329 LPDIRECTSOUNDCAPTURE input;
\r
5330 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5331 if ( FAILED( result ) ) {
\r
5332 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5333 errorText_ = errorStream_.str();
\r
5334 error( RtAudioError::WARNING );
\r
5339 inCaps.dwSize = sizeof( inCaps );
\r
5340 result = input->GetCaps( &inCaps );
\r
5341 if ( FAILED( result ) ) {
\r
5343 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5344 errorText_ = errorStream_.str();
\r
5345 error( RtAudioError::WARNING );
\r
5349 // Get input channel information.
\r
5350 info.inputChannels = inCaps.dwChannels;
\r
5352 // Get sample rate and format information.
\r
5353 std::vector<unsigned int> rates;
\r
5354 if ( inCaps.dwChannels >= 2 ) {
\r
5355 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5356 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5359 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5360 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5362 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5364 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5365 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5370 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5371 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5374 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5377 else if ( inCaps.dwChannels == 1 ) {
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5385 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5387 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5388 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5391 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5393 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5394 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5397 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5400 else info.inputChannels = 0; // technically, this would be an error
\r
5404 if ( info.inputChannels == 0 ) return info;
\r
5406 // Copy the supported rates to the info structure but avoid duplication.
\r
5408 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5410 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5411 if ( rates[i] == info.sampleRates[j] ) {
\r
5416 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5418 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5420 // If device opens for both playback and capture, we determine the channels.
\r
5421 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5422 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5424 if ( device == 0 ) info.isDefaultInput = true;
\r
5426 // Copy name and return.
\r
5427 info.name = dsDevices[ device ].name;
\r
5428 info.probed = true;
\r
// Open and configure one direction (OUTPUT playback or INPUT capture) of a
// DirectSound stream on the given device: validate device/channel arguments,
// create the DirectSound object and its secondary (or capture) buffer, set up
// format/size bookkeeping in stream_, allocate user/device conversion buffers
// and the per-stream DsHandle, and start the callback thread on first open.
// On any failure the code falls through to the cleanup section at the bottom
// (releasing whatever DirectSound objects and heap buffers were created) and
// the stream is left in STREAM_CLOSED state.
//
// NOTE(review): this listing is incomplete — the embedded original line
// numbers jump (e.g. 5438 -> 5442, 5846 -> 5848), so `return FAILURE;`-style
// statements, closing braces, the `error:` label and several local
// declarations (e.g. `HRESULT result`, `DSCAPS outCaps`, `DSCCAPS inCaps`,
// `DSBCAPS dsbcaps`, `LPVOID audioPtr`, `DWORD dataLen`, the `nBuffers`
// declaration) are missing from view. Do not treat this text as compilable.
5432 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5433 unsigned int firstChannel, unsigned int sampleRate,

5434 RtAudioFormat format, unsigned int *bufferSize,

5435 RtAudio::StreamOptions *options )

// DirectSound exposes at most stereo per endpoint, so channels + offset > 2
// is rejected up front.
5437 if ( channels + firstChannel > 2 ) {

5438 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5442 size_t nDevices = dsDevices.size();

5443 if ( nDevices == 0 ) {

5444 // This should not happen because a check is made before this function is called.

5445 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5449 if ( device >= nDevices ) {

5450 // This should not happen because a check is made before this function is called.

5451 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// validId[0] marks playback capability, validId[1] capture capability.
5455 if ( mode == OUTPUT ) {

5456 if ( dsDevices[ device ].validId[0] == false ) {

5457 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5458 errorText_ = errorStream_.str();

5462 else { // mode == INPUT

5463 if ( dsDevices[ device ].validId[1] == false ) {

5464 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5465 errorText_ = errorStream_.str();

5470 // According to a note in PortAudio, using GetDesktopWindow()

5471 // instead of GetForegroundWindow() is supposed to avoid problems

5472 // that occur when the application's window is not the foreground

5473 // window. Also, if the application window closes before the

5474 // DirectSound buffer, DirectSound can crash. In the past, I had

5475 // problems when using GetDesktopWindow() but it seems fine now

5476 // (January 2010). I'll leave it commented here.

5477 // HWND hWnd = GetForegroundWindow();

5478 HWND hWnd = GetDesktopWindow();

5480 // Check the numberOfBuffers parameter and limit the lowest value to

5481 // two. This is a judgement call and a value of two is probably too

5482 // low for capture, but it should work for playback.

// NOTE(review): `nBuffers` declaration (presumably with a default) is on a
// line missing from this listing.
5484 if ( options ) nBuffers = options->numberOfBuffers;

5485 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5486 if ( nBuffers < 2 ) nBuffers = 3;

5488 // Check the lower range of the user-specified buffer size and set

5489 // (arbitrarily) to a lower bound of 32.

5490 if ( *bufferSize < 32 ) *bufferSize = 32;

5492 // Create the wave format structure. The data format setting will

5493 // be determined later.

5494 WAVEFORMATEX waveFormat;

5495 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5496 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

// Device is opened with channels + firstChannel channels; the extra leading
// channels implement the channel-offset feature.
5497 waveFormat.nChannels = channels + firstChannel;

5498 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5500 // Determine the device buffer size. By default, we'll use the value

5501 // defined above (32K), but we will grow it to make allowances for

5502 // very large software buffer sizes.

5503 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

// dsPointerLeadTime = how far (in bytes) our write cursor leads the device's
// safe position; computed below from nBuffers * bufferSize.
5504 DWORD dsPointerLeadTime = 0;

// Opaque object/buffer pointers stashed into the DsHandle at the end; kept as
// void* so one pair of slots serves both playback and capture types.
5506 void *ohandle = 0, *bhandle = 0;

// ---- Playback (OUTPUT) setup ----
5508 if ( mode == OUTPUT ) {

5510 LPDIRECTSOUND output;

5511 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5512 if ( FAILED( result ) ) {

5513 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5514 errorText_ = errorStream_.str();

5519 outCaps.dwSize = sizeof( outCaps );

5520 result = output->GetCaps( &outCaps );

5521 if ( FAILED( result ) ) {

5522 output->Release();

5523 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5524 errorText_ = errorStream_.str();

5528 // Check channel information.

5529 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5530 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5531 errorText_ = errorStream_.str();

5535 // Check format information. Use 16-bit format unless not

5536 // supported or user requests 8-bit.

5537 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5538 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5539 waveFormat.wBitsPerSample = 16;

5540 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5543 waveFormat.wBitsPerSample = 8;

5544 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5546 stream_.userFormat = format;

5548 // Update wave format structure and buffer information.

5549 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5550 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5551 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5553 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

// Keep the device buffer at least twice the lead time so the write region
// never laps the play cursor.
5554 while ( dsPointerLeadTime * 2U > dsBufferSize )

5555 dsBufferSize *= 2;

5557 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5558 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5559 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5560 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5561 if ( FAILED( result ) ) {

5562 output->Release();

5563 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5564 errorText_ = errorStream_.str();

5568 // Even though we will write to the secondary buffer, we need to

5569 // access the primary buffer to set the correct output format

5570 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5571 // buffer description.

5572 DSBUFFERDESC bufferDescription;

5573 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5574 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5575 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5577 // Obtain the primary buffer

5578 LPDIRECTSOUNDBUFFER buffer;

5579 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5580 if ( FAILED( result ) ) {

5581 output->Release();

5582 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5583 errorText_ = errorStream_.str();

5587 // Set the primary DS buffer sound format.

5588 result = buffer->SetFormat( &waveFormat );

5589 if ( FAILED( result ) ) {

5590 output->Release();

5591 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5592 errorText_ = errorStream_.str();

5596 // Setup the secondary DS buffer description.

5597 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5598 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5599 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5600 DSBCAPS_GLOBALFOCUS |

5601 DSBCAPS_GETCURRENTPOSITION2 |

5602 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5603 bufferDescription.dwBufferBytes = dsBufferSize;

5604 bufferDescription.lpwfxFormat = &waveFormat;

5606 // Try to create the secondary DS buffer. If that doesn't work,

5607 // try to use software mixing. Otherwise, there's a problem.

5608 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5609 if ( FAILED( result ) ) {

// Fall back: retry the identical description with software mixing before
// giving up on the device.
5610 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5611 DSBCAPS_GLOBALFOCUS |

5612 DSBCAPS_GETCURRENTPOSITION2 |

5613 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5614 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5615 if ( FAILED( result ) ) {

5616 output->Release();

5617 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5618 errorText_ = errorStream_.str();

5623 // Get the buffer size ... might be different from what we specified.

5625 dsbcaps.dwSize = sizeof( DSBCAPS );

5626 result = buffer->GetCaps( &dsbcaps );

5627 if ( FAILED( result ) ) {

5628 output->Release();

5629 buffer->Release();

5630 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5631 errorText_ = errorStream_.str();

// Adopt whatever size DirectSound actually allocated.
5635 dsBufferSize = dsbcaps.dwBufferBytes;

5637 // Lock the DS buffer

5640 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5641 if ( FAILED( result ) ) {

5642 output->Release();

5643 buffer->Release();

5644 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5645 errorText_ = errorStream_.str();

5649 // Zero the DS buffer

// Silence the whole device buffer so stale memory is never audible at start.
5650 ZeroMemory( audioPtr, dataLen );

5652 // Unlock the DS buffer

5653 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5654 if ( FAILED( result ) ) {

5655 output->Release();

5656 buffer->Release();

5657 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5658 errorText_ = errorStream_.str();

5662 ohandle = (void *) output;

5663 bhandle = (void *) buffer;

// ---- Capture (INPUT) setup: mirrors the output path with the capture API ----
5666 if ( mode == INPUT ) {

5668 LPDIRECTSOUNDCAPTURE input;

5669 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5670 if ( FAILED( result ) ) {

5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5672 errorText_ = errorStream_.str();

5677 inCaps.dwSize = sizeof( inCaps );

5678 result = input->GetCaps( &inCaps );

5679 if ( FAILED( result ) ) {

5681 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5682 errorText_ = errorStream_.str();

5686 // Check channel information.

5687 if ( inCaps.dwChannels < channels + firstChannel ) {

5688 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5692 // Check format information. Use 16-bit format unless user

5693 // requests 8-bit.

// Pick the WAVE_FORMAT bitmask for the mono or stereo 8-bit variants; only if
// the device advertises one of them AND the user asked for SINT8 do we open
// 8-bit — otherwise 16-bit is assumed.
5694 DWORD deviceFormats;

5695 if ( channels + firstChannel == 2 ) {

5696 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5697 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5698 waveFormat.wBitsPerSample = 8;

5699 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5701 else { // assume 16-bit is supported

5702 waveFormat.wBitsPerSample = 16;

5703 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5706 else { // channel == 1

5707 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5708 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5709 waveFormat.wBitsPerSample = 8;

5710 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5712 else { // assume 16-bit is supported

5713 waveFormat.wBitsPerSample = 16;

5714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5717 stream_.userFormat = format;

5719 // Update wave format structure and buffer information.

5720 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5721 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5722 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5724 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5725 while ( dsPointerLeadTime * 2U > dsBufferSize )

5726 dsBufferSize *= 2;

5728 // Setup the secondary DS buffer description.

5729 DSCBUFFERDESC bufferDescription;

5730 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5731 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5732 bufferDescription.dwFlags = 0;

5733 bufferDescription.dwReserved = 0;

5734 bufferDescription.dwBufferBytes = dsBufferSize;

5735 bufferDescription.lpwfxFormat = &waveFormat;

5737 // Create the capture buffer.

5738 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5739 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5740 if ( FAILED( result ) ) {

5742 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5743 errorText_ = errorStream_.str();

5747 // Get the buffer size ... might be different from what we specified.

5748 DSCBCAPS dscbcaps;

5749 dscbcaps.dwSize = sizeof( DSCBCAPS );

5750 result = buffer->GetCaps( &dscbcaps );

5751 if ( FAILED( result ) ) {

5753 buffer->Release();

5754 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5755 errorText_ = errorStream_.str();

5759 dsBufferSize = dscbcaps.dwBufferBytes;

5761 // NOTE: We could have a problem here if this is a duplex stream

5762 // and the play and capture hardware buffer sizes are different

5763 // (I'm actually not sure if that is a problem or not).

5764 // Currently, we are not verifying that.

5766 // Lock the capture buffer

5769 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5770 if ( FAILED( result ) ) {

5772 buffer->Release();

5773 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5774 errorText_ = errorStream_.str();

5778 // Zero the buffer

5779 ZeroMemory( audioPtr, dataLen );

5781 // Unlock the buffer

5782 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5783 if ( FAILED( result ) ) {

5785 buffer->Release();

5786 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5787 errorText_ = errorStream_.str();

5791 ohandle = (void *) input;

5792 bhandle = (void *) buffer;

5795 // Set various stream parameters

5796 DsHandle *handle = 0;

5797 stream_.nDeviceChannels[mode] = channels + firstChannel;

5798 stream_.nUserChannels[mode] = channels;

5799 stream_.bufferSize = *bufferSize;

5800 stream_.channelOffset[mode] = firstChannel;

5801 stream_.deviceInterleaved[mode] = true;

5802 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5803 else stream_.userInterleaved = true;

5805 // Set flag for buffer conversion

// Conversion is needed when channel counts, sample formats, or interleaving
// differ between the user-facing and device-facing sides.
5806 stream_.doConvertBuffer[mode] = false;

5807 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5808 stream_.doConvertBuffer[mode] = true;

5809 if (stream_.userFormat != stream_.deviceFormat[mode])

5810 stream_.doConvertBuffer[mode] = true;

5811 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5812 stream_.nUserChannels[mode] > 1 )

5813 stream_.doConvertBuffer[mode] = true;

5815 // Allocate necessary internal buffers

5816 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5817 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5818 if ( stream_.userBuffer[mode] == NULL ) {

5819 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5823 if ( stream_.doConvertBuffer[mode] ) {

5825 bool makeBuffer = true;

5826 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex opens, reuse the output-side device buffer when it is already
// large enough for the input side.
5827 if ( mode == INPUT ) {

5828 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5829 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5830 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5834 if ( makeBuffer ) {

5835 bufferBytes *= *bufferSize;

5836 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5837 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5838 if ( stream_.deviceBuffer == NULL ) {

5839 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5845 // Allocate our DsHandle structures for the stream.

// First open of this stream (second half of a duplex open reuses the handle).
5846 if ( stream_.apiHandle == 0 ) {

5848 handle = new DsHandle;

5850 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but a DsHandle is being allocated —
// looks like a copy/paste slip from the ASIO backend; confirm upstream.
5851 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5855 // Create a manual-reset event.

// The event is the signal used by stopStream()/callbackEvent() to flag that
// output draining has finished.
5856 handle->condition = CreateEvent( NULL, // no security

5857 TRUE, // manual-reset

5858 FALSE, // non-signaled initially

5859 NULL ); // unnamed

5860 stream_.apiHandle = (void *) handle;

5863 handle = (DsHandle *) stream_.apiHandle;

5864 handle->id[mode] = ohandle;

5865 handle->buffer[mode] = bhandle;

5866 handle->dsBufferSize[mode] = dsBufferSize;

5867 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5869 stream_.device[mode] = device;

5870 stream_.state = STREAM_STOPPED;

5871 if ( stream_.mode == OUTPUT && mode == INPUT )

5872 // We had already set up an output stream.

5873 stream_.mode = DUPLEX;

5875 stream_.mode = mode;

5876 stream_.nBuffers = nBuffers;

5877 stream_.sampleRate = sampleRate;

5879 // Setup the buffer conversion information structure.

5880 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5882 // Setup the callback thread.

5883 if ( stream_.callbackInfo.isRunning == false ) {

5884 unsigned threadId;

5885 stream_.callbackInfo.isRunning = true;

5886 stream_.callbackInfo.object = (void *) this;

5887 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5888 &stream_.callbackInfo, 0, &threadId );

5889 if ( stream_.callbackInfo.thread == 0 ) {

5890 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5894 // Boost DS thread priority

5895 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// ---- Error cleanup: release any DirectSound objects/buffers and heap memory
// acquired above, then mark the stream closed.
// NOTE(review): the `error:` label itself is on a line missing from this
// listing (numbering jumps 5895 -> 5901).
5901 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5902 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5903 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5904 if ( buffer ) buffer->Release();

5905 object->Release();

5907 if ( handle->buffer[1] ) {

5908 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5909 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5910 if ( buffer ) buffer->Release();

5911 object->Release();

5913 CloseHandle( handle->condition );

5915 stream_.apiHandle = 0;

5918 for ( int i=0; i<2; i++ ) {

5919 if ( stream_.userBuffer[i] ) {

5920 free( stream_.userBuffer[i] );

5921 stream_.userBuffer[i] = 0;

5925 if ( stream_.deviceBuffer ) {

5926 free( stream_.deviceBuffer );

5927 stream_.deviceBuffer = 0;

5930 stream_.state = STREAM_CLOSED;
\r
// Tear down an open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers held in the DsHandle,
// close the drain-condition event, free the user/device conversion buffers,
// and reset the stream to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): this listing is incomplete — the embedded numbering jumps
// (5938 -> 5942, 5951 -> 5954, ...), so the early `return`, the
// `if ( handle )` guard and several closing braces are missing from view.
5934 void RtApiDs :: closeStream()

5936 if ( stream_.state == STREAM_CLOSED ) {

5937 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5938 error( RtAudioError::WARNING );

5942 // Stop the callback thread.

// The callback loop polls isRunning; clearing it lets the thread exit so the
// join below cannot block forever.
5943 stream_.callbackInfo.isRunning = false;

5944 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5945 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5947 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release playback-side DirectSound objects, if this direction was opened.
5949 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5950 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5951 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5954 buffer->Release();

5956 object->Release();

// Release capture-side DirectSound objects, if this direction was opened.
5958 if ( handle->buffer[1] ) {

5959 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5960 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5963 buffer->Release();

5965 object->Release();

5967 CloseHandle( handle->condition );

5969 stream_.apiHandle = 0;

// Free the per-direction user buffers and the shared device buffer.
5972 for ( int i=0; i<2; i++ ) {

5973 if ( stream_.userBuffer[i] ) {

5974 free( stream_.userBuffer[i] );

5975 stream_.userBuffer[i] = 0;

5979 if ( stream_.deviceBuffer ) {

5980 free( stream_.deviceBuffer );

5981 stream_.deviceBuffer = 0;

5984 stream_.mode = UNINITIALIZED;

5985 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: boost the Windows timer resolution, reset the
// rolling/preroll bookkeeping, start the DirectSound playback buffer
// (looping) and/or capture buffer, clear the drain state and condition event,
// and mark the stream STREAM_RUNNING. On a DirectSound failure the HRESULT
// falls through to the final error() call.
// NOTE(review): this listing is incomplete — the embedded numbering jumps
// (5993 -> 5997, 6019 -> 6024, ...), so the early `return`, `goto unlock;`
// lines and the `unlock:` label are missing from view.
5988 void RtApiDs :: startStream()

5991 if ( stream_.state == STREAM_RUNNING ) {

5992 errorText_ = "RtApiDs::startStream(): the stream is already running!";

5993 error( RtAudioError::WARNING );

5997 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5999 // Increase scheduler frequency on lesser windows (a side-effect of

6000 // increasing timer accuracy). On greater windows (Win2K or later),

6001 // this is already in effect.

// Paired with timeEndPeriod(1) in stopStream().
6002 timeBeginPeriod( 1 );

6004 buffersRolling = false;

6005 duplexPrerollBytes = 0;

6007 if ( stream_.mode == DUPLEX ) {

6008 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6009 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6012 HRESULT result = 0;

6013 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6015 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// DSBPLAY_LOOPING keeps the secondary buffer cycling; the callback thread
// writes ahead of the play cursor.
6016 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6017 if ( FAILED( result ) ) {

6018 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6019 errorText_ = errorStream_.str();

6024 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6026 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6027 result = buffer->Start( DSCBSTART_LOOPING );

6028 if ( FAILED( result ) ) {

6029 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6030 errorText_ = errorStream_.str();

// Reset drain bookkeeping so a previous stop/drain cycle cannot leak into
// this run, then flip the state to running.
6035 handle->drainCounter = 0;

6036 handle->internalDrain = false;

6037 ResetEvent( handle->condition );

6038 stream_.state = STREAM_RUNNING;

6041 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully. For output, first drain: set
// drainCounter and block on the condition event until the callback thread
// signals the queued audio has played out; then Stop() the buffer and
// lock/zero/unlock it so a later restart plays silence, not stale data.
// For input, Stop() and clear the capture buffer the same way. Finally
// restore normal timer resolution (paired with timeBeginPeriod in
// startStream) and report any accumulated HRESULT failure.
// NOTE(review): this listing is incomplete — the embedded numbering jumps
// (6049 -> 6053, 6060 -> 6063, ...), so the early `return`, the
// `LPVOID audioPtr; DWORD dataLen;` declarations, `goto`s and closing braces
// are missing from view.
6044 void RtApiDs :: stopStream()

6047 if ( stream_.state == STREAM_STOPPED ) {

6048 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6049 error( RtAudioError::WARNING );

6053 HRESULT result = 0;

6056 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6057 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress yet: request one (value 2)
// and wait for callbackEvent() to SetEvent() when playback has drained.
6058 if ( handle->drainCounter == 0 ) {

6059 handle->drainCounter = 2;

6060 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6063 stream_.state = STREAM_STOPPED;

6065 // Stop the buffer and clear memory

6066 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6067 result = buffer->Stop();

6068 if ( FAILED( result ) ) {

6069 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6070 errorText_ = errorStream_.str();

6074 // Lock the buffer and clear it so that if we start to play again,

6075 // we won't have old data playing.

6076 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6077 if ( FAILED( result ) ) {

6078 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6079 errorText_ = errorStream_.str();

6083 // Zero the DS buffer

6084 ZeroMemory( audioPtr, dataLen );

6086 // Unlock the DS buffer

6087 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6088 if ( FAILED( result ) ) {

6089 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6090 errorText_ = errorStream_.str();

6094 // If we start playing again, we must begin at beginning of buffer.

6095 handle->bufferPointer[0] = 0;

6098 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6099 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

// Capture has no drain to wait on; mark stopped before touching the buffer.
6103 stream_.state = STREAM_STOPPED;

6105 result = buffer->Stop();

6106 if ( FAILED( result ) ) {

6107 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6108 errorText_ = errorStream_.str();

6112 // Lock the buffer and clear it so that if we start to play again,

6113 // we won't have old data playing.

6114 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6115 if ( FAILED( result ) ) {

6116 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6117 errorText_ = errorStream_.str();

6121 // Zero the DS buffer

6122 ZeroMemory( audioPtr, dataLen );

6124 // Unlock the DS buffer

6125 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6126 if ( FAILED( result ) ) {

6127 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6128 errorText_ = errorStream_.str();

6132 // If we start recording again, we must begin at beginning of buffer.

6133 handle->bufferPointer[1] = 0;

6137 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6138 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining queued output: setting drainCounter to 2
// directly (no wait on the condition event) makes the subsequent stop skip
// the drain-and-block step that stopStream() performs for drainCounter == 0.
// NOTE(review): this listing is incomplete — the numbering jumps
// (6146 -> 6150, and ends at 6151), so the early `return` and the trailing
// call that actually stops the stream (presumably `stopStream()`) are
// missing from view.
6141 void RtApiDs :: abortStream()

6144 if ( stream_.state == STREAM_STOPPED ) {

6145 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6146 error( RtAudioError::WARNING );

6150 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6151 handle->drainCounter = 2;
\r
6156 void RtApiDs :: callbackEvent()
\r
6158 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6159 Sleep( 50 ); // sleep 50 milliseconds
\r
6163 if ( stream_.state == STREAM_CLOSED ) {
\r
6164 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6165 error( RtAudioError::WARNING );
\r
6169 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6170 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6172 // Check if we were draining the stream and signal is finished.
\r
6173 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6175 stream_.state = STREAM_STOPPING;
\r
6176 if ( handle->internalDrain == false )
\r
6177 SetEvent( handle->condition );
\r
6183 // Invoke user callback to get fresh output data UNLESS we are
\r
6184 // draining stream.
\r
6185 if ( handle->drainCounter == 0 ) {
\r
6186 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6187 double streamTime = getStreamTime();
\r
6188 RtAudioStreamStatus status = 0;
\r
6189 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6190 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6191 handle->xrun[0] = false;
\r
6193 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6194 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6195 handle->xrun[1] = false;
\r
6197 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6198 stream_.bufferSize, streamTime, status, info->userData );
\r
6199 if ( cbReturnValue == 2 ) {
\r
6200 stream_.state = STREAM_STOPPING;
\r
6201 handle->drainCounter = 2;
\r
6205 else if ( cbReturnValue == 1 ) {
\r
6206 handle->drainCounter = 1;
\r
6207 handle->internalDrain = true;
\r
6212 DWORD currentWritePointer, safeWritePointer;
\r
6213 DWORD currentReadPointer, safeReadPointer;
\r
6214 UINT nextWritePointer;
\r
6216 LPVOID buffer1 = NULL;
\r
6217 LPVOID buffer2 = NULL;
\r
6218 DWORD bufferSize1 = 0;
\r
6219 DWORD bufferSize2 = 0;
\r
6224 if ( buffersRolling == false ) {
\r
6225 if ( stream_.mode == DUPLEX ) {
\r
6226 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6228 // It takes a while for the devices to get rolling. As a result,
\r
6229 // there's no guarantee that the capture and write device pointers
\r
6230 // will move in lockstep. Wait here for both devices to start
\r
6231 // rolling, and then set our buffer pointers accordingly.
\r
6232 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6233 // bytes later than the write buffer.
\r
6235 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6236 // take place between the two GetCurrentPosition calls... but I'm
\r
6237 // really not sure how to solve the problem. Temporarily boost to
\r
6238 // Realtime priority, maybe; but I'm not sure what priority the
\r
6239 // DirectSound service threads run at. We *should* be roughly
\r
6240 // within a ms or so of correct.
\r
6242 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6243 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6245 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6247 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6248 if ( FAILED( result ) ) {
\r
6249 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6250 errorText_ = errorStream_.str();
\r
6251 error( RtAudioError::SYSTEM_ERROR );
\r
6254 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6255 if ( FAILED( result ) ) {
\r
6256 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6257 errorText_ = errorStream_.str();
\r
6258 error( RtAudioError::SYSTEM_ERROR );
\r
6262 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6263 if ( FAILED( result ) ) {
\r
6264 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6265 errorText_ = errorStream_.str();
\r
6266 error( RtAudioError::SYSTEM_ERROR );
\r
6269 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6270 if ( FAILED( result ) ) {
\r
6271 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6272 errorText_ = errorStream_.str();
\r
6273 error( RtAudioError::SYSTEM_ERROR );
\r
6276 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6280 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6282 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6283 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6284 handle->bufferPointer[1] = safeReadPointer;
\r
6286 else if ( stream_.mode == OUTPUT ) {
\r
6288 // Set the proper nextWritePosition after initial startup.
\r
6289 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6290 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6291 if ( FAILED( result ) ) {
\r
6292 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6293 errorText_ = errorStream_.str();
\r
6294 error( RtAudioError::SYSTEM_ERROR );
\r
6297 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6298 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6301 buffersRolling = true;
\r
6304 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6306 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6308 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6309 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6310 bufferBytes *= formatBytes( stream_.userFormat );
\r
6311 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6314 // Setup parameters and do buffer conversion if necessary.
\r
6315 if ( stream_.doConvertBuffer[0] ) {
\r
6316 buffer = stream_.deviceBuffer;
\r
6317 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6318 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6319 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6322 buffer = stream_.userBuffer[0];
\r
6323 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6324 bufferBytes *= formatBytes( stream_.userFormat );
\r
6327 // No byte swapping necessary in DirectSound implementation.
\r
6329 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6330 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6332 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6333 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6335 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6336 nextWritePointer = handle->bufferPointer[0];
\r
6338 DWORD endWrite, leadPointer;
\r
6340 // Find out where the read and "safe write" pointers are.
\r
6341 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6342 if ( FAILED( result ) ) {
\r
6343 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6344 errorText_ = errorStream_.str();
\r
6345 error( RtAudioError::SYSTEM_ERROR );
\r
6349 // We will copy our output buffer into the region between
\r
6350 // safeWritePointer and leadPointer. If leadPointer is not
\r
6351 // beyond the next endWrite position, wait until it is.
\r
6352 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6353 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6354 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6355 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6356 endWrite = nextWritePointer + bufferBytes;
\r
6358 // Check whether the entire write region is behind the play pointer.
\r
6359 if ( leadPointer >= endWrite ) break;
\r
6361 // If we are here, then we must wait until the leadPointer advances
\r
6362 // beyond the end of our next write region. We use the
\r
6363 // Sleep() function to suspend operation until that happens.
\r
6364 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6365 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6366 if ( millis < 1.0 ) millis = 1.0;
\r
6367 Sleep( (DWORD) millis );
\r
6370 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6371 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6372 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6373 handle->xrun[0] = true;
\r
6374 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6375 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6376 handle->bufferPointer[0] = nextWritePointer;
\r
6377 endWrite = nextWritePointer + bufferBytes;
\r
6380 // Lock free space in the buffer
\r
6381 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6382 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6383 if ( FAILED( result ) ) {
\r
6384 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6385 errorText_ = errorStream_.str();
\r
6386 error( RtAudioError::SYSTEM_ERROR );
\r
6390 // Copy our buffer into the DS buffer
\r
6391 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6392 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6394 // Update our buffer offset and unlock sound buffer
\r
6395 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6396 if ( FAILED( result ) ) {
\r
6397 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6398 errorText_ = errorStream_.str();
\r
6399 error( RtAudioError::SYSTEM_ERROR );
\r
6402 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6403 handle->bufferPointer[0] = nextWritePointer;
\r
6405 if ( handle->drainCounter ) {
\r
6406 handle->drainCounter++;
\r
6411 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6413 // Setup parameters.
\r
6414 if ( stream_.doConvertBuffer[1] ) {
\r
6415 buffer = stream_.deviceBuffer;
\r
6416 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6417 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6420 buffer = stream_.userBuffer[1];
\r
6421 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6422 bufferBytes *= formatBytes( stream_.userFormat );
\r
6425 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6426 long nextReadPointer = handle->bufferPointer[1];
\r
6427 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6429 // Find out where the write and "safe read" pointers are.
\r
6430 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6431 if ( FAILED( result ) ) {
\r
6432 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6433 errorText_ = errorStream_.str();
\r
6434 error( RtAudioError::SYSTEM_ERROR );
\r
6438 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6439 DWORD endRead = nextReadPointer + bufferBytes;
\r
6441 // Handling depends on whether we are INPUT or DUPLEX.
\r
6442 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6443 // then a wait here will drag the write pointers into the forbidden zone.
\r
6445 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6446 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6447 // practical way to sync up the read and write pointers reliably, given the
\r
6448 // the very complex relationship between phase and increment of the read and write
\r
6451 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6452 // provide a pre-roll period of 0.5 seconds in which we return
\r
6453 // zeros from the read buffer while the pointers sync up.
\r
6455 if ( stream_.mode == DUPLEX ) {
\r
6456 if ( safeReadPointer < endRead ) {
\r
6457 if ( duplexPrerollBytes <= 0 ) {
\r
6458 // Pre-roll time over. Be more agressive.
\r
6459 int adjustment = endRead-safeReadPointer;
\r
6461 handle->xrun[1] = true;
\r
6463 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6464 // and perform fine adjustments later.
\r
6465 // - small adjustments: back off by twice as much.
\r
6466 if ( adjustment >= 2*bufferBytes )
\r
6467 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6469 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6471 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6475 // In pre=roll time. Just do it.
\r
6476 nextReadPointer = safeReadPointer - bufferBytes;
\r
6477 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6479 endRead = nextReadPointer + bufferBytes;
\r
6482 else { // mode == INPUT
\r
6483 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6484 // See comments for playback.
\r
6485 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6486 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6487 if ( millis < 1.0 ) millis = 1.0;
\r
6488 Sleep( (DWORD) millis );
\r
6490 // Wake up and find out where we are now.
\r
6491 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6492 if ( FAILED( result ) ) {
\r
6493 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6494 errorText_ = errorStream_.str();
\r
6495 error( RtAudioError::SYSTEM_ERROR );
\r
6499 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6503 // Lock free space in the buffer
\r
6504 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6505 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6506 if ( FAILED( result ) ) {
\r
6507 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6508 errorText_ = errorStream_.str();
\r
6509 error( RtAudioError::SYSTEM_ERROR );
\r
6513 if ( duplexPrerollBytes <= 0 ) {
\r
6514 // Copy our buffer into the DS buffer
\r
6515 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6516 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6519 memset( buffer, 0, bufferSize1 );
\r
6520 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6521 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6524 // Update our buffer offset and unlock sound buffer
\r
6525 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6526 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6527 if ( FAILED( result ) ) {
\r
6528 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6529 errorText_ = errorStream_.str();
\r
6530 error( RtAudioError::SYSTEM_ERROR );
\r
6533 handle->bufferPointer[1] = nextReadPointer;
\r
6535 // No byte swapping necessary in DirectSound implementation.
\r
6537 // If necessary, convert 8-bit data from unsigned to signed.
\r
6538 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6539 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6541 // Do buffer conversion if necessary.
\r
6542 if ( stream_.doConvertBuffer[1] )
\r
6543 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6547 RtApi::tickStreamTime();
\r
6550 // Definitions for utility functions and callbacks
\r
6551 // specific to the DirectSound implementation.
\r
6553 static unsigned __stdcall callbackHandler( void *ptr )
\r
6555 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6556 RtApiDs *object = (RtApiDs *) info->object;
\r
6557 bool* isRunning = &info->isRunning;
\r
6559 while ( *isRunning == true ) {
\r
6560 object->callbackEvent();
\r
6563 _endthreadex( 0 );
\r
6567 #include "tchar.h"
\r
6569 static std::string convertTChar( LPCTSTR name )
\r
6571 #if defined( UNICODE ) || defined( _UNICODE )
\r
6572 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6573 std::string s( length-1, '\0' );
\r
6574 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6576 std::string s( name );
\r
6582 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6583 LPCTSTR description,
\r
6584 LPCTSTR /*module*/,
\r
6585 LPVOID lpContext )
\r
6587 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6588 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6591 bool validDevice = false;
\r
6592 if ( probeInfo.isInput == true ) {
\r
6594 LPDIRECTSOUNDCAPTURE object;
\r
6596 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6597 if ( hr != DS_OK ) return TRUE;
\r
6599 caps.dwSize = sizeof(caps);
\r
6600 hr = object->GetCaps( &caps );
\r
6601 if ( hr == DS_OK ) {
\r
6602 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6603 validDevice = true;
\r
6605 object->Release();
\r
6609 LPDIRECTSOUND object;
\r
6610 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6611 if ( hr != DS_OK ) return TRUE;
\r
6613 caps.dwSize = sizeof(caps);
\r
6614 hr = object->GetCaps( &caps );
\r
6615 if ( hr == DS_OK ) {
\r
6616 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6617 validDevice = true;
\r
6619 object->Release();
\r
6622 // If good device, then save its name and guid.
\r
6623 std::string name = convertTChar( description );
\r
6624 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6625 if ( lpguid == NULL )
\r
6626 name = "Default Device";
\r
6627 if ( validDevice ) {
\r
6628 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6629 if ( dsDevices[i].name == name ) {
\r
6630 dsDevices[i].found = true;
\r
6631 if ( probeInfo.isInput ) {
\r
6632 dsDevices[i].id[1] = lpguid;
\r
6633 dsDevices[i].validId[1] = true;
\r
6636 dsDevices[i].id[0] = lpguid;
\r
6637 dsDevices[i].validId[0] = true;
\r
6644 device.name = name;
\r
6645 device.found = true;
\r
6646 if ( probeInfo.isInput ) {
\r
6647 device.id[1] = lpguid;
\r
6648 device.validId[1] = true;
\r
6651 device.id[0] = lpguid;
\r
6652 device.validId[0] = true;
\r
6654 dsDevices.push_back( device );
\r
6660 static const char* getErrorString( int code )
\r
6664 case DSERR_ALLOCATED:
\r
6665 return "Already allocated";
\r
6667 case DSERR_CONTROLUNAVAIL:
\r
6668 return "Control unavailable";
\r
6670 case DSERR_INVALIDPARAM:
\r
6671 return "Invalid parameter";
\r
6673 case DSERR_INVALIDCALL:
\r
6674 return "Invalid call";
\r
6676 case DSERR_GENERIC:
\r
6677 return "Generic error";
\r
6679 case DSERR_PRIOLEVELNEEDED:
\r
6680 return "Priority level needed";
\r
6682 case DSERR_OUTOFMEMORY:
\r
6683 return "Out of memory";
\r
6685 case DSERR_BADFORMAT:
\r
6686 return "The sample rate or the channel format is not supported";
\r
6688 case DSERR_UNSUPPORTED:
\r
6689 return "Not supported";
\r
6691 case DSERR_NODRIVER:
\r
6692 return "No driver";
\r
6694 case DSERR_ALREADYINITIALIZED:
\r
6695 return "Already initialized";
\r
6697 case DSERR_NOAGGREGATION:
\r
6698 return "No aggregation";
\r
6700 case DSERR_BUFFERLOST:
\r
6701 return "Buffer lost";
\r
6703 case DSERR_OTHERAPPHASPRIO:
\r
6704 return "Another application already has priority";
\r
6706 case DSERR_UNINITIALIZED:
\r
6707 return "Uninitialized";
\r
6710 return "DirectSound unknown error";
\r
6713 //******************** End of __WINDOWS_DS__ *********************//
\r
6717 #if defined(__LINUX_ALSA__)
\r
6719 #include <alsa/asoundlib.h>
\r
6720 #include <unistd.h>
\r
6722 // A structure to hold various information related to the ALSA API
\r
6723 // implementation.
\r
6724 struct AlsaHandle {
\r
6725 snd_pcm_t *handles[2];
\r
6726 bool synchronized;
\r
6728 pthread_cond_t runnable_cv;
\r
6732 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6735 static void *alsaCallbackHandler( void * ptr );
\r
6737 RtApiAlsa :: RtApiAlsa()
\r
6739 // Nothing to do here.
\r
6742 RtApiAlsa :: ~RtApiAlsa()
\r
6744 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6747 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6749 unsigned nDevices = 0;
\r
6750 int result, subdevice, card;
\r
6752 snd_ctl_t *handle;
\r
6754 // Count cards and devices
\r
6756 snd_card_next( &card );
\r
6757 while ( card >= 0 ) {
\r
6758 sprintf( name, "hw:%d", card );
\r
6759 result = snd_ctl_open( &handle, name, 0 );
\r
6760 if ( result < 0 ) {
\r
6761 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6762 errorText_ = errorStream_.str();
\r
6763 error( RtAudioError::WARNING );
\r
6768 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6769 if ( result < 0 ) {
\r
6770 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6771 errorText_ = errorStream_.str();
\r
6772 error( RtAudioError::WARNING );
\r
6775 if ( subdevice < 0 )
\r
6780 snd_ctl_close( handle );
\r
6781 snd_card_next( &card );
\r
6784 result = snd_ctl_open( &handle, "default", 0 );
\r
6785 if (result == 0) {
\r
6787 snd_ctl_close( handle );
\r
6793 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6795 RtAudio::DeviceInfo info;
\r
6796 info.probed = false;
\r
6798 unsigned nDevices = 0;
\r
6799 int result, subdevice, card;
\r
6801 snd_ctl_t *chandle;
\r
6803 // Count cards and devices
\r
6805 snd_card_next( &card );
\r
6806 while ( card >= 0 ) {
\r
6807 sprintf( name, "hw:%d", card );
\r
6808 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6809 if ( result < 0 ) {
\r
6810 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6811 errorText_ = errorStream_.str();
\r
6812 error( RtAudioError::WARNING );
\r
6817 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6818 if ( result < 0 ) {
\r
6819 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6820 errorText_ = errorStream_.str();
\r
6821 error( RtAudioError::WARNING );
\r
6824 if ( subdevice < 0 ) break;
\r
6825 if ( nDevices == device ) {
\r
6826 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6832 snd_ctl_close( chandle );
\r
6833 snd_card_next( &card );
\r
6836 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6837 if ( result == 0 ) {
\r
6838 if ( nDevices == device ) {
\r
6839 strcpy( name, "default" );
\r
6845 if ( nDevices == 0 ) {
\r
6846 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6847 error( RtAudioError::INVALID_USE );
\r
6851 if ( device >= nDevices ) {
\r
6852 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6853 error( RtAudioError::INVALID_USE );
\r
6859 // If a stream is already open, we cannot probe the stream devices.
\r
6860 // Thus, use the saved results.
\r
6861 if ( stream_.state != STREAM_CLOSED &&
\r
6862 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6863 snd_ctl_close( chandle );
\r
6864 if ( device >= devices_.size() ) {
\r
6865 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6866 error( RtAudioError::WARNING );
\r
6869 return devices_[ device ];
\r
6872 int openMode = SND_PCM_ASYNC;
\r
6873 snd_pcm_stream_t stream;
\r
6874 snd_pcm_info_t *pcminfo;
\r
6875 snd_pcm_info_alloca( &pcminfo );
\r
6876 snd_pcm_t *phandle;
\r
6877 snd_pcm_hw_params_t *params;
\r
6878 snd_pcm_hw_params_alloca( ¶ms );
\r
6880 // First try for playback unless default device (which has subdev -1)
\r
6881 stream = SND_PCM_STREAM_PLAYBACK;
\r
6882 snd_pcm_info_set_stream( pcminfo, stream );
\r
6883 if ( subdevice != -1 ) {
\r
6884 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6885 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6887 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6888 if ( result < 0 ) {
\r
6889 // Device probably doesn't support playback.
\r
6890 goto captureProbe;
\r
6894 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6895 if ( result < 0 ) {
\r
6896 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6897 errorText_ = errorStream_.str();
\r
6898 error( RtAudioError::WARNING );
\r
6899 goto captureProbe;
\r
6902 // The device is open ... fill the parameter structure.
\r
6903 result = snd_pcm_hw_params_any( phandle, params );
\r
6904 if ( result < 0 ) {
\r
6905 snd_pcm_close( phandle );
\r
6906 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6907 errorText_ = errorStream_.str();
\r
6908 error( RtAudioError::WARNING );
\r
6909 goto captureProbe;
\r
6912 // Get output channel information.
\r
6913 unsigned int value;
\r
6914 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6915 if ( result < 0 ) {
\r
6916 snd_pcm_close( phandle );
\r
6917 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6918 errorText_ = errorStream_.str();
\r
6919 error( RtAudioError::WARNING );
\r
6920 goto captureProbe;
\r
6922 info.outputChannels = value;
\r
6923 snd_pcm_close( phandle );
\r
6926 stream = SND_PCM_STREAM_CAPTURE;
\r
6927 snd_pcm_info_set_stream( pcminfo, stream );
\r
6929 // Now try for capture unless default device (with subdev = -1)
\r
6930 if ( subdevice != -1 ) {
\r
6931 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6932 snd_ctl_close( chandle );
\r
6933 if ( result < 0 ) {
\r
6934 // Device probably doesn't support capture.
\r
6935 if ( info.outputChannels == 0 ) return info;
\r
6936 goto probeParameters;
\r
6940 snd_ctl_close( chandle );
\r
6942 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6943 if ( result < 0 ) {
\r
6944 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6945 errorText_ = errorStream_.str();
\r
6946 error( RtAudioError::WARNING );
\r
6947 if ( info.outputChannels == 0 ) return info;
\r
6948 goto probeParameters;
\r
6951 // The device is open ... fill the parameter structure.
\r
6952 result = snd_pcm_hw_params_any( phandle, params );
\r
6953 if ( result < 0 ) {
\r
6954 snd_pcm_close( phandle );
\r
6955 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6956 errorText_ = errorStream_.str();
\r
6957 error( RtAudioError::WARNING );
\r
6958 if ( info.outputChannels == 0 ) return info;
\r
6959 goto probeParameters;
\r
6962 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6963 if ( result < 0 ) {
\r
6964 snd_pcm_close( phandle );
\r
6965 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6966 errorText_ = errorStream_.str();
\r
6967 error( RtAudioError::WARNING );
\r
6968 if ( info.outputChannels == 0 ) return info;
\r
6969 goto probeParameters;
\r
6971 info.inputChannels = value;
\r
6972 snd_pcm_close( phandle );
\r
6974 // If device opens for both playback and capture, we determine the channels.
\r
6975 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6976 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6978 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6979 if ( device == 0 && info.outputChannels > 0 )
\r
6980 info.isDefaultOutput = true;
\r
6981 if ( device == 0 && info.inputChannels > 0 )
\r
6982 info.isDefaultInput = true;
\r
6985 // At this point, we just need to figure out the supported data
\r
6986 // formats and sample rates. We'll proceed by opening the device in
\r
6987 // the direction with the maximum number of channels, or playback if
\r
6988 // they are equal. This might limit our sample rate options, but so
\r
6991 if ( info.outputChannels >= info.inputChannels )
\r
6992 stream = SND_PCM_STREAM_PLAYBACK;
\r
6994 stream = SND_PCM_STREAM_CAPTURE;
\r
6995 snd_pcm_info_set_stream( pcminfo, stream );
\r
6997 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6998 if ( result < 0 ) {
\r
6999 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7000 errorText_ = errorStream_.str();
\r
7001 error( RtAudioError::WARNING );
\r
7005 // The device is open ... fill the parameter structure.
\r
7006 result = snd_pcm_hw_params_any( phandle, params );
\r
7007 if ( result < 0 ) {
\r
7008 snd_pcm_close( phandle );
\r
7009 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7010 errorText_ = errorStream_.str();
\r
7011 error( RtAudioError::WARNING );
\r
7015 // Test our discrete set of sample rate values.
\r
7016 info.sampleRates.clear();
\r
7017 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7018 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7019 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7021 if ( info.sampleRates.size() == 0 ) {
\r
7022 snd_pcm_close( phandle );
\r
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7024 errorText_ = errorStream_.str();
\r
7025 error( RtAudioError::WARNING );
\r
7029 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7030 snd_pcm_format_t format;
\r
7031 info.nativeFormats = 0;
\r
7032 format = SND_PCM_FORMAT_S8;
\r
7033 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7034 info.nativeFormats |= RTAUDIO_SINT8;
\r
7035 format = SND_PCM_FORMAT_S16;
\r
7036 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7037 info.nativeFormats |= RTAUDIO_SINT16;
\r
7038 format = SND_PCM_FORMAT_S24;
\r
7039 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7040 info.nativeFormats |= RTAUDIO_SINT24;
\r
7041 format = SND_PCM_FORMAT_S32;
\r
7042 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7043 info.nativeFormats |= RTAUDIO_SINT32;
\r
7044 format = SND_PCM_FORMAT_FLOAT;
\r
7045 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7046 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7047 format = SND_PCM_FORMAT_FLOAT64;
\r
7048 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7049 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7051 // Check that we have at least one supported format
\r
7052 if ( info.nativeFormats == 0 ) {
\r
7053 snd_pcm_close( phandle );
\r
7054 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7055 errorText_ = errorStream_.str();
\r
7056 error( RtAudioError::WARNING );
\r
7060 // Get the device name
\r
7062 result = snd_card_get_name( card, &cardname );
\r
7063 if ( result >= 0 ) {
\r
7064 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7069 // That's all ... close the device and return
\r
7070 snd_pcm_close( phandle );
\r
7071 info.probed = true;
\r
7075 void RtApiAlsa :: saveDeviceInfo( void )
\r
7079 unsigned int nDevices = getDeviceCount();
\r
7080 devices_.resize( nDevices );
\r
7081 for ( unsigned int i=0; i<nDevices; i++ )
\r
7082 devices_[i] = getDeviceInfo( i );
\r
7085 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7086 unsigned int firstChannel, unsigned int sampleRate,
\r
7087 RtAudioFormat format, unsigned int *bufferSize,
\r
7088 RtAudio::StreamOptions *options )
\r
7091 #if defined(__RTAUDIO_DEBUG__)
\r
7092 snd_output_t *out;
\r
7093 snd_output_stdio_attach(&out, stderr, 0);
\r
7096 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7098 unsigned nDevices = 0;
\r
7099 int result, subdevice, card;
\r
7101 snd_ctl_t *chandle;
\r
7103 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7104 snprintf(name, sizeof(name), "%s", "default");
\r
7106 // Count cards and devices
\r
7108 snd_card_next( &card );
\r
7109 while ( card >= 0 ) {
\r
7110 sprintf( name, "hw:%d", card );
\r
7111 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7112 if ( result < 0 ) {
\r
7113 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7114 errorText_ = errorStream_.str();
\r
7119 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7120 if ( result < 0 ) break;
\r
7121 if ( subdevice < 0 ) break;
\r
7122 if ( nDevices == device ) {
\r
7123 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7124 snd_ctl_close( chandle );
\r
7129 snd_ctl_close( chandle );
\r
7130 snd_card_next( &card );
\r
7133 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7134 if ( result == 0 ) {
\r
7135 if ( nDevices == device ) {
\r
7136 strcpy( name, "default" );
\r
7142 if ( nDevices == 0 ) {
\r
7143 // This should not happen because a check is made before this function is called.
\r
7144 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7148 if ( device >= nDevices ) {
\r
7149 // This should not happen because a check is made before this function is called.
\r
7150 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7157 // The getDeviceInfo() function will not work for a device that is
\r
7158 // already open. Thus, we'll probe the system before opening a
\r
7159 // stream and save the results for use by getDeviceInfo().
\r
7160 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7161 this->saveDeviceInfo();
\r
7163 snd_pcm_stream_t stream;
\r
7164 if ( mode == OUTPUT )
\r
7165 stream = SND_PCM_STREAM_PLAYBACK;
\r
7167 stream = SND_PCM_STREAM_CAPTURE;
\r
7169 snd_pcm_t *phandle;
\r
7170 int openMode = SND_PCM_ASYNC;
\r
7171 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7172 if ( result < 0 ) {
\r
7173 if ( mode == OUTPUT )
\r
7174 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7176 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7177 errorText_ = errorStream_.str();
\r
7181 // Fill the parameter structure.
\r
7182 snd_pcm_hw_params_t *hw_params;
\r
7183 snd_pcm_hw_params_alloca( &hw_params );
\r
7184 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7185 if ( result < 0 ) {
\r
7186 snd_pcm_close( phandle );
\r
7187 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7188 errorText_ = errorStream_.str();
\r
7192 #if defined(__RTAUDIO_DEBUG__)
\r
7193 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7194 snd_pcm_hw_params_dump( hw_params, out );
\r
7197 // Set access ... check user preference.
\r
7198 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7199 stream_.userInterleaved = false;
\r
7200 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7201 if ( result < 0 ) {
\r
7202 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7203 stream_.deviceInterleaved[mode] = true;
\r
7206 stream_.deviceInterleaved[mode] = false;
\r
7209 stream_.userInterleaved = true;
\r
7210 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7211 if ( result < 0 ) {
\r
7212 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7213 stream_.deviceInterleaved[mode] = false;
\r
7216 stream_.deviceInterleaved[mode] = true;
\r
7219 if ( result < 0 ) {
\r
7220 snd_pcm_close( phandle );
\r
7221 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7222 errorText_ = errorStream_.str();
\r
7226 // Determine how to set the device format.
\r
7227 stream_.userFormat = format;
\r
7228 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7230 if ( format == RTAUDIO_SINT8 )
\r
7231 deviceFormat = SND_PCM_FORMAT_S8;
\r
7232 else if ( format == RTAUDIO_SINT16 )
\r
7233 deviceFormat = SND_PCM_FORMAT_S16;
\r
7234 else if ( format == RTAUDIO_SINT24 )
\r
7235 deviceFormat = SND_PCM_FORMAT_S24;
\r
7236 else if ( format == RTAUDIO_SINT32 )
\r
7237 deviceFormat = SND_PCM_FORMAT_S32;
\r
7238 else if ( format == RTAUDIO_FLOAT32 )
\r
7239 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7240 else if ( format == RTAUDIO_FLOAT64 )
\r
7241 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7243 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7244 stream_.deviceFormat[mode] = format;
\r
7248 // The user requested format is not natively supported by the device.
\r
7249 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7250 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7255 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7256 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7257 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7261 deviceFormat = SND_PCM_FORMAT_S32;
\r
7262 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7263 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7267 deviceFormat = SND_PCM_FORMAT_S24;
\r
7268 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7269 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7273 deviceFormat = SND_PCM_FORMAT_S16;
\r
7274 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7275 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7279 deviceFormat = SND_PCM_FORMAT_S8;
\r
7280 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7281 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7285 // If we get here, no supported format was found.
\r
7286 snd_pcm_close( phandle );
\r
7287 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7288 errorText_ = errorStream_.str();
\r
7292 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7293 if ( result < 0 ) {
\r
7294 snd_pcm_close( phandle );
\r
7295 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7296 errorText_ = errorStream_.str();
\r
7300 // Determine whether byte-swaping is necessary.
\r
7301 stream_.doByteSwap[mode] = false;
\r
7302 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7303 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7304 if ( result == 0 )
\r
7305 stream_.doByteSwap[mode] = true;
\r
7306 else if (result < 0) {
\r
7307 snd_pcm_close( phandle );
\r
7308 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7309 errorText_ = errorStream_.str();
\r
7314 // Set the sample rate.
\r
7315 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7316 if ( result < 0 ) {
\r
7317 snd_pcm_close( phandle );
\r
7318 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7319 errorText_ = errorStream_.str();
\r
7323 // Determine the number of channels for this device. We support a possible
\r
7324 // minimum device channel number > than the value requested by the user.
\r
7325 stream_.nUserChannels[mode] = channels;
\r
7326 unsigned int value;
\r
7327 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7328 unsigned int deviceChannels = value;
\r
7329 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7330 snd_pcm_close( phandle );
\r
7331 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7332 errorText_ = errorStream_.str();
\r
7336 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7337 if ( result < 0 ) {
\r
7338 snd_pcm_close( phandle );
\r
7339 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7340 errorText_ = errorStream_.str();
\r
7343 deviceChannels = value;
\r
7344 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7345 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7347 // Set the device channels.
\r
7348 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7349 if ( result < 0 ) {
\r
7350 snd_pcm_close( phandle );
\r
7351 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7352 errorText_ = errorStream_.str();
\r
7356 // Set the buffer (or period) size.
\r
7358 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7359 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7360 if ( result < 0 ) {
\r
7361 snd_pcm_close( phandle );
\r
7362 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7363 errorText_ = errorStream_.str();
\r
7366 *bufferSize = periodSize;
\r
7368 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7369 unsigned int periods = 0;
\r
7370 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7371 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7372 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7373 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7374 if ( result < 0 ) {
\r
7375 snd_pcm_close( phandle );
\r
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7377 errorText_ = errorStream_.str();
\r
7381 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7382 // MUST be the same in both directions!
\r
7383 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7384 snd_pcm_close( phandle );
\r
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7386 errorText_ = errorStream_.str();
\r
7390 stream_.bufferSize = *bufferSize;
\r
7392 // Install the hardware configuration
\r
7393 result = snd_pcm_hw_params( phandle, hw_params );
\r
7394 if ( result < 0 ) {
\r
7395 snd_pcm_close( phandle );
\r
7396 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7397 errorText_ = errorStream_.str();
\r
7401 #if defined(__RTAUDIO_DEBUG__)
\r
7402 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7403 snd_pcm_hw_params_dump( hw_params, out );
\r
7406 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7407 snd_pcm_sw_params_t *sw_params = NULL;
\r
7408 snd_pcm_sw_params_alloca( &sw_params );
\r
7409 snd_pcm_sw_params_current( phandle, sw_params );
\r
7410 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7411 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7412 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7414 // The following two settings were suggested by Theo Veenker
\r
7415 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7416 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7418 // here are two options for a fix
\r
7419 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7420 snd_pcm_uframes_t val;
\r
7421 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7422 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7424 result = snd_pcm_sw_params( phandle, sw_params );
\r
7425 if ( result < 0 ) {
\r
7426 snd_pcm_close( phandle );
\r
7427 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7428 errorText_ = errorStream_.str();
\r
7432 #if defined(__RTAUDIO_DEBUG__)
\r
7433 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7434 snd_pcm_sw_params_dump( sw_params, out );
\r
7437 // Set flags for buffer conversion
\r
7438 stream_.doConvertBuffer[mode] = false;
\r
7439 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7440 stream_.doConvertBuffer[mode] = true;
\r
7441 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7442 stream_.doConvertBuffer[mode] = true;
\r
7443 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7444 stream_.nUserChannels[mode] > 1 )
\r
7445 stream_.doConvertBuffer[mode] = true;
\r
7447 // Allocate the ApiHandle if necessary and then save.
\r
7448 AlsaHandle *apiInfo = 0;
\r
7449 if ( stream_.apiHandle == 0 ) {
\r
7451 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7453 catch ( std::bad_alloc& ) {
\r
7454 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7458 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7459 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7463 stream_.apiHandle = (void *) apiInfo;
\r
7464 apiInfo->handles[0] = 0;
\r
7465 apiInfo->handles[1] = 0;
\r
7468 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7470 apiInfo->handles[mode] = phandle;
\r
7473 // Allocate necessary internal buffers.
\r
7474 unsigned long bufferBytes;
\r
7475 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7476 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7477 if ( stream_.userBuffer[mode] == NULL ) {
\r
7478 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7482 if ( stream_.doConvertBuffer[mode] ) {
\r
7484 bool makeBuffer = true;
\r
7485 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7486 if ( mode == INPUT ) {
\r
7487 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7488 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7489 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7493 if ( makeBuffer ) {
\r
7494 bufferBytes *= *bufferSize;
\r
7495 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7496 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7497 if ( stream_.deviceBuffer == NULL ) {
\r
7498 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7504 stream_.sampleRate = sampleRate;
\r
7505 stream_.nBuffers = periods;
\r
7506 stream_.device[mode] = device;
\r
7507 stream_.state = STREAM_STOPPED;
\r
7509 // Setup the buffer conversion information structure.
\r
7510 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7512 // Setup thread if necessary.
\r
7513 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7514 // We had already set up an output stream.
\r
7515 stream_.mode = DUPLEX;
\r
7516 // Link the streams if possible.
\r
7517 apiInfo->synchronized = false;
\r
7518 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7519 apiInfo->synchronized = true;
\r
7521 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7522 error( RtAudioError::WARNING );
\r
7526 stream_.mode = mode;
\r
7528 // Setup callback thread.
\r
7529 stream_.callbackInfo.object = (void *) this;
\r
7531 // Set the thread attributes for joinable and realtime scheduling
\r
7532 // priority (optional). The higher priority will only take affect
\r
7533 // if the program is run as root or suid. Note, under Linux
\r
7534 // processes with CAP_SYS_NICE privilege, a user can change
\r
7535 // scheduling policy and priority (thus need not be root). See
\r
7536 // POSIX "capabilities".
\r
7537 pthread_attr_t attr;
\r
7538 pthread_attr_init( &attr );
\r
7539 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7541 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7542 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7543 // We previously attempted to increase the audio callback priority
\r
7544 // to SCHED_RR here via the attributes. However, while no errors
\r
7545 // were reported in doing so, it did not work. So, now this is
\r
7546 // done in the alsaCallbackHandler function.
\r
7547 stream_.callbackInfo.doRealtime = true;
\r
7548 int priority = options->priority;
\r
7549 int min = sched_get_priority_min( SCHED_RR );
\r
7550 int max = sched_get_priority_max( SCHED_RR );
\r
7551 if ( priority < min ) priority = min;
\r
7552 else if ( priority > max ) priority = max;
\r
7553 stream_.callbackInfo.priority = priority;
\r
7557 stream_.callbackInfo.isRunning = true;
\r
7558 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7559 pthread_attr_destroy( &attr );
\r
7561 stream_.callbackInfo.isRunning = false;
\r
7562 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7571 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7572 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7573 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7575 stream_.apiHandle = 0;
\r
7578 if ( phandle) snd_pcm_close( phandle );
\r
7580 for ( int i=0; i<2; i++ ) {
\r
7581 if ( stream_.userBuffer[i] ) {
\r
7582 free( stream_.userBuffer[i] );
\r
7583 stream_.userBuffer[i] = 0;
\r
7587 if ( stream_.deviceBuffer ) {
\r
7588 free( stream_.deviceBuffer );
\r
7589 stream_.deviceBuffer = 0;
\r
7592 stream_.state = STREAM_CLOSED;
\r
7596 void RtApiAlsa :: closeStream()
\r
7598 if ( stream_.state == STREAM_CLOSED ) {
\r
7599 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7600 error( RtAudioError::WARNING );
\r
7604 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7605 stream_.callbackInfo.isRunning = false;
\r
7606 MUTEX_LOCK( &stream_.mutex );
\r
7607 if ( stream_.state == STREAM_STOPPED ) {
\r
7608 apiInfo->runnable = true;
\r
7609 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7611 MUTEX_UNLOCK( &stream_.mutex );
\r
7612 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7614 if ( stream_.state == STREAM_RUNNING ) {
\r
7615 stream_.state = STREAM_STOPPED;
\r
7616 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7617 snd_pcm_drop( apiInfo->handles[0] );
\r
7618 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7619 snd_pcm_drop( apiInfo->handles[1] );
\r
7623 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7624 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7625 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7627 stream_.apiHandle = 0;
\r
7630 for ( int i=0; i<2; i++ ) {
\r
7631 if ( stream_.userBuffer[i] ) {
\r
7632 free( stream_.userBuffer[i] );
\r
7633 stream_.userBuffer[i] = 0;
\r
7637 if ( stream_.deviceBuffer ) {
\r
7638 free( stream_.deviceBuffer );
\r
7639 stream_.deviceBuffer = 0;
\r
7642 stream_.mode = UNINITIALIZED;
\r
7643 stream_.state = STREAM_CLOSED;
\r
7646 void RtApiAlsa :: startStream()
\r
7648 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7651 if ( stream_.state == STREAM_RUNNING ) {
\r
7652 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7653 error( RtAudioError::WARNING );
\r
7657 MUTEX_LOCK( &stream_.mutex );
\r
7660 snd_pcm_state_t state;
\r
7661 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7662 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7663 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7664 state = snd_pcm_state( handle[0] );
\r
7665 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7666 result = snd_pcm_prepare( handle[0] );
\r
7667 if ( result < 0 ) {
\r
7668 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7669 errorText_ = errorStream_.str();
\r
7675 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7676 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7677 state = snd_pcm_state( handle[1] );
\r
7678 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7679 result = snd_pcm_prepare( handle[1] );
\r
7680 if ( result < 0 ) {
\r
7681 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7682 errorText_ = errorStream_.str();
\r
7688 stream_.state = STREAM_RUNNING;
\r
7691 apiInfo->runnable = true;
\r
7692 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7693 MUTEX_UNLOCK( &stream_.mutex );
\r
7695 if ( result >= 0 ) return;
\r
7696 error( RtAudioError::SYSTEM_ERROR );
\r
7699 void RtApiAlsa :: stopStream()
\r
7702 if ( stream_.state == STREAM_STOPPED ) {
\r
7703 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7704 error( RtAudioError::WARNING );
\r
7708 stream_.state = STREAM_STOPPED;
\r
7709 MUTEX_LOCK( &stream_.mutex );
\r
7712 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7713 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7714 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7715 if ( apiInfo->synchronized )
\r
7716 result = snd_pcm_drop( handle[0] );
\r
7718 result = snd_pcm_drain( handle[0] );
\r
7719 if ( result < 0 ) {
\r
7720 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7721 errorText_ = errorStream_.str();
\r
7726 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7727 result = snd_pcm_drop( handle[1] );
\r
7728 if ( result < 0 ) {
\r
7729 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7730 errorText_ = errorStream_.str();
\r
7736 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7737 MUTEX_UNLOCK( &stream_.mutex );
\r
7739 if ( result >= 0 ) return;
\r
7740 error( RtAudioError::SYSTEM_ERROR );
\r
7743 void RtApiAlsa :: abortStream()
\r
7746 if ( stream_.state == STREAM_STOPPED ) {
\r
7747 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7748 error( RtAudioError::WARNING );
\r
7752 stream_.state = STREAM_STOPPED;
\r
7753 MUTEX_LOCK( &stream_.mutex );
\r
7756 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7757 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7758 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7759 result = snd_pcm_drop( handle[0] );
\r
7760 if ( result < 0 ) {
\r
7761 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7762 errorText_ = errorStream_.str();
\r
7767 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7768 result = snd_pcm_drop( handle[1] );
\r
7769 if ( result < 0 ) {
\r
7770 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7771 errorText_ = errorStream_.str();
\r
7777 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7778 MUTEX_UNLOCK( &stream_.mutex );
\r
7780 if ( result >= 0 ) return;
\r
7781 error( RtAudioError::SYSTEM_ERROR );
\r
// One cycle of the ALSA callback thread: parks while the stream is stopped,
// invokes the user callback, then performs the actual device read and/or
// write for a single buffer of frames.  Control flow uses gotos so that a
// failed read still attempts the write half (tryOutput) and every exit path
// releases the mutex (unlock).
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream()/closeStream() marks us runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any xrun flags recorded during the previous cycle.
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // Callback return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the conversion buffer when the device
    // layout differs from what the user requested.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the flat buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Flag the overrun for the next callback and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Still attempt the output half of a duplex stream.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Flag the underrun for the next callback and re-prepare the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
\r
7970 static void *alsaCallbackHandler( void *ptr )
\r
7972 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7973 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7974 bool *isRunning = &info->isRunning;
\r
7976 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7977 if ( &info->doRealtime ) {
\r
7978 pthread_t tID = pthread_self(); // ID of this thread
\r
7979 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7980 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7984 while ( *isRunning == true ) {
\r
7985 pthread_testcancel();
\r
7986 object->callbackEvent();
\r
7989 pthread_exit( NULL );
\r
7992 //******************** End of __LINUX_ALSA__ *********************//
\r
7995 #if defined(__LINUX_PULSE__)
\r
7997 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
7998 // and Tristan Matthews.
\r
8000 #include <pulse/error.h>
\r
8001 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend advertises; zero-terminated list.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Pairs an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats passed straight through to PulseAudio; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (0 when output not open)
  pa_simple *s_rec;           // capture connection (0 when input not open)
  pthread_t thread;           // callback thread
  pthread_cond_t runnable_cv; // wakes the callback thread when (re)started
  bool runnable;              // guarded by the stream mutex
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
\r
8027 RtApiPulse::~RtApiPulse()
\r
8029 if ( stream_.state != STREAM_CLOSED )
\r
8033 unsigned int RtApiPulse::getDeviceCount( void )
\r
8038 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8040 RtAudio::DeviceInfo info;
\r
8041 info.probed = true;
\r
8042 info.name = "PulseAudio";
\r
8043 info.outputChannels = 2;
\r
8044 info.inputChannels = 2;
\r
8045 info.duplexChannels = 2;
\r
8046 info.isDefaultOutput = true;
\r
8047 info.isDefaultInput = true;
\r
8049 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8050 info.sampleRates.push_back( *sr );
\r
8052 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8057 static void *pulseaudio_callback( void * user )
\r
8059 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8060 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8061 volatile bool *isRunning = &cbi->isRunning;
\r
8063 while ( *isRunning ) {
\r
8064 pthread_testcancel();
\r
8065 context->callbackEvent();
\r
8068 pthread_exit( NULL );
\r
8071 void RtApiPulse::closeStream( void )
\r
8073 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8075 stream_.callbackInfo.isRunning = false;
\r
8077 MUTEX_LOCK( &stream_.mutex );
\r
8078 if ( stream_.state == STREAM_STOPPED ) {
\r
8079 pah->runnable = true;
\r
8080 pthread_cond_signal( &pah->runnable_cv );
\r
8082 MUTEX_UNLOCK( &stream_.mutex );
\r
8084 pthread_join( pah->thread, 0 );
\r
8085 if ( pah->s_play ) {
\r
8086 pa_simple_flush( pah->s_play, NULL );
\r
8087 pa_simple_free( pah->s_play );
\r
8090 pa_simple_free( pah->s_rec );
\r
8092 pthread_cond_destroy( &pah->runnable_cv );
\r
8094 stream_.apiHandle = 0;
\r
8097 if ( stream_.userBuffer[0] ) {
\r
8098 free( stream_.userBuffer[0] );
\r
8099 stream_.userBuffer[0] = 0;
\r
8101 if ( stream_.userBuffer[1] ) {
\r
8102 free( stream_.userBuffer[1] );
\r
8103 stream_.userBuffer[1] = 0;
\r
8106 stream_.state = STREAM_CLOSED;
\r
8107 stream_.mode = UNINITIALIZED;
\r
// One cycle of the PulseAudio callback thread: parks while the stream is
// stopped, invokes the user callback, then performs the blocking simple-API
// write and/or read for one buffer of frames.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Block until startStream()/closeStream() marks us runnable.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // Callback return value 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Use the conversion buffer for the device side when format/channel
  // conversion is enabled for that direction.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // The state may have changed while acquiring the mutex.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );
        bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
        bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.userFormat );

    // Blocking write of one buffer; failures are reported as warnings only.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer; failures are reported as warnings only.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  if ( doStopStream == 1 )
    stopStream();
}
\r
8202 void RtApiPulse::startStream( void )
\r
8204 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8206 if ( stream_.state == STREAM_CLOSED ) {
\r
8207 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8208 error( RtAudioError::INVALID_USE );
\r
8211 if ( stream_.state == STREAM_RUNNING ) {
\r
8212 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8213 error( RtAudioError::WARNING );
\r
8217 MUTEX_LOCK( &stream_.mutex );
\r
8219 stream_.state = STREAM_RUNNING;
\r
8221 pah->runnable = true;
\r
8222 pthread_cond_signal( &pah->runnable_cv );
\r
8223 MUTEX_UNLOCK( &stream_.mutex );
\r
8226 void RtApiPulse::stopStream( void )
\r
8228 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8230 if ( stream_.state == STREAM_CLOSED ) {
\r
8231 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8232 error( RtAudioError::INVALID_USE );
\r
8235 if ( stream_.state == STREAM_STOPPED ) {
\r
8236 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8237 error( RtAudioError::WARNING );
\r
8241 stream_.state = STREAM_STOPPED;
\r
8242 MUTEX_LOCK( &stream_.mutex );
\r
8244 if ( pah && pah->s_play ) {
\r
8246 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8247 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8248 pa_strerror( pa_error ) << ".";
\r
8249 errorText_ = errorStream_.str();
\r
8250 MUTEX_UNLOCK( &stream_.mutex );
\r
8251 error( RtAudioError::SYSTEM_ERROR );
\r
8256 stream_.state = STREAM_STOPPED;
\r
8257 MUTEX_UNLOCK( &stream_.mutex );
\r
8260 void RtApiPulse::abortStream( void )
\r
8262 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8264 if ( stream_.state == STREAM_CLOSED ) {
\r
8265 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8266 error( RtAudioError::INVALID_USE );
\r
8269 if ( stream_.state == STREAM_STOPPED ) {
\r
8270 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8271 error( RtAudioError::WARNING );
\r
8275 stream_.state = STREAM_STOPPED;
\r
8276 MUTEX_LOCK( &stream_.mutex );
\r
8278 if ( pah && pah->s_play ) {
\r
8280 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8281 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8282 pa_strerror( pa_error ) << ".";
\r
8283 errorText_ = errorStream_.str();
\r
8284 MUTEX_UNLOCK( &stream_.mutex );
\r
8285 error( RtAudioError::SYSTEM_ERROR );
\r
8290 stream_.state = STREAM_STOPPED;
\r
8291 MUTEX_UNLOCK( &stream_.mutex );
\r
8294 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8295 unsigned int channels, unsigned int firstChannel,
\r
8296 unsigned int sampleRate, RtAudioFormat format,
\r
8297 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8299 PulseAudioHandle *pah = 0;
\r
8300 unsigned long bufferBytes = 0;
\r
8301 pa_sample_spec ss;
\r
8303 if ( device != 0 ) return false;
\r
8304 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8305 if ( channels != 1 && channels != 2 ) {
\r
8306 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8309 ss.channels = channels;
\r
8311 if ( firstChannel != 0 ) return false;
\r
8313 bool sr_found = false;
\r
8314 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8315 if ( sampleRate == *sr ) {
\r
8317 stream_.sampleRate = sampleRate;
\r
8318 ss.rate = sampleRate;
\r
8322 if ( !sr_found ) {
\r
8323 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8327 bool sf_found = 0;
\r
8328 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8329 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8330 if ( format == sf->rtaudio_format ) {
\r
8332 stream_.userFormat = sf->rtaudio_format;
\r
8333 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8334 ss.format = sf->pa_format;
\r
8338 if ( !sf_found ) { // Use internal data format conversion.
\r
8339 stream_.userFormat = format;
\r
8340 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8341 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8344 // Set other stream parameters.
\r
8345 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8346 else stream_.userInterleaved = true;
\r
8347 stream_.deviceInterleaved[mode] = true;
\r
8348 stream_.nBuffers = 1;
\r
8349 stream_.doByteSwap[mode] = false;
\r
8350 stream_.nUserChannels[mode] = channels;
\r
8351 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8352 stream_.channelOffset[mode] = 0;
\r
8353 std::string streamName = "RtAudio";
\r
8355 // Set flags for buffer conversion.
\r
8356 stream_.doConvertBuffer[mode] = false;
\r
8357 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8358 stream_.doConvertBuffer[mode] = true;
\r
8359 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8360 stream_.doConvertBuffer[mode] = true;
\r
8362 // Allocate necessary internal buffers.
\r
8363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8364 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8365 if ( stream_.userBuffer[mode] == NULL ) {
\r
8366 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8369 stream_.bufferSize = *bufferSize;
\r
8371 if ( stream_.doConvertBuffer[mode] ) {
\r
8373 bool makeBuffer = true;
\r
8374 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8375 if ( mode == INPUT ) {
\r
8376 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8377 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8378 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8382 if ( makeBuffer ) {
\r
8383 bufferBytes *= *bufferSize;
\r
8384 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8385 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8386 if ( stream_.deviceBuffer == NULL ) {
\r
8387 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8393 stream_.device[mode] = device;
\r
8395 // Setup the buffer conversion information structure.
\r
8396 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8398 if ( !stream_.apiHandle ) {
\r
8399 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8401 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8405 stream_.apiHandle = pah;
\r
8406 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8407 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8411 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8414 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8417 pa_buffer_attr buffer_attr;
\r
8418 buffer_attr.fragsize = bufferBytes;
\r
8419 buffer_attr.maxlength = -1;
\r
8421 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8422 if ( !pah->s_rec ) {
\r
8423 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8428 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8429 if ( !pah->s_play ) {
\r
8430 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8438 if ( stream_.mode == UNINITIALIZED )
\r
8439 stream_.mode = mode;
\r
8440 else if ( stream_.mode == mode )
\r
8443 stream_.mode = DUPLEX;
\r
8445 if ( !stream_.callbackInfo.isRunning ) {
\r
8446 stream_.callbackInfo.object = this;
\r
8447 stream_.callbackInfo.isRunning = true;
\r
8448 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8449 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8454 stream_.state = STREAM_STOPPED;
\r
8458 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8459 pthread_cond_destroy( &pah->runnable_cv );
\r
8461 stream_.apiHandle = 0;
\r
8464 for ( int i=0; i<2; i++ ) {
\r
8465 if ( stream_.userBuffer[i] ) {
\r
8466 free( stream_.userBuffer[i] );
\r
8467 stream_.userBuffer[i] = 0;
\r
8471 if ( stream_.deviceBuffer ) {
\r
8472 free( stream_.deviceBuffer );
\r
8473 stream_.deviceBuffer = 0;
\r
8479 //******************** End of __LINUX_PULSE__ *********************//
\r
8482 #if defined(__LINUX_OSS__)
\r
8484 #include <unistd.h>
\r
8485 #include <sys/ioctl.h>
\r
8486 #include <unistd.h>
\r
8487 #include <fcntl.h>
\r
8488 #include <sys/soundcard.h>
\r
8489 #include <errno.h>
\r
8492 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (output = [0], input = [1])
  bool xrun[2];            // over/underflow flags per direction
  bool triggered;          // true once the output device has been triggered
  pthread_cond_t runnable; // signalled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8506 RtApiOss :: RtApiOss()
\r
8508 // Nothing to do here.
\r
8511 RtApiOss :: ~RtApiOss()
\r
8513 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8516 unsigned int RtApiOss :: getDeviceCount( void )
\r
8518 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8519 if ( mixerfd == -1 ) {
\r
8520 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8521 error( RtAudioError::WARNING );
\r
8525 oss_sysinfo sysinfo;
\r
8526 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8528 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8529 error( RtAudioError::WARNING );
\r
8534 return sysinfo.numaudios;
\r
8537 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8539 RtAudio::DeviceInfo info;
\r
8540 info.probed = false;
\r
8542 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8543 if ( mixerfd == -1 ) {
\r
8544 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8545 error( RtAudioError::WARNING );
\r
8549 oss_sysinfo sysinfo;
\r
8550 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8551 if ( result == -1 ) {
\r
8553 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8554 error( RtAudioError::WARNING );
\r
8558 unsigned nDevices = sysinfo.numaudios;
\r
8559 if ( nDevices == 0 ) {
\r
8561 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8562 error( RtAudioError::INVALID_USE );
\r
8566 if ( device >= nDevices ) {
\r
8568 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8569 error( RtAudioError::INVALID_USE );
\r
8573 oss_audioinfo ainfo;
\r
8574 ainfo.dev = device;
\r
8575 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8577 if ( result == -1 ) {
\r
8578 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8579 errorText_ = errorStream_.str();
\r
8580 error( RtAudioError::WARNING );
\r
8585 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8586 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8587 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8588 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8589 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8592 // Probe data formats ... do for input
\r
8593 unsigned long mask = ainfo.iformats;
\r
8594 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8595 info.nativeFormats |= RTAUDIO_SINT16;
\r
8596 if ( mask & AFMT_S8 )
\r
8597 info.nativeFormats |= RTAUDIO_SINT8;
\r
8598 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8599 info.nativeFormats |= RTAUDIO_SINT32;
\r
8600 if ( mask & AFMT_FLOAT )
\r
8601 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8602 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8603 info.nativeFormats |= RTAUDIO_SINT24;
\r
8605 // Check that we have at least one supported format
\r
8606 if ( info.nativeFormats == 0 ) {
\r
8607 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8608 errorText_ = errorStream_.str();
\r
8609 error( RtAudioError::WARNING );
\r
8613 // Probe the supported sample rates.
\r
8614 info.sampleRates.clear();
\r
8615 if ( ainfo.nrates ) {
\r
8616 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8617 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8618 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8619 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8626 // Check min and max rate values;
\r
8627 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8628 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8629 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8633 if ( info.sampleRates.size() == 0 ) {
\r
8634 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8635 errorText_ = errorStream_.str();
\r
8636 error( RtAudioError::WARNING );
\r
8639 info.probed = true;
\r
8640 info.name = ainfo.name;
\r
8647 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8648 unsigned int firstChannel, unsigned int sampleRate,
\r
8649 RtAudioFormat format, unsigned int *bufferSize,
\r
8650 RtAudio::StreamOptions *options )
\r
8652 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8653 if ( mixerfd == -1 ) {
\r
8654 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8658 oss_sysinfo sysinfo;
\r
8659 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8660 if ( result == -1 ) {
\r
8662 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8666 unsigned nDevices = sysinfo.numaudios;
\r
8667 if ( nDevices == 0 ) {
\r
8668 // This should not happen because a check is made before this function is called.
\r
8670 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8674 if ( device >= nDevices ) {
\r
8675 // This should not happen because a check is made before this function is called.
\r
8677 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8681 oss_audioinfo ainfo;
\r
8682 ainfo.dev = device;
\r
8683 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8685 if ( result == -1 ) {
\r
8686 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8687 errorText_ = errorStream_.str();
\r
8691 // Check if device supports input or output
\r
8692 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8693 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8694 if ( mode == OUTPUT )
\r
8695 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8697 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8698 errorText_ = errorStream_.str();
\r
8703 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8704 if ( mode == OUTPUT )
\r
8705 flags |= O_WRONLY;
\r
8706 else { // mode == INPUT
\r
8707 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8708 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8709 close( handle->id[0] );
\r
8710 handle->id[0] = 0;
\r
8711 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8712 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8713 errorText_ = errorStream_.str();
\r
8716 // Check that the number previously set channels is the same.
\r
8717 if ( stream_.nUserChannels[0] != channels ) {
\r
8718 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8719 errorText_ = errorStream_.str();
\r
8725 flags |= O_RDONLY;
\r
8728 // Set exclusive access if specified.
\r
8729 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8731 // Try to open the device.
\r
8733 fd = open( ainfo.devnode, flags, 0 );
\r
8735 if ( errno == EBUSY )
\r
8736 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8738 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8739 errorText_ = errorStream_.str();
\r
8743 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8745 if ( flags | O_RDWR ) {
\r
8746 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8747 if ( result == -1) {
\r
8748 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8749 errorText_ = errorStream_.str();
\r
8755 // Check the device channel support.
\r
8756 stream_.nUserChannels[mode] = channels;
\r
8757 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8759 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8760 errorText_ = errorStream_.str();
\r
8764 // Set the number of channels.
\r
8765 int deviceChannels = channels + firstChannel;
\r
8766 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8767 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8769 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8770 errorText_ = errorStream_.str();
\r
8773 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8775 // Get the data format mask
\r
8777 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8778 if ( result == -1 ) {
\r
8780 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8781 errorText_ = errorStream_.str();
\r
8785 // Determine how to set the device format.
\r
8786 stream_.userFormat = format;
\r
8787 int deviceFormat = -1;
\r
8788 stream_.doByteSwap[mode] = false;
\r
8789 if ( format == RTAUDIO_SINT8 ) {
\r
8790 if ( mask & AFMT_S8 ) {
\r
8791 deviceFormat = AFMT_S8;
\r
8792 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8795 else if ( format == RTAUDIO_SINT16 ) {
\r
8796 if ( mask & AFMT_S16_NE ) {
\r
8797 deviceFormat = AFMT_S16_NE;
\r
8798 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8800 else if ( mask & AFMT_S16_OE ) {
\r
8801 deviceFormat = AFMT_S16_OE;
\r
8802 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8803 stream_.doByteSwap[mode] = true;
\r
8806 else if ( format == RTAUDIO_SINT24 ) {
\r
8807 if ( mask & AFMT_S24_NE ) {
\r
8808 deviceFormat = AFMT_S24_NE;
\r
8809 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8811 else if ( mask & AFMT_S24_OE ) {
\r
8812 deviceFormat = AFMT_S24_OE;
\r
8813 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8814 stream_.doByteSwap[mode] = true;
\r
8817 else if ( format == RTAUDIO_SINT32 ) {
\r
8818 if ( mask & AFMT_S32_NE ) {
\r
8819 deviceFormat = AFMT_S32_NE;
\r
8820 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8822 else if ( mask & AFMT_S32_OE ) {
\r
8823 deviceFormat = AFMT_S32_OE;
\r
8824 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8825 stream_.doByteSwap[mode] = true;
\r
8829 if ( deviceFormat == -1 ) {
\r
8830 // The user requested format is not natively supported by the device.
\r
8831 if ( mask & AFMT_S16_NE ) {
\r
8832 deviceFormat = AFMT_S16_NE;
\r
8833 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8835 else if ( mask & AFMT_S32_NE ) {
\r
8836 deviceFormat = AFMT_S32_NE;
\r
8837 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8839 else if ( mask & AFMT_S24_NE ) {
\r
8840 deviceFormat = AFMT_S24_NE;
\r
8841 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8843 else if ( mask & AFMT_S16_OE ) {
\r
8844 deviceFormat = AFMT_S16_OE;
\r
8845 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8846 stream_.doByteSwap[mode] = true;
\r
8848 else if ( mask & AFMT_S32_OE ) {
\r
8849 deviceFormat = AFMT_S32_OE;
\r
8850 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8851 stream_.doByteSwap[mode] = true;
\r
8853 else if ( mask & AFMT_S24_OE ) {
\r
8854 deviceFormat = AFMT_S24_OE;
\r
8855 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8856 stream_.doByteSwap[mode] = true;
\r
8858 else if ( mask & AFMT_S8) {
\r
8859 deviceFormat = AFMT_S8;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8864 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8865 // This really shouldn't happen ...
\r
8867 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8868 errorText_ = errorStream_.str();
\r
8872 // Set the data format.
\r
8873 int temp = deviceFormat;
\r
8874 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8875 if ( result == -1 || deviceFormat != temp ) {
\r
8877 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8878 errorText_ = errorStream_.str();
\r
8882 // Attempt to set the buffer size. According to OSS, the minimum
\r
8883 // number of buffers is two. The supposed minimum buffer size is 16
\r
8884 // bytes, so that will be our lower bound. The argument to this
\r
8885 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8886 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8887 // We'll check the actual value used near the end of the setup
\r
8889 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8890 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8892 if ( options ) buffers = options->numberOfBuffers;
\r
8893 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8894 if ( buffers < 2 ) buffers = 3;
\r
8895 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8896 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8897 if ( result == -1 ) {
\r
8899 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8900 errorText_ = errorStream_.str();
\r
8903 stream_.nBuffers = buffers;
\r
8905 // Save buffer size (in sample frames).
\r
8906 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8907 stream_.bufferSize = *bufferSize;
\r
8909 // Set the sample rate.
\r
8910 int srate = sampleRate;
\r
8911 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8912 if ( result == -1 ) {
\r
8914 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8915 errorText_ = errorStream_.str();
\r
8919 // Verify the sample rate setup worked.
\r
8920 if ( abs( srate - sampleRate ) > 100 ) {
\r
8922 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8923 errorText_ = errorStream_.str();
\r
8926 stream_.sampleRate = sampleRate;
\r
8928 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8929 // We're doing duplex setup here.
\r
8930 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8931 stream_.nDeviceChannels[0] = deviceChannels;
\r
8934 // Set interleaving parameters.
\r
8935 stream_.userInterleaved = true;
\r
8936 stream_.deviceInterleaved[mode] = true;
\r
8937 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8938 stream_.userInterleaved = false;
\r
8940 // Set flags for buffer conversion
\r
8941 stream_.doConvertBuffer[mode] = false;
\r
8942 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8943 stream_.doConvertBuffer[mode] = true;
\r
8944 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8945 stream_.doConvertBuffer[mode] = true;
\r
8946 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8947 stream_.nUserChannels[mode] > 1 )
\r
8948 stream_.doConvertBuffer[mode] = true;
\r
8950 // Allocate the stream handles if necessary and then save.
\r
8951 if ( stream_.apiHandle == 0 ) {
\r
8953 handle = new OssHandle;
\r
8955 catch ( std::bad_alloc& ) {
\r
8956 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8960 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8961 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8965 stream_.apiHandle = (void *) handle;
\r
8968 handle = (OssHandle *) stream_.apiHandle;
\r
8970 handle->id[mode] = fd;
\r
8972 // Allocate necessary internal buffers.
\r
8973 unsigned long bufferBytes;
\r
8974 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8975 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8976 if ( stream_.userBuffer[mode] == NULL ) {
\r
8977 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8981 if ( stream_.doConvertBuffer[mode] ) {
\r
8983 bool makeBuffer = true;
\r
8984 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8985 if ( mode == INPUT ) {
\r
8986 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8987 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8988 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8992 if ( makeBuffer ) {
\r
8993 bufferBytes *= *bufferSize;
\r
8994 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8995 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8996 if ( stream_.deviceBuffer == NULL ) {
\r
8997 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9003 stream_.device[mode] = device;
\r
9004 stream_.state = STREAM_STOPPED;
\r
9006 // Setup the buffer conversion information structure.
\r
9007 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9009 // Setup thread if necessary.
\r
9010 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9011 // We had already set up an output stream.
\r
9012 stream_.mode = DUPLEX;
\r
9013 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9016 stream_.mode = mode;
\r
9018 // Setup callback thread.
\r
9019 stream_.callbackInfo.object = (void *) this;
\r
9021 // Set the thread attributes for joinable and realtime scheduling
\r
9022 // priority. The higher priority will only take affect if the
\r
9023 // program is run as root or suid.
\r
9024 pthread_attr_t attr;
\r
9025 pthread_attr_init( &attr );
\r
9026 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9027 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9028 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9029 struct sched_param param;
\r
9030 int priority = options->priority;
\r
9031 int min = sched_get_priority_min( SCHED_RR );
\r
9032 int max = sched_get_priority_max( SCHED_RR );
\r
9033 if ( priority < min ) priority = min;
\r
9034 else if ( priority > max ) priority = max;
\r
9035 param.sched_priority = priority;
\r
9036 pthread_attr_setschedparam( &attr, ¶m );
\r
9037 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9040 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9042 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9045 stream_.callbackInfo.isRunning = true;
\r
9046 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9047 pthread_attr_destroy( &attr );
\r
9049 stream_.callbackInfo.isRunning = false;
\r
9050 errorText_ = "RtApiOss::error creating callback thread!";
\r
9059 pthread_cond_destroy( &handle->runnable );
\r
9060 if ( handle->id[0] ) close( handle->id[0] );
\r
9061 if ( handle->id[1] ) close( handle->id[1] );
\r
9063 stream_.apiHandle = 0;
\r
9066 for ( int i=0; i<2; i++ ) {
\r
9067 if ( stream_.userBuffer[i] ) {
\r
9068 free( stream_.userBuffer[i] );
\r
9069 stream_.userBuffer[i] = 0;
\r
9073 if ( stream_.deviceBuffer ) {
\r
9074 free( stream_.deviceBuffer );
\r
9075 stream_.deviceBuffer = 0;
\r
9081 void RtApiOss :: closeStream()
\r
9083 if ( stream_.state == STREAM_CLOSED ) {
\r
9084 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9085 error( RtAudioError::WARNING );
\r
9089 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9090 stream_.callbackInfo.isRunning = false;
\r
9091 MUTEX_LOCK( &stream_.mutex );
\r
9092 if ( stream_.state == STREAM_STOPPED )
\r
9093 pthread_cond_signal( &handle->runnable );
\r
9094 MUTEX_UNLOCK( &stream_.mutex );
\r
9095 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9097 if ( stream_.state == STREAM_RUNNING ) {
\r
9098 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9099 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9101 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9102 stream_.state = STREAM_STOPPED;
\r
9106 pthread_cond_destroy( &handle->runnable );
\r
9107 if ( handle->id[0] ) close( handle->id[0] );
\r
9108 if ( handle->id[1] ) close( handle->id[1] );
\r
9110 stream_.apiHandle = 0;
\r
9113 for ( int i=0; i<2; i++ ) {
\r
9114 if ( stream_.userBuffer[i] ) {
\r
9115 free( stream_.userBuffer[i] );
\r
9116 stream_.userBuffer[i] = 0;
\r
9120 if ( stream_.deviceBuffer ) {
\r
9121 free( stream_.deviceBuffer );
\r
9122 stream_.deviceBuffer = 0;
\r
9125 stream_.mode = UNINITIALIZED;
\r
9126 stream_.state = STREAM_CLOSED;
\r
9129 void RtApiOss :: startStream()
\r
9132 if ( stream_.state == STREAM_RUNNING ) {
\r
9133 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9134 error( RtAudioError::WARNING );
\r
9138 MUTEX_LOCK( &stream_.mutex );
\r
9140 stream_.state = STREAM_RUNNING;
\r
9142 // No need to do anything else here ... OSS automatically starts
\r
9143 // when fed samples.
\r
9145 MUTEX_UNLOCK( &stream_.mutex );
\r
9147 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9148 pthread_cond_signal( &handle->runnable );
\r
9151 void RtApiOss :: stopStream()
\r
9154 if ( stream_.state == STREAM_STOPPED ) {
\r
9155 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9156 error( RtAudioError::WARNING );
\r
9160 MUTEX_LOCK( &stream_.mutex );
\r
9162 // The state might change while waiting on a mutex.
\r
9163 if ( stream_.state == STREAM_STOPPED ) {
\r
9164 MUTEX_UNLOCK( &stream_.mutex );
\r
9169 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9170 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9172 // Flush the output with zeros a few times.
\r
9175 RtAudioFormat format;
\r
9177 if ( stream_.doConvertBuffer[0] ) {
\r
9178 buffer = stream_.deviceBuffer;
\r
9179 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9180 format = stream_.deviceFormat[0];
\r
9183 buffer = stream_.userBuffer[0];
\r
9184 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9185 format = stream_.userFormat;
\r
9188 memset( buffer, 0, samples * formatBytes(format) );
\r
9189 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9190 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9191 if ( result == -1 ) {
\r
9192 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9193 error( RtAudioError::WARNING );
\r
9197 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9198 if ( result == -1 ) {
\r
9199 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9200 errorText_ = errorStream_.str();
\r
9203 handle->triggered = false;
\r
9206 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9207 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9208 if ( result == -1 ) {
\r
9209 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9210 errorText_ = errorStream_.str();
\r
9216 stream_.state = STREAM_STOPPED;
\r
9217 MUTEX_UNLOCK( &stream_.mutex );
\r
9219 if ( result != -1 ) return;
\r
9220 error( RtAudioError::SYSTEM_ERROR );
\r
9223 void RtApiOss :: abortStream()
\r
9226 if ( stream_.state == STREAM_STOPPED ) {
\r
9227 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9228 error( RtAudioError::WARNING );
\r
9232 MUTEX_LOCK( &stream_.mutex );
\r
9234 // The state might change while waiting on a mutex.
\r
9235 if ( stream_.state == STREAM_STOPPED ) {
\r
9236 MUTEX_UNLOCK( &stream_.mutex );
\r
9241 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9242 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9243 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9244 if ( result == -1 ) {
\r
9245 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9246 errorText_ = errorStream_.str();
\r
9249 handle->triggered = false;
\r
9252 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9253 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9254 if ( result == -1 ) {
\r
9255 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9256 errorText_ = errorStream_.str();
\r
9262 stream_.state = STREAM_STOPPED;
\r
9263 MUTEX_UNLOCK( &stream_.mutex );
\r
9265 if ( result != -1 ) return;
\r
9266 error( RtAudioError::SYSTEM_ERROR );
\r
9269 void RtApiOss :: callbackEvent()
\r
9271 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9272 if ( stream_.state == STREAM_STOPPED ) {
\r
9273 MUTEX_LOCK( &stream_.mutex );
\r
9274 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9275 if ( stream_.state != STREAM_RUNNING ) {
\r
9276 MUTEX_UNLOCK( &stream_.mutex );
\r
9279 MUTEX_UNLOCK( &stream_.mutex );
\r
9282 if ( stream_.state == STREAM_CLOSED ) {
\r
9283 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9284 error( RtAudioError::WARNING );
\r
9288 // Invoke user callback to get fresh output data.
\r
9289 int doStopStream = 0;
\r
9290 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9291 double streamTime = getStreamTime();
\r
9292 RtAudioStreamStatus status = 0;
\r
9293 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9294 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9295 handle->xrun[0] = false;
\r
9297 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9298 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9299 handle->xrun[1] = false;
\r
9301 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9302 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9303 if ( doStopStream == 2 ) {
\r
9304 this->abortStream();
\r
9308 MUTEX_LOCK( &stream_.mutex );
\r
9310 // The state might change while waiting on a mutex.
\r
9311 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9316 RtAudioFormat format;
\r
9318 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9320 // Setup parameters and do buffer conversion if necessary.
\r
9321 if ( stream_.doConvertBuffer[0] ) {
\r
9322 buffer = stream_.deviceBuffer;
\r
9323 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9324 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9325 format = stream_.deviceFormat[0];
\r
9328 buffer = stream_.userBuffer[0];
\r
9329 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9330 format = stream_.userFormat;
\r
9333 // Do byte swapping if necessary.
\r
9334 if ( stream_.doByteSwap[0] )
\r
9335 byteSwapBuffer( buffer, samples, format );
\r
9337 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9339 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9340 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9341 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9342 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9343 handle->triggered = true;
\r
9346 // Write samples to device.
\r
9347 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9349 if ( result == -1 ) {
\r
9350 // We'll assume this is an underrun, though there isn't a
\r
9351 // specific means for determining that.
\r
9352 handle->xrun[0] = true;
\r
9353 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9354 error( RtAudioError::WARNING );
\r
9355 // Continue on to input section.
\r
9359 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9361 // Setup parameters.
\r
9362 if ( stream_.doConvertBuffer[1] ) {
\r
9363 buffer = stream_.deviceBuffer;
\r
9364 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9365 format = stream_.deviceFormat[1];
\r
9368 buffer = stream_.userBuffer[1];
\r
9369 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9370 format = stream_.userFormat;
\r
9373 // Read samples from device.
\r
9374 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9376 if ( result == -1 ) {
\r
9377 // We'll assume this is an overrun, though there isn't a
\r
9378 // specific means for determining that.
\r
9379 handle->xrun[1] = true;
\r
9380 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9381 error( RtAudioError::WARNING );
\r
9385 // Do byte swapping if necessary.
\r
9386 if ( stream_.doByteSwap[1] )
\r
9387 byteSwapBuffer( buffer, samples, format );
\r
9389 // Do buffer conversion if necessary.
\r
9390 if ( stream_.doConvertBuffer[1] )
\r
9391 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9395 MUTEX_UNLOCK( &stream_.mutex );
\r
9397 RtApi::tickStreamTime();
\r
9398 if ( doStopStream == 1 ) this->stopStream();
\r
9401 static void *ossCallbackHandler( void *ptr )
\r
9403 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9404 RtApiOss *object = (RtApiOss *) info->object;
\r
9405 bool *isRunning = &info->isRunning;
\r
9407 while ( *isRunning == true ) {
\r
9408 pthread_testcancel();
\r
9409 object->callbackEvent();
\r
9412 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
9425 // This method can be modified to control the behavior of error
\r
9426 // message printing.
\r
9427 void RtApi :: error( RtAudioError::Type type )
\r
9429 errorStream_.str(""); // clear the ostringstream
\r
9431 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9432 if ( errorCallback ) {
\r
9433 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9435 if ( firstErrorOccurred_ )
\r
9438 firstErrorOccurred_ = true;
\r
9439 const std::string errorMessage = errorText_;
\r
9441 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9442 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9446 errorCallback( type, errorMessage );
\r
9447 firstErrorOccurred_ = false;
\r
9451 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9452 std::cerr << '\n' << errorText_ << "\n\n";
\r
9453 else if ( type != RtAudioError::WARNING )
\r
9454 throw( RtAudioError( errorText_, type ) );
\r
9457 void RtApi :: verifyStream()
\r
9459 if ( stream_.state == STREAM_CLOSED ) {
\r
9460 errorText_ = "RtApi:: a stream is not open!";
\r
9461 error( RtAudioError::INVALID_USE );
\r
9465 void RtApi :: clearStreamInfo()
\r
9467 stream_.mode = UNINITIALIZED;
\r
9468 stream_.state = STREAM_CLOSED;
\r
9469 stream_.sampleRate = 0;
\r
9470 stream_.bufferSize = 0;
\r
9471 stream_.nBuffers = 0;
\r
9472 stream_.userFormat = 0;
\r
9473 stream_.userInterleaved = true;
\r
9474 stream_.streamTime = 0.0;
\r
9475 stream_.apiHandle = 0;
\r
9476 stream_.deviceBuffer = 0;
\r
9477 stream_.callbackInfo.callback = 0;
\r
9478 stream_.callbackInfo.userData = 0;
\r
9479 stream_.callbackInfo.isRunning = false;
\r
9480 stream_.callbackInfo.errorCallback = 0;
\r
9481 for ( int i=0; i<2; i++ ) {
\r
9482 stream_.device[i] = 11111;
\r
9483 stream_.doConvertBuffer[i] = false;
\r
9484 stream_.deviceInterleaved[i] = true;
\r
9485 stream_.doByteSwap[i] = false;
\r
9486 stream_.nUserChannels[i] = 0;
\r
9487 stream_.nDeviceChannels[i] = 0;
\r
9488 stream_.channelOffset[i] = 0;
\r
9489 stream_.deviceFormat[i] = 0;
\r
9490 stream_.latency[i] = 0;
\r
9491 stream_.userBuffer[i] = 0;
\r
9492 stream_.convertInfo[i].channels = 0;
\r
9493 stream_.convertInfo[i].inJump = 0;
\r
9494 stream_.convertInfo[i].outJump = 0;
\r
9495 stream_.convertInfo[i].inFormat = 0;
\r
9496 stream_.convertInfo[i].outFormat = 0;
\r
9497 stream_.convertInfo[i].inOffset.clear();
\r
9498 stream_.convertInfo[i].outOffset.clear();
\r
9502 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9504 if ( format == RTAUDIO_SINT16 )
\r
9506 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9508 else if ( format == RTAUDIO_FLOAT64 )
\r
9510 else if ( format == RTAUDIO_SINT24 )
\r
9512 else if ( format == RTAUDIO_SINT8 )
\r
9515 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9516 error( RtAudioError::WARNING );
\r
9521 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9523 if ( mode == INPUT ) { // convert device to user buffer
\r
9524 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9525 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9526 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9527 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9529 else { // convert user to device buffer
\r
9530 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9531 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9532 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9533 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9536 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9537 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9539 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9541 // Set up the interleave/deinterleave offsets.
\r
9542 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9543 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9544 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9545 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9546 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9547 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9548 stream_.convertInfo[mode].inJump = 1;
\r
9552 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9553 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9554 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9555 stream_.convertInfo[mode].outJump = 1;
\r
9559 else { // no (de)interleaving
\r
9560 if ( stream_.userInterleaved ) {
\r
9561 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9562 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9563 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9567 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9568 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9569 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9570 stream_.convertInfo[mode].inJump = 1;
\r
9571 stream_.convertInfo[mode].outJump = 1;
\r
9576 // Add channel offset.
\r
9577 if ( firstChannel > 0 ) {
\r
9578 if ( stream_.deviceInterleaved[mode] ) {
\r
9579 if ( mode == OUTPUT ) {
\r
9580 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9581 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9584 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9585 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9589 if ( mode == OUTPUT ) {
\r
9590 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9591 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9594 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9595 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9601 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9603 // This function does format conversion, input/output channel compensation, and
\r
9604 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9605 // the lower three bytes of a 32-bit integer.
\r
9607 // Clear our device buffer when in/out duplex device channels are different
\r
9608 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9609 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9610 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9613 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9615 Float64 *out = (Float64 *)outBuffer;
\r
9617 if (info.inFormat == RTAUDIO_SINT8) {
\r
9618 signed char *in = (signed char *)inBuffer;
\r
9619 scale = 1.0 / 127.5;
\r
9620 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9621 for (j=0; j<info.channels; j++) {
\r
9622 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9623 out[info.outOffset[j]] += 0.5;
\r
9624 out[info.outOffset[j]] *= scale;
\r
9626 in += info.inJump;
\r
9627 out += info.outJump;
\r
9630 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9631 Int16 *in = (Int16 *)inBuffer;
\r
9632 scale = 1.0 / 32767.5;
\r
9633 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9634 for (j=0; j<info.channels; j++) {
\r
9635 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9636 out[info.outOffset[j]] += 0.5;
\r
9637 out[info.outOffset[j]] *= scale;
\r
9639 in += info.inJump;
\r
9640 out += info.outJump;
\r
9643 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9644 Int24 *in = (Int24 *)inBuffer;
\r
9645 scale = 1.0 / 8388607.5;
\r
9646 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9647 for (j=0; j<info.channels; j++) {
\r
9648 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9649 out[info.outOffset[j]] += 0.5;
\r
9650 out[info.outOffset[j]] *= scale;
\r
9652 in += info.inJump;
\r
9653 out += info.outJump;
\r
9656 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9657 Int32 *in = (Int32 *)inBuffer;
\r
9658 scale = 1.0 / 2147483647.5;
\r
9659 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9660 for (j=0; j<info.channels; j++) {
\r
9661 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9662 out[info.outOffset[j]] += 0.5;
\r
9663 out[info.outOffset[j]] *= scale;
\r
9665 in += info.inJump;
\r
9666 out += info.outJump;
\r
9669 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9670 Float32 *in = (Float32 *)inBuffer;
\r
9671 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9672 for (j=0; j<info.channels; j++) {
\r
9673 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9675 in += info.inJump;
\r
9676 out += info.outJump;
\r
9679 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9680 // Channel compensation and/or (de)interleaving only.
\r
9681 Float64 *in = (Float64 *)inBuffer;
\r
9682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9683 for (j=0; j<info.channels; j++) {
\r
9684 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9686 in += info.inJump;
\r
9687 out += info.outJump;
\r
9691 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9693 Float32 *out = (Float32 *)outBuffer;
\r
9695 if (info.inFormat == RTAUDIO_SINT8) {
\r
9696 signed char *in = (signed char *)inBuffer;
\r
9697 scale = (Float32) ( 1.0 / 127.5 );
\r
9698 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9699 for (j=0; j<info.channels; j++) {
\r
9700 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9701 out[info.outOffset[j]] += 0.5;
\r
9702 out[info.outOffset[j]] *= scale;
\r
9704 in += info.inJump;
\r
9705 out += info.outJump;
\r
9708 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9709 Int16 *in = (Int16 *)inBuffer;
\r
9710 scale = (Float32) ( 1.0 / 32767.5 );
\r
9711 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9712 for (j=0; j<info.channels; j++) {
\r
9713 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9714 out[info.outOffset[j]] += 0.5;
\r
9715 out[info.outOffset[j]] *= scale;
\r
9717 in += info.inJump;
\r
9718 out += info.outJump;
\r
9721 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9722 Int24 *in = (Int24 *)inBuffer;
\r
9723 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9724 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9725 for (j=0; j<info.channels; j++) {
\r
9726 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9727 out[info.outOffset[j]] += 0.5;
\r
9728 out[info.outOffset[j]] *= scale;
\r
9730 in += info.inJump;
\r
9731 out += info.outJump;
\r
9734 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9735 Int32 *in = (Int32 *)inBuffer;
\r
9736 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9737 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9738 for (j=0; j<info.channels; j++) {
\r
9739 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9740 out[info.outOffset[j]] += 0.5;
\r
9741 out[info.outOffset[j]] *= scale;
\r
9743 in += info.inJump;
\r
9744 out += info.outJump;
\r
9747 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9748 // Channel compensation and/or (de)interleaving only.
\r
9749 Float32 *in = (Float32 *)inBuffer;
\r
9750 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9751 for (j=0; j<info.channels; j++) {
\r
9752 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9754 in += info.inJump;
\r
9755 out += info.outJump;
\r
9758 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9759 Float64 *in = (Float64 *)inBuffer;
\r
9760 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9761 for (j=0; j<info.channels; j++) {
\r
9762 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9764 in += info.inJump;
\r
9765 out += info.outJump;
\r
9769 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9770 Int32 *out = (Int32 *)outBuffer;
\r
9771 if (info.inFormat == RTAUDIO_SINT8) {
\r
9772 signed char *in = (signed char *)inBuffer;
\r
9773 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9774 for (j=0; j<info.channels; j++) {
\r
9775 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9776 out[info.outOffset[j]] <<= 24;
\r
9778 in += info.inJump;
\r
9779 out += info.outJump;
\r
9782 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9783 Int16 *in = (Int16 *)inBuffer;
\r
9784 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9785 for (j=0; j<info.channels; j++) {
\r
9786 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9787 out[info.outOffset[j]] <<= 16;
\r
9789 in += info.inJump;
\r
9790 out += info.outJump;
\r
9793 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9794 Int24 *in = (Int24 *)inBuffer;
\r
9795 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9796 for (j=0; j<info.channels; j++) {
\r
9797 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9798 out[info.outOffset[j]] <<= 8;
\r
9800 in += info.inJump;
\r
9801 out += info.outJump;
\r
9804 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9805 // Channel compensation and/or (de)interleaving only.
\r
9806 Int32 *in = (Int32 *)inBuffer;
\r
9807 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9808 for (j=0; j<info.channels; j++) {
\r
9809 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9811 in += info.inJump;
\r
9812 out += info.outJump;
\r
9815 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9816 Float32 *in = (Float32 *)inBuffer;
\r
9817 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9818 for (j=0; j<info.channels; j++) {
\r
9819 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9821 in += info.inJump;
\r
9822 out += info.outJump;
\r
9825 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9826 Float64 *in = (Float64 *)inBuffer;
\r
9827 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9828 for (j=0; j<info.channels; j++) {
\r
9829 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9831 in += info.inJump;
\r
9832 out += info.outJump;
\r
9836 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9837 Int24 *out = (Int24 *)outBuffer;
\r
9838 if (info.inFormat == RTAUDIO_SINT8) {
\r
9839 signed char *in = (signed char *)inBuffer;
\r
9840 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9841 for (j=0; j<info.channels; j++) {
\r
9842 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9843 //out[info.outOffset[j]] <<= 16;
\r
9845 in += info.inJump;
\r
9846 out += info.outJump;
\r
9849 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9850 Int16 *in = (Int16 *)inBuffer;
\r
9851 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9852 for (j=0; j<info.channels; j++) {
\r
9853 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9854 //out[info.outOffset[j]] <<= 8;
\r
9856 in += info.inJump;
\r
9857 out += info.outJump;
\r
9860 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9861 // Channel compensation and/or (de)interleaving only.
\r
9862 Int24 *in = (Int24 *)inBuffer;
\r
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9864 for (j=0; j<info.channels; j++) {
\r
9865 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9867 in += info.inJump;
\r
9868 out += info.outJump;
\r
9871 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9872 Int32 *in = (Int32 *)inBuffer;
\r
9873 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9874 for (j=0; j<info.channels; j++) {
\r
9875 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9876 //out[info.outOffset[j]] >>= 8;
\r
9878 in += info.inJump;
\r
9879 out += info.outJump;
\r
9882 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9883 Float32 *in = (Float32 *)inBuffer;
\r
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9885 for (j=0; j<info.channels; j++) {
\r
9886 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9888 in += info.inJump;
\r
9889 out += info.outJump;
\r
9892 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9893 Float64 *in = (Float64 *)inBuffer;
\r
9894 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9895 for (j=0; j<info.channels; j++) {
\r
9896 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9898 in += info.inJump;
\r
9899 out += info.outJump;
\r
9903 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9904 Int16 *out = (Int16 *)outBuffer;
\r
9905 if (info.inFormat == RTAUDIO_SINT8) {
\r
9906 signed char *in = (signed char *)inBuffer;
\r
9907 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9908 for (j=0; j<info.channels; j++) {
\r
9909 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9910 out[info.outOffset[j]] <<= 8;
\r
9912 in += info.inJump;
\r
9913 out += info.outJump;
\r
9916 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9917 // Channel compensation and/or (de)interleaving only.
\r
9918 Int16 *in = (Int16 *)inBuffer;
\r
9919 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9920 for (j=0; j<info.channels; j++) {
\r
9921 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9923 in += info.inJump;
\r
9924 out += info.outJump;
\r
9927 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9928 Int24 *in = (Int24 *)inBuffer;
\r
9929 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9930 for (j=0; j<info.channels; j++) {
\r
9931 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9933 in += info.inJump;
\r
9934 out += info.outJump;
\r
9937 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9938 Int32 *in = (Int32 *)inBuffer;
\r
9939 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9940 for (j=0; j<info.channels; j++) {
\r
9941 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9943 in += info.inJump;
\r
9944 out += info.outJump;
\r
9947 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9948 Float32 *in = (Float32 *)inBuffer;
\r
9949 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9950 for (j=0; j<info.channels; j++) {
\r
9951 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9953 in += info.inJump;
\r
9954 out += info.outJump;
\r
9957 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9958 Float64 *in = (Float64 *)inBuffer;
\r
9959 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9960 for (j=0; j<info.channels; j++) {
\r
9961 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9963 in += info.inJump;
\r
9964 out += info.outJump;
\r
9968 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9969 signed char *out = (signed char *)outBuffer;
\r
9970 if (info.inFormat == RTAUDIO_SINT8) {
\r
9971 // Channel compensation and/or (de)interleaving only.
\r
9972 signed char *in = (signed char *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9977 in += info.inJump;
\r
9978 out += info.outJump;
\r
9981 if (info.inFormat == RTAUDIO_SINT16) {
\r
9982 Int16 *in = (Int16 *)inBuffer;
\r
9983 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9984 for (j=0; j<info.channels; j++) {
\r
9985 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9987 in += info.inJump;
\r
9988 out += info.outJump;
\r
9991 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9992 Int24 *in = (Int24 *)inBuffer;
\r
9993 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9994 for (j=0; j<info.channels; j++) {
\r
9995 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
9997 in += info.inJump;
\r
9998 out += info.outJump;
\r
10001 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10002 Int32 *in = (Int32 *)inBuffer;
\r
10003 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10004 for (j=0; j<info.channels; j++) {
\r
10005 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10007 in += info.inJump;
\r
10008 out += info.outJump;
\r
10011 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10012 Float32 *in = (Float32 *)inBuffer;
\r
10013 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10014 for (j=0; j<info.channels; j++) {
\r
10015 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10017 in += info.inJump;
\r
10018 out += info.outJump;
\r
10021 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10022 Float64 *in = (Float64 *)inBuffer;
\r
10023 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10024 for (j=0; j<info.channels; j++) {
\r
10025 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10027 in += info.inJump;
\r
10028 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10038 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10040 register char val;
\r
10041 register char *ptr;
\r
10044 if ( format == RTAUDIO_SINT16 ) {
\r
10045 for ( unsigned int i=0; i<samples; i++ ) {
\r
10046 // Swap 1st and 2nd bytes.
\r
10048 *(ptr) = *(ptr+1);
\r
10051 // Increment 2 bytes.
\r
10055 else if ( format == RTAUDIO_SINT32 ||
\r
10056 format == RTAUDIO_FLOAT32 ) {
\r
10057 for ( unsigned int i=0; i<samples; i++ ) {
\r
10058 // Swap 1st and 4th bytes.
\r
10060 *(ptr) = *(ptr+3);
\r
10063 // Swap 2nd and 3rd bytes.
\r
10066 *(ptr) = *(ptr+1);
\r
10069 // Increment 3 more bytes.
\r
10073 else if ( format == RTAUDIO_SINT24 ) {
\r
10074 for ( unsigned int i=0; i<samples; i++ ) {
\r
10075 // Swap 1st and 3rd bytes.
\r
10077 *(ptr) = *(ptr+2);
\r
10080 // Increment 2 more bytes.
\r
10084 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10085 for ( unsigned int i=0; i<samples; i++ ) {
\r
10086 // Swap 1st and 8th bytes
\r
10088 *(ptr) = *(ptr+7);
\r
10091 // Swap 2nd and 7th bytes
\r
10094 *(ptr) = *(ptr+5);
\r
10097 // Swap 3rd and 6th bytes
\r
10100 *(ptr) = *(ptr+3);
\r
10103 // Swap 4th and 5th bytes
\r
10106 *(ptr) = *(ptr+1);
\r
10109 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r