1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers so the rest of the file is
// API-agnostic about the underlying threading primitive.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows builds use a CRITICAL_SECTION.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // Dummy (no-API) build: no real locking is required.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)                // no-op
  #define MUTEX_UNLOCK(A)              // no-op
#endif
\r
73 // *************************************************** //
\r
75 // RtAudio definitions.
\r
77 // *************************************************** //
\r
79 std::string RtAudio :: getVersion( void ) throw()
\r
81 return RTAUDIO_VERSION;
\r
84 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
88 // The order here will control the order of RtAudio's API search in
\r
90 #if defined(__UNIX_JACK__)
\r
91 apis.push_back( UNIX_JACK );
\r
93 #if defined(__LINUX_ALSA__)
\r
94 apis.push_back( LINUX_ALSA );
\r
96 #if defined(__LINUX_PULSE__)
\r
97 apis.push_back( LINUX_PULSE );
\r
99 #if defined(__LINUX_OSS__)
\r
100 apis.push_back( LINUX_OSS );
\r
102 #if defined(__WINDOWS_ASIO__)
\r
103 apis.push_back( WINDOWS_ASIO );
\r
105 #if defined(__WINDOWS_WASAPI__)
\r
106 apis.push_back( WINDOWS_WASAPI );
\r
108 #if defined(__WINDOWS_DS__)
\r
109 apis.push_back( WINDOWS_DS );
\r
111 #if defined(__MACOSX_CORE__)
\r
112 apis.push_back( MACOSX_CORE );
\r
114 #if defined(__RTAUDIO_DUMMY__)
\r
115 apis.push_back( RTAUDIO_DUMMY );
\r
119 void RtAudio :: openRtApi( RtAudio::Api api )
\r
125 #if defined(__UNIX_JACK__)
\r
126 if ( api == UNIX_JACK )
\r
127 rtapi_ = new RtApiJack();
\r
129 #if defined(__LINUX_ALSA__)
\r
130 if ( api == LINUX_ALSA )
\r
131 rtapi_ = new RtApiAlsa();
\r
133 #if defined(__LINUX_PULSE__)
\r
134 if ( api == LINUX_PULSE )
\r
135 rtapi_ = new RtApiPulse();
\r
137 #if defined(__LINUX_OSS__)
\r
138 if ( api == LINUX_OSS )
\r
139 rtapi_ = new RtApiOss();
\r
141 #if defined(__WINDOWS_ASIO__)
\r
142 if ( api == WINDOWS_ASIO )
\r
143 rtapi_ = new RtApiAsio();
\r
145 #if defined(__WINDOWS_WASAPI__)
\r
146 if ( api == WINDOWS_WASAPI )
\r
147 rtapi_ = new RtApiWasapi();
\r
149 #if defined(__WINDOWS_DS__)
\r
150 if ( api == WINDOWS_DS )
\r
151 rtapi_ = new RtApiDs();
\r
153 #if defined(__MACOSX_CORE__)
\r
154 if ( api == MACOSX_CORE )
\r
155 rtapi_ = new RtApiCore();
\r
157 #if defined(__RTAUDIO_DUMMY__)
\r
158 if ( api == RTAUDIO_DUMMY )
\r
159 rtapi_ = new RtApiDummy();
\r
163 RtAudio :: RtAudio( RtAudio::Api api )
\r
167 if ( api != UNSPECIFIED ) {
\r
168 // Attempt to open the specified API.
\r
170 if ( rtapi_ ) return;
\r
172 // No compiled support for specified API value. Issue a debug
\r
173 // warning and continue as if no API was specified.
\r
174 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
177 // Iterate through the compiled APIs and return as soon as we find
\r
178 // one with at least one device or we reach the end of the list.
\r
179 std::vector< RtAudio::Api > apis;
\r
180 getCompiledApi( apis );
\r
181 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
182 openRtApi( apis[i] );
\r
183 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
186 if ( rtapi_ ) return;
\r
188 // It should not be possible to get here because the preprocessor
\r
189 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
190 // API-specific definitions are passed to the compiler. But just in
\r
191 // case something weird happens, we'll thow an error.
\r
192 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
193 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
196 RtAudio :: ~RtAudio() throw()
\r
202 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
203 RtAudio::StreamParameters *inputParameters,
\r
204 RtAudioFormat format, unsigned int sampleRate,
\r
205 unsigned int *bufferFrames,
\r
206 RtAudioCallback callback, void *userData,
\r
207 RtAudio::StreamOptions *options,
\r
208 RtAudioErrorCallback errorCallback )
\r
210 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
211 sampleRate, bufferFrames, callback,
\r
212 userData, options, errorCallback );
\r
215 // *************************************************** //
\r
217 // Public RtApi definitions (see end of file for
\r
218 // private or protected utility functions).
\r
220 // *************************************************** //
\r
224 stream_.state = STREAM_CLOSED;
\r
225 stream_.mode = UNINITIALIZED;
\r
226 stream_.apiHandle = 0;
\r
227 stream_.userBuffer[0] = 0;
\r
228 stream_.userBuffer[1] = 0;
\r
229 MUTEX_INITIALIZE( &stream_.mutex );
\r
230 showWarnings_ = true;
\r
231 firstErrorOccurred_ = false;
\r
236 MUTEX_DESTROY( &stream_.mutex );
\r
239 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
240 RtAudio::StreamParameters *iParams,
\r
241 RtAudioFormat format, unsigned int sampleRate,
\r
242 unsigned int *bufferFrames,
\r
243 RtAudioCallback callback, void *userData,
\r
244 RtAudio::StreamOptions *options,
\r
245 RtAudioErrorCallback errorCallback )
\r
247 if ( stream_.state != STREAM_CLOSED ) {
\r
248 errorText_ = "RtApi::openStream: a stream is already open!";
\r
249 error( RtAudioError::INVALID_USE );
\r
253 // Clear stream information potentially left from a previously open stream.
\r
256 if ( oParams && oParams->nChannels < 1 ) {
\r
257 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 if ( iParams && iParams->nChannels < 1 ) {
\r
263 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 if ( oParams == NULL && iParams == NULL ) {
\r
269 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
270 error( RtAudioError::INVALID_USE );
\r
274 if ( formatBytes(format) == 0 ) {
\r
275 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
276 error( RtAudioError::INVALID_USE );
\r
280 unsigned int nDevices = getDeviceCount();
\r
281 unsigned int oChannels = 0;
\r
283 oChannels = oParams->nChannels;
\r
284 if ( oParams->deviceId >= nDevices ) {
\r
285 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
286 error( RtAudioError::INVALID_USE );
\r
291 unsigned int iChannels = 0;
\r
293 iChannels = iParams->nChannels;
\r
294 if ( iParams->deviceId >= nDevices ) {
\r
295 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
296 error( RtAudioError::INVALID_USE );
\r
303 if ( oChannels > 0 ) {
\r
305 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
306 sampleRate, format, bufferFrames, options );
\r
307 if ( result == false ) {
\r
308 error( RtAudioError::SYSTEM_ERROR );
\r
313 if ( iChannels > 0 ) {
\r
315 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
316 sampleRate, format, bufferFrames, options );
\r
317 if ( result == false ) {
\r
318 if ( oChannels > 0 ) closeStream();
\r
319 error( RtAudioError::SYSTEM_ERROR );
\r
324 stream_.callbackInfo.callback = (void *) callback;
\r
325 stream_.callbackInfo.userData = userData;
\r
326 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
328 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
329 stream_.state = STREAM_STOPPED;
\r
332 unsigned int RtApi :: getDefaultInputDevice( void )
\r
334 // Should be implemented in subclasses if possible.
\r
338 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
340 // Should be implemented in subclasses if possible.
\r
344 void RtApi :: closeStream( void )
\r
346 // MUST be implemented in subclasses!
\r
350 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
351 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
352 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
353 RtAudio::StreamOptions * /*options*/ )
\r
355 // MUST be implemented in subclasses!
\r
359 void RtApi :: tickStreamTime( void )
\r
361 // Subclasses that do not provide their own implementation of
\r
362 // getStreamTime should call this function once per buffer I/O to
\r
363 // provide basic stream time support.
\r
365 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
367 #if defined( HAVE_GETTIMEOFDAY )
\r
368 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
372 long RtApi :: getStreamLatency( void )
\r
376 long totalLatency = 0;
\r
377 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
378 totalLatency = stream_.latency[0];
\r
379 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
380 totalLatency += stream_.latency[1];
\r
382 return totalLatency;
\r
385 double RtApi :: getStreamTime( void )
\r
389 #if defined( HAVE_GETTIMEOFDAY )
\r
390 // Return a very accurate estimate of the stream time by
\r
391 // adding in the elapsed time since the last tick.
\r
392 struct timeval then;
\r
393 struct timeval now;
\r
395 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
396 return stream_.streamTime;
\r
398 gettimeofday( &now, NULL );
\r
399 then = stream_.lastTickTimestamp;
\r
400 return stream_.streamTime +
\r
401 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
402 (then.tv_sec + 0.000001 * then.tv_usec));
\r
404 return stream_.streamTime;
\r
408 void RtApi :: setStreamTime( double time )
\r
413 stream_.streamTime = time;
\r
416 unsigned int RtApi :: getStreamSampleRate( void )
\r
420 return stream_.sampleRate;
\r
424 // *************************************************** //
\r
426 // OS/API-specific methods.
\r
428 // *************************************************** //
\r
430 #if defined(__MACOSX_CORE__)
\r
432 // The OS X CoreAudio API is designed to use a separate callback
\r
433 // procedure for each of its audio devices. A single RtAudio duplex
\r
434 // stream using two different devices is supported here, though it
\r
435 // cannot be guaranteed to always behave correctly because we cannot
\r
436 // synchronize these two callbacks.
\r
438 // A property listener is installed for over/underrun information.
\r
439 // However, no functionality is currently provided to allow property
\r
440 // listeners to trigger user handlers because it is unclear what could
\r
441 // be done if a critical stream parameter (buffer size, sample rate,
\r
442 // device disconnect) notification arrived. The listeners entail
\r
443 // quite a bit of extra code and most likely, a user program wouldn't
\r
444 // be prepared for the result anyway. However, we do provide a flag
\r
445 // to the client callback function to inform of an over/underrun.
\r
447 // A structure to hold various information related to the CoreAudio API
\r
449 struct CoreHandle {
\r
450 AudioDeviceID id[2]; // device ids
\r
451 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
452 AudioDeviceIOProcID procId[2];
\r
454 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
455 UInt32 nStreams[2]; // number of streams to use
\r
457 char *deviceBuffer;
\r
458 pthread_cond_t condition;
\r
459 int drainCounter; // Tracks callback counts when draining
\r
460 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
463 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
466 RtApiCore:: RtApiCore()
\r
468 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
469 // This is a largely undocumented but absolutely necessary
\r
470 // requirement starting with OS-X 10.6. If not called, queries and
\r
471 // updates to various audio device properties are not handled
\r
473 CFRunLoopRef theRunLoop = NULL;
\r
474 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
475 kAudioObjectPropertyScopeGlobal,
\r
476 kAudioObjectPropertyElementMaster };
\r
477 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
478 if ( result != noErr ) {
\r
479 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
480 error( RtAudioError::WARNING );
\r
485 RtApiCore :: ~RtApiCore()
\r
487 // The subclass destructor gets called before the base class
\r
488 // destructor, so close an existing stream before deallocating
\r
489 // apiDeviceId memory.
\r
490 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
493 unsigned int RtApiCore :: getDeviceCount( void )
\r
495 // Find out how many audio devices there are, if any.
\r
497 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
498 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
499 if ( result != noErr ) {
\r
500 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
501 error( RtAudioError::WARNING );
\r
505 return dataSize / sizeof( AudioDeviceID );
\r
508 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
510 unsigned int nDevices = getDeviceCount();
\r
511 if ( nDevices <= 1 ) return 0;
\r
514 UInt32 dataSize = sizeof( AudioDeviceID );
\r
515 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
516 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
517 if ( result != noErr ) {
\r
518 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
519 error( RtAudioError::WARNING );
\r
523 dataSize *= nDevices;
\r
524 AudioDeviceID deviceList[ nDevices ];
\r
525 property.mSelector = kAudioHardwarePropertyDevices;
\r
526 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
527 if ( result != noErr ) {
\r
528 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
529 error( RtAudioError::WARNING );
\r
533 for ( unsigned int i=0; i<nDevices; i++ )
\r
534 if ( id == deviceList[i] ) return i;
\r
536 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
537 error( RtAudioError::WARNING );
\r
541 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
543 unsigned int nDevices = getDeviceCount();
\r
544 if ( nDevices <= 1 ) return 0;
\r
547 UInt32 dataSize = sizeof( AudioDeviceID );
\r
548 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
549 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
550 if ( result != noErr ) {
\r
551 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
552 error( RtAudioError::WARNING );
\r
556 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
557 AudioDeviceID deviceList[ nDevices ];
\r
558 property.mSelector = kAudioHardwarePropertyDevices;
\r
559 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
560 if ( result != noErr ) {
\r
561 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
562 error( RtAudioError::WARNING );
\r
566 for ( unsigned int i=0; i<nDevices; i++ )
\r
567 if ( id == deviceList[i] ) return i;
\r
569 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
570 error( RtAudioError::WARNING );
\r
574 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
576 RtAudio::DeviceInfo info;
\r
577 info.probed = false;
\r
580 unsigned int nDevices = getDeviceCount();
\r
581 if ( nDevices == 0 ) {
\r
582 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
583 error( RtAudioError::INVALID_USE );
\r
587 if ( device >= nDevices ) {
\r
588 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
589 error( RtAudioError::INVALID_USE );
\r
593 AudioDeviceID deviceList[ nDevices ];
\r
594 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
595 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
596 kAudioObjectPropertyScopeGlobal,
\r
597 kAudioObjectPropertyElementMaster };
\r
598 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
599 0, NULL, &dataSize, (void *) &deviceList );
\r
600 if ( result != noErr ) {
\r
601 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
602 error( RtAudioError::WARNING );
\r
606 AudioDeviceID id = deviceList[ device ];
\r
608 // Get the device name.
\r
610 CFStringRef cfname;
\r
611 dataSize = sizeof( CFStringRef );
\r
612 property.mSelector = kAudioObjectPropertyManufacturer;
\r
613 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
614 if ( result != noErr ) {
\r
615 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtAudioError::WARNING );
\r
621 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
622 int length = CFStringGetLength(cfname);
\r
623 char *mname = (char *)malloc(length * 3 + 1);
\r
624 #if defined( UNICODE ) || defined( _UNICODE )
\r
625 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
627 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
629 info.name.append( (const char *)mname, strlen(mname) );
\r
630 info.name.append( ": " );
\r
631 CFRelease( cfname );
\r
634 property.mSelector = kAudioObjectPropertyName;
\r
635 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
636 if ( result != noErr ) {
\r
637 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
638 errorText_ = errorStream_.str();
\r
639 error( RtAudioError::WARNING );
\r
643 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
644 length = CFStringGetLength(cfname);
\r
645 char *name = (char *)malloc(length * 3 + 1);
\r
646 #if defined( UNICODE ) || defined( _UNICODE )
\r
647 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
649 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
651 info.name.append( (const char *)name, strlen(name) );
\r
652 CFRelease( cfname );
\r
655 // Get the output stream "configuration".
\r
656 AudioBufferList *bufferList = nil;
\r
657 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
658 property.mScope = kAudioDevicePropertyScopeOutput;
\r
659 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
661 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
662 if ( result != noErr || dataSize == 0 ) {
\r
663 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
664 errorText_ = errorStream_.str();
\r
665 error( RtAudioError::WARNING );
\r
669 // Allocate the AudioBufferList.
\r
670 bufferList = (AudioBufferList *) malloc( dataSize );
\r
671 if ( bufferList == NULL ) {
\r
672 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
673 error( RtAudioError::WARNING );
\r
677 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 free( bufferList );
\r
680 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
681 errorText_ = errorStream_.str();
\r
682 error( RtAudioError::WARNING );
\r
686 // Get output channel information.
\r
687 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
688 for ( i=0; i<nStreams; i++ )
\r
689 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
690 free( bufferList );
\r
692 // Get the input stream "configuration".
\r
693 property.mScope = kAudioDevicePropertyScopeInput;
\r
694 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
695 if ( result != noErr || dataSize == 0 ) {
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Allocate the AudioBufferList.
\r
703 bufferList = (AudioBufferList *) malloc( dataSize );
\r
704 if ( bufferList == NULL ) {
\r
705 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
706 error( RtAudioError::WARNING );
\r
710 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
711 if (result != noErr || dataSize == 0) {
\r
712 free( bufferList );
\r
713 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
714 errorText_ = errorStream_.str();
\r
715 error( RtAudioError::WARNING );
\r
719 // Get input channel information.
\r
720 nStreams = bufferList->mNumberBuffers;
\r
721 for ( i=0; i<nStreams; i++ )
\r
722 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
723 free( bufferList );
\r
725 // If device opens for both playback and capture, we determine the channels.
\r
726 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
727 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
729 // Probe the device sample rates.
\r
730 bool isInput = false;
\r
731 if ( info.outputChannels == 0 ) isInput = true;
\r
733 // Determine the supported sample rates.
\r
734 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
735 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
736 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
737 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
738 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
739 errorText_ = errorStream_.str();
\r
740 error( RtAudioError::WARNING );
\r
744 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
745 AudioValueRange rangeList[ nRanges ];
\r
746 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
747 if ( result != kAudioHardwareNoError ) {
\r
748 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
749 errorText_ = errorStream_.str();
\r
750 error( RtAudioError::WARNING );
\r
754 // The sample rate reporting mechanism is a bit of a mystery. It
\r
755 // seems that it can either return individual rates or a range of
\r
756 // rates. I assume that if the min / max range values are the same,
\r
757 // then that represents a single supported rate and if the min / max
\r
758 // range values are different, the device supports an arbitrary
\r
759 // range of values (though there might be multiple ranges, so we'll
\r
760 // use the most conservative range).
\r
761 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
762 bool haveValueRange = false;
\r
763 info.sampleRates.clear();
\r
764 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
765 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
766 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
768 haveValueRange = true;
\r
769 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
770 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
774 if ( haveValueRange ) {
\r
775 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
776 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
777 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
781 // Sort and remove any redundant values
\r
782 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
783 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
785 if ( info.sampleRates.size() == 0 ) {
\r
786 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
787 errorText_ = errorStream_.str();
\r
788 error( RtAudioError::WARNING );
\r
792 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
793 // Thus, any other "physical" formats supported by the device are of
\r
794 // no interest to the client.
\r
795 info.nativeFormats = RTAUDIO_FLOAT32;
\r
797 if ( info.outputChannels > 0 )
\r
798 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
799 if ( info.inputChannels > 0 )
\r
800 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
802 info.probed = true;
\r
806 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
807 const AudioTimeStamp* /*inNow*/,
\r
808 const AudioBufferList* inInputData,
\r
809 const AudioTimeStamp* /*inInputTime*/,
\r
810 AudioBufferList* outOutputData,
\r
811 const AudioTimeStamp* /*inOutputTime*/,
\r
812 void* infoPointer )
\r
814 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
816 RtApiCore *object = (RtApiCore *) info->object;
\r
817 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
818 return kAudioHardwareUnspecifiedError;
\r
820 return kAudioHardwareNoError;
\r
823 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
825 const AudioObjectPropertyAddress properties[],
\r
826 void* handlePointer )
\r
828 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
829 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
830 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
831 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
832 handle->xrun[1] = true;
\r
834 handle->xrun[0] = true;
\r
838 return kAudioHardwareNoError;
\r
841 static OSStatus rateListener( AudioObjectID inDevice,
\r
842 UInt32 /*nAddresses*/,
\r
843 const AudioObjectPropertyAddress /*properties*/[],
\r
844 void* ratePointer )
\r
846 Float64 *rate = (Float64 *) ratePointer;
\r
847 UInt32 dataSize = sizeof( Float64 );
\r
848 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
849 kAudioObjectPropertyScopeGlobal,
\r
850 kAudioObjectPropertyElementMaster };
\r
851 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
852 return kAudioHardwareNoError;
\r
855 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
856 unsigned int firstChannel, unsigned int sampleRate,
\r
857 RtAudioFormat format, unsigned int *bufferSize,
\r
858 RtAudio::StreamOptions *options )
\r
861 unsigned int nDevices = getDeviceCount();
\r
862 if ( nDevices == 0 ) {
\r
863 // This should not happen because a check is made before this function is called.
\r
864 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
868 if ( device >= nDevices ) {
\r
869 // This should not happen because a check is made before this function is called.
\r
870 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
874 AudioDeviceID deviceList[ nDevices ];
\r
875 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
876 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
877 kAudioObjectPropertyScopeGlobal,
\r
878 kAudioObjectPropertyElementMaster };
\r
879 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
880 0, NULL, &dataSize, (void *) &deviceList );
\r
881 if ( result != noErr ) {
\r
882 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
886 AudioDeviceID id = deviceList[ device ];
\r
888 // Setup for stream mode.
\r
889 bool isInput = false;
\r
890 if ( mode == INPUT ) {
\r
892 property.mScope = kAudioDevicePropertyScopeInput;
\r
895 property.mScope = kAudioDevicePropertyScopeOutput;
\r
897 // Get the stream "configuration".
\r
898 AudioBufferList *bufferList = nil;
\r
900 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
901 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
902 if ( result != noErr || dataSize == 0 ) {
\r
903 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
904 errorText_ = errorStream_.str();
\r
908 // Allocate the AudioBufferList.
\r
909 bufferList = (AudioBufferList *) malloc( dataSize );
\r
910 if ( bufferList == NULL ) {
\r
911 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
915 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
916 if (result != noErr || dataSize == 0) {
\r
917 free( bufferList );
\r
918 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
919 errorText_ = errorStream_.str();
\r
923 // Search for one or more streams that contain the desired number of
\r
924 // channels. CoreAudio devices can have an arbitrary number of
\r
925 // streams and each stream can have an arbitrary number of channels.
\r
926 // For each stream, a single buffer of interleaved samples is
\r
927 // provided. RtAudio prefers the use of one stream of interleaved
\r
928 // data or multiple consecutive single-channel streams. However, we
\r
929 // now support multiple consecutive multi-channel streams of
\r
930 // interleaved data as well.
\r
931 UInt32 iStream, offsetCounter = firstChannel;
\r
932 UInt32 nStreams = bufferList->mNumberBuffers;
\r
933 bool monoMode = false;
\r
934 bool foundStream = false;
\r
936 // First check that the device supports the requested number of
\r
938 UInt32 deviceChannels = 0;
\r
939 for ( iStream=0; iStream<nStreams; iStream++ )
\r
940 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
942 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
943 free( bufferList );
\r
944 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
945 errorText_ = errorStream_.str();
\r
949 // Look for a single stream meeting our needs.
\r
950 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
951 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
952 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
953 if ( streamChannels >= channels + offsetCounter ) {
\r
954 firstStream = iStream;
\r
955 channelOffset = offsetCounter;
\r
956 foundStream = true;
\r
959 if ( streamChannels > offsetCounter ) break;
\r
960 offsetCounter -= streamChannels;
\r
963 // If we didn't find a single stream above, then we should be able
\r
964 // to meet the channel specification with multiple streams.
\r
965 if ( foundStream == false ) {
\r
967 offsetCounter = firstChannel;
\r
968 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
969 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
970 if ( streamChannels > offsetCounter ) break;
\r
971 offsetCounter -= streamChannels;
\r
974 firstStream = iStream;
\r
975 channelOffset = offsetCounter;
\r
976 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
978 if ( streamChannels > 1 ) monoMode = false;
\r
979 while ( channelCounter > 0 ) {
\r
980 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
981 if ( streamChannels > 1 ) monoMode = false;
\r
982 channelCounter -= streamChannels;
\r
987 free( bufferList );
\r
989 // Determine the buffer size.
\r
990 AudioValueRange bufferRange;
\r
991 dataSize = sizeof( AudioValueRange );
\r
992 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
993 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
995 if ( result != noErr ) {
\r
996 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
997 errorText_ = errorStream_.str();
\r
1001 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1002 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1003 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1005 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1006 // need to make this setting for the master channel.
\r
1007 UInt32 theSize = (UInt32) *bufferSize;
\r
1008 dataSize = sizeof( UInt32 );
\r
1009 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1010 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1012 if ( result != noErr ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1019 // MUST be the same in both directions!
\r
1020 *bufferSize = theSize;
\r
1021 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1022 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1023 errorText_ = errorStream_.str();
\r
1027 stream_.bufferSize = *bufferSize;
\r
1028 stream_.nBuffers = 1;
\r
1030 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1031 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1033 dataSize = sizeof( hog_pid );
\r
1034 property.mSelector = kAudioDevicePropertyHogMode;
\r
1035 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1042 if ( hog_pid != getpid() ) {
\r
1043 hog_pid = getpid();
\r
1044 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1045 if ( result != noErr ) {
\r
1046 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1047 errorText_ = errorStream_.str();
\r
1053 // Check and if necessary, change the sample rate for the device.
\r
1054 Float64 nominalRate;
\r
1055 dataSize = sizeof( Float64 );
\r
1056 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1057 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1058 if ( result != noErr ) {
\r
1059 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1060 errorText_ = errorStream_.str();
\r
1064 // Only change the sample rate if off by more than 1 Hz.
\r
1065 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1067 // Set a property listener for the sample rate change
\r
1068 Float64 reportedRate = 0.0;
\r
1069 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1070 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1071 if ( result != noErr ) {
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 nominalRate = (Float64) sampleRate;
\r
1078 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1079 if ( result != noErr ) {
\r
1080 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1082 errorText_ = errorStream_.str();
\r
1086 // Now wait until the reported nominal rate is what we just set.
\r
1087 UInt32 microCounter = 0;
\r
1088 while ( reportedRate != nominalRate ) {
\r
1089 microCounter += 5000;
\r
1090 if ( microCounter > 5000000 ) break;
\r
1094 // Remove the property listener.
\r
1095 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1097 if ( microCounter > 5000000 ) {
\r
1098 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1099 errorText_ = errorStream_.str();
\r
1104 // Now set the stream format for all streams. Also, check the
\r
1105 // physical format of the device and change that if necessary.
\r
1106 AudioStreamBasicDescription description;
\r
1107 dataSize = sizeof( AudioStreamBasicDescription );
\r
1108 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1109 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1110 if ( result != noErr ) {
\r
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1112 errorText_ = errorStream_.str();
\r
1116 // Set the sample rate and data format id. However, only make the
\r
1117 // change if the sample rate is not within 1.0 of the desired
\r
1118 // rate and the format is not linear pcm.
\r
1119 bool updateFormat = false;
\r
1120 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1121 description.mSampleRate = (Float64) sampleRate;
\r
1122 updateFormat = true;
\r
1125 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1126 description.mFormatID = kAudioFormatLinearPCM;
\r
1127 updateFormat = true;
\r
1130 if ( updateFormat ) {
\r
1131 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1132 if ( result != noErr ) {
\r
1133 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1134 errorText_ = errorStream_.str();
\r
1139 // Now check the physical format.
\r
1140 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1141 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1142 if ( result != noErr ) {
\r
1143 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1144 errorText_ = errorStream_.str();
\r
1148 //std::cout << "Current physical stream format:" << std::endl;
\r
1149 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1150 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1151 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1152 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1154 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1155 description.mFormatID = kAudioFormatLinearPCM;
\r
1156 //description.mSampleRate = (Float64) sampleRate;
\r
1157 AudioStreamBasicDescription testDescription = description;
\r
1158 UInt32 formatFlags;
\r
1160 // We'll try higher bit rates first and then work our way down.
\r
1161 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1164 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1165 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1166 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1167 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1168 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1169 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1170 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1171 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1172 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1173 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1175 bool setPhysicalFormat = false;
\r
1176 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1177 testDescription = description;
\r
1178 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1179 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1180 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1181 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1183 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1184 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1185 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1186 if ( result == noErr ) {
\r
1187 setPhysicalFormat = true;
\r
1188 //std::cout << "Updated physical stream format:" << std::endl;
\r
1189 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1190 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1191 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1192 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1197 if ( !setPhysicalFormat ) {
\r
1198 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1199 errorText_ = errorStream_.str();
\r
1202 } // done setting virtual/physical formats.
\r
1204 // Get the stream / device latency.
\r
1206 dataSize = sizeof( UInt32 );
\r
1207 property.mSelector = kAudioDevicePropertyLatency;
\r
1208 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1209 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1210 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1212 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1213 errorText_ = errorStream_.str();
\r
1214 error( RtAudioError::WARNING );
\r
1218 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1219 // always be presented in native-endian format, so we should never
\r
1220 // need to byte swap.
\r
1221 stream_.doByteSwap[mode] = false;
\r
1223 // From the CoreAudio documentation, PCM data must be supplied as
\r
1225 stream_.userFormat = format;
\r
1226 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1228 if ( streamCount == 1 )
\r
1229 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1230 else // multiple streams
\r
1231 stream_.nDeviceChannels[mode] = channels;
\r
1232 stream_.nUserChannels[mode] = channels;
\r
1233 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1234 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1235 else stream_.userInterleaved = true;
\r
1236 stream_.deviceInterleaved[mode] = true;
\r
1237 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1239 // Set flags for buffer conversion.
\r
1240 stream_.doConvertBuffer[mode] = false;
\r
1241 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1243 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1244 stream_.doConvertBuffer[mode] = true;
\r
1245 if ( streamCount == 1 ) {
\r
1246 if ( stream_.nUserChannels[mode] > 1 &&
\r
1247 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1248 stream_.doConvertBuffer[mode] = true;
\r
1250 else if ( monoMode && stream_.userInterleaved )
\r
1251 stream_.doConvertBuffer[mode] = true;
\r
1253 // Allocate our CoreHandle structure for the stream.
\r
1254 CoreHandle *handle = 0;
\r
1255 if ( stream_.apiHandle == 0 ) {
\r
1257 handle = new CoreHandle;
\r
1259 catch ( std::bad_alloc& ) {
\r
1260 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1264 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1265 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1268 stream_.apiHandle = (void *) handle;
\r
1271 handle = (CoreHandle *) stream_.apiHandle;
\r
1272 handle->iStream[mode] = firstStream;
\r
1273 handle->nStreams[mode] = streamCount;
\r
1274 handle->id[mode] = id;
\r
1276 // Allocate necessary internal buffers.
\r
1277 unsigned long bufferBytes;
\r
1278 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1279 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1280 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1281 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1282 if ( stream_.userBuffer[mode] == NULL ) {
\r
1283 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1287 // If possible, we will make use of the CoreAudio stream buffers as
\r
1288 // "device buffers". However, we can't do this if using multiple
\r
1290 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1292 bool makeBuffer = true;
\r
1293 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1294 if ( mode == INPUT ) {
\r
1295 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1296 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1297 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1301 if ( makeBuffer ) {
\r
1302 bufferBytes *= *bufferSize;
\r
1303 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1304 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1305 if ( stream_.deviceBuffer == NULL ) {
\r
1306 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1312 stream_.sampleRate = sampleRate;
\r
1313 stream_.device[mode] = device;
\r
1314 stream_.state = STREAM_STOPPED;
\r
1315 stream_.callbackInfo.object = (void *) this;
\r
1317 // Setup the buffer conversion information structure.
\r
1318 if ( stream_.doConvertBuffer[mode] ) {
\r
1319 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1320 else setConvertInfo( mode, channelOffset );
\r
1323 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1324 // Only one callback procedure per device.
\r
1325 stream_.mode = DUPLEX;
\r
1327 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1328 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1330 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1331 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1333 if ( result != noErr ) {
\r
1334 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1335 errorText_ = errorStream_.str();
\r
1338 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1339 stream_.mode = DUPLEX;
\r
1341 stream_.mode = mode;
\r
1344 // Setup the device property listener for over/underload.
\r
1345 property.mSelector = kAudioDeviceProcessorOverload;
\r
1346 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1347 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1353 pthread_cond_destroy( &handle->condition );
\r
1355 stream_.apiHandle = 0;
\r
1358 for ( int i=0; i<2; i++ ) {
\r
1359 if ( stream_.userBuffer[i] ) {
\r
1360 free( stream_.userBuffer[i] );
\r
1361 stream_.userBuffer[i] = 0;
\r
1365 if ( stream_.deviceBuffer ) {
\r
1366 free( stream_.deviceBuffer );
\r
1367 stream_.deviceBuffer = 0;
\r
1370 stream_.state = STREAM_CLOSED;
\r
1374 void RtApiCore :: closeStream( void )
\r
1376 if ( stream_.state == STREAM_CLOSED ) {
\r
1377 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1378 error( RtAudioError::WARNING );
\r
1382 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1383 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1384 if ( stream_.state == STREAM_RUNNING )
\r
1385 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1386 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1387 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1389 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1390 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1394 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1395 if ( stream_.state == STREAM_RUNNING )
\r
1396 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1397 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1398 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1400 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1401 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1405 for ( int i=0; i<2; i++ ) {
\r
1406 if ( stream_.userBuffer[i] ) {
\r
1407 free( stream_.userBuffer[i] );
\r
1408 stream_.userBuffer[i] = 0;
\r
1412 if ( stream_.deviceBuffer ) {
\r
1413 free( stream_.deviceBuffer );
\r
1414 stream_.deviceBuffer = 0;
\r
1417 // Destroy pthread condition variable.
\r
1418 pthread_cond_destroy( &handle->condition );
\r
1420 stream_.apiHandle = 0;
\r
1422 stream_.mode = UNINITIALIZED;
\r
1423 stream_.state = STREAM_CLOSED;
\r
1426 void RtApiCore :: startStream( void )
\r
1429 if ( stream_.state == STREAM_RUNNING ) {
\r
1430 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1431 error( RtAudioError::WARNING );
\r
1435 OSStatus result = noErr;
\r
1436 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1437 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1439 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1440 if ( result != noErr ) {
\r
1441 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1442 errorText_ = errorStream_.str();
\r
1447 if ( stream_.mode == INPUT ||
\r
1448 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1450 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1451 if ( result != noErr ) {
\r
1452 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1453 errorText_ = errorStream_.str();
\r
1458 handle->drainCounter = 0;
\r
1459 handle->internalDrain = false;
\r
1460 stream_.state = STREAM_RUNNING;
\r
1463 if ( result == noErr ) return;
\r
1464 error( RtAudioError::SYSTEM_ERROR );
\r
1467 void RtApiCore :: stopStream( void )
\r
1470 if ( stream_.state == STREAM_STOPPED ) {
\r
1471 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1472 error( RtAudioError::WARNING );
\r
1476 OSStatus result = noErr;
\r
1477 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1480 if ( handle->drainCounter == 0 ) {
\r
1481 handle->drainCounter = 2;
\r
1482 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1485 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1486 if ( result != noErr ) {
\r
1487 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1488 errorText_ = errorStream_.str();
\r
1493 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1495 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1496 if ( result != noErr ) {
\r
1497 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1498 errorText_ = errorStream_.str();
\r
1503 stream_.state = STREAM_STOPPED;
\r
1506 if ( result == noErr ) return;
\r
1507 error( RtAudioError::SYSTEM_ERROR );
\r
1510 void RtApiCore :: abortStream( void )
\r
1513 if ( stream_.state == STREAM_STOPPED ) {
\r
1514 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1515 error( RtAudioError::WARNING );
\r
1519 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1520 handle->drainCounter = 2;
\r
1525 // This function will be called by a spawned thread when the user
\r
1526 // callback function signals that the stream should be stopped or
\r
1527 // aborted. It is better to handle it this way because the
\r
1528 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1529 // function is called.
\r
1530 static void *coreStopStream( void *ptr )
\r
1532 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1533 RtApiCore *object = (RtApiCore *) info->object;
\r
1535 object->stopStream();
\r
1536 pthread_exit( NULL );
\r
1539 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1540 const AudioBufferList *inBufferList,
\r
1541 const AudioBufferList *outBufferList )
\r
1543 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1544 if ( stream_.state == STREAM_CLOSED ) {
\r
1545 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1546 error( RtAudioError::WARNING );
\r
1550 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1551 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1553 // Check if we were draining the stream and signal is finished.
\r
1554 if ( handle->drainCounter > 3 ) {
\r
1555 ThreadHandle threadId;
\r
1557 stream_.state = STREAM_STOPPING;
\r
1558 if ( handle->internalDrain == true )
\r
1559 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1560 else // external call to stopStream()
\r
1561 pthread_cond_signal( &handle->condition );
\r
1565 AudioDeviceID outputDevice = handle->id[0];
\r
1567 // Invoke user callback to get fresh output data UNLESS we are
\r
1568 // draining stream or duplex mode AND the input/output devices are
\r
1569 // different AND this function is called for the input device.
\r
1570 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1571 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1572 double streamTime = getStreamTime();
\r
1573 RtAudioStreamStatus status = 0;
\r
1574 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1575 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1576 handle->xrun[0] = false;
\r
1578 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1579 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1580 handle->xrun[1] = false;
\r
1583 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1584 stream_.bufferSize, streamTime, status, info->userData );
\r
1585 if ( cbReturnValue == 2 ) {
\r
1586 stream_.state = STREAM_STOPPING;
\r
1587 handle->drainCounter = 2;
\r
1591 else if ( cbReturnValue == 1 ) {
\r
1592 handle->drainCounter = 1;
\r
1593 handle->internalDrain = true;
\r
1597 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1599 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1601 if ( handle->nStreams[0] == 1 ) {
\r
1602 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1604 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1606 else { // fill multiple streams with zeros
\r
1607 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1608 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1610 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1614 else if ( handle->nStreams[0] == 1 ) {
\r
1615 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1616 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1617 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1619 else { // copy from user buffer
\r
1620 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1621 stream_.userBuffer[0],
\r
1622 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1625 else { // fill multiple streams
\r
1626 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1627 if ( stream_.doConvertBuffer[0] ) {
\r
1628 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1629 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1632 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1633 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1634 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1635 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1636 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1639 else { // fill multiple multi-channel streams with interleaved data
\r
1640 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1641 Float32 *out, *in;
\r
1643 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1644 UInt32 inChannels = stream_.nUserChannels[0];
\r
1645 if ( stream_.doConvertBuffer[0] ) {
\r
1646 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1647 inChannels = stream_.nDeviceChannels[0];
\r
1650 if ( inInterleaved ) inOffset = 1;
\r
1651 else inOffset = stream_.bufferSize;
\r
1653 channelsLeft = inChannels;
\r
1654 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1656 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1657 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1660 // Account for possible channel offset in first stream
\r
1661 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1662 streamChannels -= stream_.channelOffset[0];
\r
1663 outJump = stream_.channelOffset[0];
\r
1667 // Account for possible unfilled channels at end of the last stream
\r
1668 if ( streamChannels > channelsLeft ) {
\r
1669 outJump = streamChannels - channelsLeft;
\r
1670 streamChannels = channelsLeft;
\r
1673 // Determine input buffer offsets and skips
\r
1674 if ( inInterleaved ) {
\r
1675 inJump = inChannels;
\r
1676 in += inChannels - channelsLeft;
\r
1680 in += (inChannels - channelsLeft) * inOffset;
\r
1683 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1684 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1685 *out++ = in[j*inOffset];
\r
1690 channelsLeft -= streamChannels;
\r
1696 // Don't bother draining input
\r
1697 if ( handle->drainCounter ) {
\r
1698 handle->drainCounter++;
\r
1702 AudioDeviceID inputDevice;
\r
1703 inputDevice = handle->id[1];
\r
1704 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1706 if ( handle->nStreams[1] == 1 ) {
\r
1707 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1708 convertBuffer( stream_.userBuffer[1],
\r
1709 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1710 stream_.convertInfo[1] );
\r
1712 else { // copy to user buffer
\r
1713 memcpy( stream_.userBuffer[1],
\r
1714 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1715 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1718 else { // read from multiple streams
\r
1719 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1720 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1722 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1723 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1724 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1725 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1726 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1729 else { // read from multiple multi-channel streams
\r
1730 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1731 Float32 *out, *in;
\r
1733 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1734 UInt32 outChannels = stream_.nUserChannels[1];
\r
1735 if ( stream_.doConvertBuffer[1] ) {
\r
1736 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1737 outChannels = stream_.nDeviceChannels[1];
\r
1740 if ( outInterleaved ) outOffset = 1;
\r
1741 else outOffset = stream_.bufferSize;
\r
1743 channelsLeft = outChannels;
\r
1744 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1746 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1747 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1750 // Account for possible channel offset in first stream
\r
1751 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1752 streamChannels -= stream_.channelOffset[1];
\r
1753 inJump = stream_.channelOffset[1];
\r
1757 // Account for possible unread channels at end of the last stream
\r
1758 if ( streamChannels > channelsLeft ) {
\r
1759 inJump = streamChannels - channelsLeft;
\r
1760 streamChannels = channelsLeft;
\r
1763 // Determine output buffer offsets and skips
\r
1764 if ( outInterleaved ) {
\r
1765 outJump = outChannels;
\r
1766 out += outChannels - channelsLeft;
\r
1770 out += (outChannels - channelsLeft) * outOffset;
\r
1773 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1774 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1775 out[j*outOffset] = *in++;
\r
1780 channelsLeft -= streamChannels;
\r
1784 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1785 convertBuffer( stream_.userBuffer[1],
\r
1786 stream_.deviceBuffer,
\r
1787 stream_.convertInfo[1] );
\r
1793 //MUTEX_UNLOCK( &stream_.mutex );
\r
1795 RtApi::tickStreamTime();
\r
1799 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1803 case kAudioHardwareNotRunningError:
\r
1804 return "kAudioHardwareNotRunningError";
\r
1806 case kAudioHardwareUnspecifiedError:
\r
1807 return "kAudioHardwareUnspecifiedError";
\r
1809 case kAudioHardwareUnknownPropertyError:
\r
1810 return "kAudioHardwareUnknownPropertyError";
\r
1812 case kAudioHardwareBadPropertySizeError:
\r
1813 return "kAudioHardwareBadPropertySizeError";
\r
1815 case kAudioHardwareIllegalOperationError:
\r
1816 return "kAudioHardwareIllegalOperationError";
\r
1818 case kAudioHardwareBadObjectError:
\r
1819 return "kAudioHardwareBadObjectError";
\r
1821 case kAudioHardwareBadDeviceError:
\r
1822 return "kAudioHardwareBadDeviceError";
\r
1824 case kAudioHardwareBadStreamError:
\r
1825 return "kAudioHardwareBadStreamError";
\r
1827 case kAudioHardwareUnsupportedOperationError:
\r
1828 return "kAudioHardwareUnsupportedOperationError";
\r
1830 case kAudioDeviceUnsupportedFormatError:
\r
1831 return "kAudioDeviceUnsupportedFormatError";
\r
1833 case kAudioDevicePermissionsError:
\r
1834 return "kAudioDevicePermissionsError";
\r
1837 return "CoreAudio unknown error";
\r
1841 //******************** End of __MACOSX_CORE__ *********************//
\r
1844 #if defined(__UNIX_JACK__)
\r
1846 // JACK is a low-latency audio server, originally written for the
\r
1847 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1848 // connect a number of different applications to an audio device, as
\r
1849 // well as allowing them to share audio between themselves.
\r
1851 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1852 // have ports connected to the server. The JACK server is typically
\r
1853 // started in a terminal as follows:
\r
1855 //    jackd -d alsa -d hw:0
\r
1857 // or through an interface program such as qjackctl. Many of the
\r
1858 // parameters normally set for a stream are fixed by the JACK server
\r
1859 // and can be specified when the JACK server is started. In
\r
1862 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1864 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1865 // frames, and number of buffers = 4. Once the server is running, it
\r
1866 // is not possible to override these values. If the values are not
\r
1867 // specified in the command-line, the JACK server uses default values.
\r
1869 // The JACK server does not have to be running when an instance of
\r
1870 // RtApiJack is created, though the function getDeviceCount() will
\r
1871 // report 0 devices found until JACK has been started. When no
\r
1872 // devices are available (i.e., the JACK server is not running), a
\r
1873 // stream cannot be opened.
\r
1875 #include <jack/jack.h>
\r
1876 #include <unistd.h>
\r
1879 // A structure to hold various information related to the Jack API
\r
1880 // implementation.
\r
1881 struct JackHandle {
\r
1882 jack_client_t *client;
\r
1883 jack_port_t **ports[2];
\r
1884 std::string deviceName[2];
\r
1886 pthread_cond_t condition;
\r
1887 int drainCounter; // Tracks callback counts when draining
\r
1888 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1891 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed in non-debug builds to suppress JACK's
// internal error reporting (see the RtApiJack constructor).
static void jackSilentError( const char * ) {}
\r
1896 RtApiJack :: RtApiJack()
\r
1898 // Nothing to do here.
\r
1899 #if !defined(__RTAUDIO_DEBUG__)
\r
1900 // Turn off Jack's internal error reporting.
\r
1901 jack_set_error_function( &jackSilentError );
\r
1905 RtApiJack :: ~RtApiJack()
\r
1907 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1910 unsigned int RtApiJack :: getDeviceCount( void )
\r
1912 // See if we can become a jack client.
\r
1913 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1914 jack_status_t *status = NULL;
\r
1915 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1916 if ( client == 0 ) return 0;
\r
1918 const char **ports;
\r
1919 std::string port, previousPort;
\r
1920 unsigned int nChannels = 0, nDevices = 0;
\r
1921 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1923 // Parse the port names up to the first colon (:).
\r
1924 size_t iColon = 0;
\r
1926 port = (char *) ports[ nChannels ];
\r
1927 iColon = port.find(":");
\r
1928 if ( iColon != std::string::npos ) {
\r
1929 port = port.substr( 0, iColon + 1 );
\r
1930 if ( port != previousPort ) {
\r
1932 previousPort = port;
\r
1935 } while ( ports[++nChannels] );
\r
1939 jack_client_close( client );
\r
1943 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1945 RtAudio::DeviceInfo info;
\r
1946 info.probed = false;
\r
1948 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1949 jack_status_t *status = NULL;
\r
1950 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1951 if ( client == 0 ) {
\r
1952 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1953 error( RtAudioError::WARNING );
\r
1957 const char **ports;
\r
1958 std::string port, previousPort;
\r
1959 unsigned int nPorts = 0, nDevices = 0;
\r
1960 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1962 // Parse the port names up to the first colon (:).
\r
1963 size_t iColon = 0;
\r
1965 port = (char *) ports[ nPorts ];
\r
1966 iColon = port.find(":");
\r
1967 if ( iColon != std::string::npos ) {
\r
1968 port = port.substr( 0, iColon );
\r
1969 if ( port != previousPort ) {
\r
1970 if ( nDevices == device ) info.name = port;
\r
1972 previousPort = port;
\r
1975 } while ( ports[++nPorts] );
\r
1979 if ( device >= nDevices ) {
\r
1980 jack_client_close( client );
\r
1981 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1982 error( RtAudioError::INVALID_USE );
\r
1986 // Get the current jack server sample rate.
\r
1987 info.sampleRates.clear();
\r
1988 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1990 // Count the available ports containing the client name as device
\r
1991 // channels. Jack "input ports" equal RtAudio output channels.
\r
1992 unsigned int nChannels = 0;
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.outputChannels = nChannels;
\r
2000 // Jack "output ports" equal RtAudio input channels.
\r
2002 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2004 while ( ports[ nChannels ] ) nChannels++;
\r
2006 info.inputChannels = nChannels;
\r
2009 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2010 jack_client_close(client);
\r
2011 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2012 error( RtAudioError::WARNING );
\r
2016 // If device opens for both playback and capture, we determine the channels.
\r
2017 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2018 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2020 // Jack always uses 32-bit floats.
\r
2021 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2023 // Jack doesn't provide default devices so we'll use the first available one.
\r
2024 if ( device == 0 && info.outputChannels > 0 )
\r
2025 info.isDefaultOutput = true;
\r
2026 if ( device == 0 && info.inputChannels > 0 )
\r
2027 info.isDefaultInput = true;
\r
2029 jack_client_close(client);
\r
2030 info.probed = true;
\r
2034 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2036 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2038 RtApiJack *object = (RtApiJack *) info->object;
\r
2039 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2044 // This function will be called by a spawned thread when the Jack
\r
2045 // server signals that it is shutting down. It is necessary to handle
\r
2046 // it this way because the jackShutdown() function must return before
\r
2047 // the jack_deactivate() function (in closeStream()) will return.
\r
2048 static void *jackCloseStream( void *ptr )
\r
2050 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 object->closeStream();
\r
2055 pthread_exit( NULL );
\r
2057 static void jackShutdown( void *infoPointer )
\r
2059 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2060 RtApiJack *object = (RtApiJack *) info->object;
\r
2062 // Check current stream state. If stopped, then we'll assume this
\r
2063 // was called as a result of a call to RtApiJack::stopStream (the
\r
2064 // deactivation of a client handle causes this function to be called).
\r
2065 // If not, we'll assume the Jack server is shutting down or some
\r
2066 // other problem occurred and we should close the stream.
\r
2067 if ( object->isStreamRunning() == false ) return;
\r
2069 ThreadHandle threadId;
\r
2070 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2071 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2074 static int jackXrun( void *infoPointer )
\r
2076 JackHandle *handle = (JackHandle *) infoPointer;
\r
2078 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2079 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2084 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2085 unsigned int firstChannel, unsigned int sampleRate,
\r
2086 RtAudioFormat format, unsigned int *bufferSize,
\r
2087 RtAudio::StreamOptions *options )
\r
2089 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2091 // Look for jack server and try to become a client (only do once per stream).
\r
2092 jack_client_t *client = 0;
\r
2093 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2094 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2095 jack_status_t *status = NULL;
\r
2096 if ( options && !options->streamName.empty() )
\r
2097 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2099 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2100 if ( client == 0 ) {
\r
2101 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2102 error( RtAudioError::WARNING );
\r
2107 // The handle must have been created on an earlier pass.
\r
2108 client = handle->client;
\r
2111 const char **ports;
\r
2112 std::string port, previousPort, deviceName;
\r
2113 unsigned int nPorts = 0, nDevices = 0;
\r
2114 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2116 // Parse the port names up to the first colon (:).
\r
2117 size_t iColon = 0;
\r
2119 port = (char *) ports[ nPorts ];
\r
2120 iColon = port.find(":");
\r
2121 if ( iColon != std::string::npos ) {
\r
2122 port = port.substr( 0, iColon );
\r
2123 if ( port != previousPort ) {
\r
2124 if ( nDevices == device ) deviceName = port;
\r
2126 previousPort = port;
\r
2129 } while ( ports[++nPorts] );
\r
2133 if ( device >= nDevices ) {
\r
2134 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2138 // Count the available ports containing the client name as device
\r
2139 // channels. Jack "input ports" equal RtAudio output channels.
\r
2140 unsigned int nChannels = 0;
\r
2141 unsigned long flag = JackPortIsInput;
\r
2142 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2143 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2145 while ( ports[ nChannels ] ) nChannels++;
\r
2149 // Compare the jack ports for specified client to the requested number of channels.
\r
2150 if ( nChannels < (channels + firstChannel) ) {
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2156 // Check the jack server sample rate.
\r
2157 unsigned int jackRate = jack_get_sample_rate( client );
\r
2158 if ( sampleRate != jackRate ) {
\r
2159 jack_client_close( client );
\r
2160 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2161 errorText_ = errorStream_.str();
\r
2164 stream_.sampleRate = jackRate;
\r
2166 // Get the latency of the JACK port.
\r
2167 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2168 if ( ports[ firstChannel ] ) {
\r
2169 // Added by Ge Wang
\r
2170 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2171 // the range (usually the min and max are equal)
\r
2172 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2173 // get the latency range
\r
2174 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2175 // be optimistic, use the min!
\r
2176 stream_.latency[mode] = latrange.min;
\r
2177 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2181 // The jack server always uses 32-bit floating-point data.
\r
2182 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2183 stream_.userFormat = format;
\r
2185 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2186 else stream_.userInterleaved = true;
\r
2188 // Jack always uses non-interleaved buffers.
\r
2189 stream_.deviceInterleaved[mode] = false;
\r
2191 // Jack always provides host byte-ordered data.
\r
2192 stream_.doByteSwap[mode] = false;
\r
2194 // Get the buffer size. The buffer size and number of buffers
\r
2195 // (periods) is set when the jack server is started.
\r
2196 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2197 *bufferSize = stream_.bufferSize;
\r
2199 stream_.nDeviceChannels[mode] = channels;
\r
2200 stream_.nUserChannels[mode] = channels;
\r
2202 // Set flags for buffer conversion.
\r
2203 stream_.doConvertBuffer[mode] = false;
\r
2204 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2205 stream_.doConvertBuffer[mode] = true;
\r
2206 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2207 stream_.nUserChannels[mode] > 1 )
\r
2208 stream_.doConvertBuffer[mode] = true;
\r
2210 // Allocate our JackHandle structure for the stream.
\r
2211 if ( handle == 0 ) {
\r
2213 handle = new JackHandle;
\r
2215 catch ( std::bad_alloc& ) {
\r
2216 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2220 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2221 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2224 stream_.apiHandle = (void *) handle;
\r
2225 handle->client = client;
\r
2227 handle->deviceName[mode] = deviceName;
\r
2229 // Allocate necessary internal buffers.
\r
2230 unsigned long bufferBytes;
\r
2231 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2232 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2233 if ( stream_.userBuffer[mode] == NULL ) {
\r
2234 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2238 if ( stream_.doConvertBuffer[mode] ) {
\r
2240 bool makeBuffer = true;
\r
2241 if ( mode == OUTPUT )
\r
2242 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2243 else { // mode == INPUT
\r
2244 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2245 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2246 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2247 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2251 if ( makeBuffer ) {
\r
2252 bufferBytes *= *bufferSize;
\r
2253 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2254 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2255 if ( stream_.deviceBuffer == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2262 // Allocate memory for the Jack ports (channels) identifiers.
\r
2263 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2264 if ( handle->ports[mode] == NULL ) {
\r
2265 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2269 stream_.device[mode] = device;
\r
2270 stream_.channelOffset[mode] = firstChannel;
\r
2271 stream_.state = STREAM_STOPPED;
\r
2272 stream_.callbackInfo.object = (void *) this;
\r
2274 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2275 // We had already set up the stream for output.
\r
2276 stream_.mode = DUPLEX;
\r
2278 stream_.mode = mode;
\r
2279 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2280 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2281 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2284 // Register our ports.
\r
2286 if ( mode == OUTPUT ) {
\r
2287 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2288 snprintf( label, 64, "outport %d", i );
\r
2289 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2290 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2294 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2295 snprintf( label, 64, "inport %d", i );
\r
2296 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2297 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2301 // Setup the buffer conversion information structure. We don't use
\r
2302 // buffers to do channel offsets, so we override that parameter
\r
2304 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2310 pthread_cond_destroy( &handle->condition );
\r
2311 jack_client_close( handle->client );
\r
2313 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2314 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2317 stream_.apiHandle = 0;
\r
2320 for ( int i=0; i<2; i++ ) {
\r
2321 if ( stream_.userBuffer[i] ) {
\r
2322 free( stream_.userBuffer[i] );
\r
2323 stream_.userBuffer[i] = 0;
\r
2327 if ( stream_.deviceBuffer ) {
\r
2328 free( stream_.deviceBuffer );
\r
2329 stream_.deviceBuffer = 0;
\r
2335 void RtApiJack :: closeStream( void )
\r
2337 if ( stream_.state == STREAM_CLOSED ) {
\r
2338 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2339 error( RtAudioError::WARNING );
\r
2343 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2346 if ( stream_.state == STREAM_RUNNING )
\r
2347 jack_deactivate( handle->client );
\r
2349 jack_client_close( handle->client );
\r
2353 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2354 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2355 pthread_cond_destroy( &handle->condition );
\r
2357 stream_.apiHandle = 0;
\r
2360 for ( int i=0; i<2; i++ ) {
\r
2361 if ( stream_.userBuffer[i] ) {
\r
2362 free( stream_.userBuffer[i] );
\r
2363 stream_.userBuffer[i] = 0;
\r
2367 if ( stream_.deviceBuffer ) {
\r
2368 free( stream_.deviceBuffer );
\r
2369 stream_.deviceBuffer = 0;
\r
2372 stream_.mode = UNINITIALIZED;
\r
2373 stream_.state = STREAM_CLOSED;
\r
2376 void RtApiJack :: startStream( void )
\r
2379 if ( stream_.state == STREAM_RUNNING ) {
\r
2380 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2381 error( RtAudioError::WARNING );
\r
2385 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2386 int result = jack_activate( handle->client );
\r
2388 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2392 const char **ports;
\r
2394 // Get the list of available ports.
\r
2395 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2397 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2398 if ( ports == NULL) {
\r
2399 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2403 // Now make the port connections. Since RtAudio wasn't designed to
\r
2404 // allow the user to select particular channels of a device, we'll
\r
2405 // just open the first "nChannels" ports with offset.
\r
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2408 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2409 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2412 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2419 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2421 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2422 if ( ports == NULL) {
\r
2423 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2427 // Now make the port connections. See note above.
\r
2428 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2430 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2431 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2434 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2441 handle->drainCounter = 0;
\r
2442 handle->internalDrain = false;
\r
2443 stream_.state = STREAM_RUNNING;
\r
2446 if ( result == 0 ) return;
\r
2447 error( RtAudioError::SYSTEM_ERROR );
\r
2450 void RtApiJack :: stopStream( void )
\r
2453 if ( stream_.state == STREAM_STOPPED ) {
\r
2454 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2455 error( RtAudioError::WARNING );
\r
2459 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2462 if ( handle->drainCounter == 0 ) {
\r
2463 handle->drainCounter = 2;
\r
2464 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2468 jack_deactivate( handle->client );
\r
2469 stream_.state = STREAM_STOPPED;
\r
2472 void RtApiJack :: abortStream( void )
\r
2475 if ( stream_.state == STREAM_STOPPED ) {
\r
2476 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2477 error( RtAudioError::WARNING );
\r
2481 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2482 handle->drainCounter = 2;
\r
2487 // This function will be called by a spawned thread when the user
\r
2488 // callback function signals that the stream should be stopped or
\r
2489 // aborted. It is necessary to handle it this way because the
\r
2490 // callbackEvent() function must return before the jack_deactivate()
\r
2491 // function will return.
\r
2492 static void *jackStopStream( void *ptr )
\r
2494 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2495 RtApiJack *object = (RtApiJack *) info->object;
\r
2497 object->stopStream();
\r
2498 pthread_exit( NULL );
\r
2501 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2503 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2504 if ( stream_.state == STREAM_CLOSED ) {
\r
2505 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2506 error( RtAudioError::WARNING );
\r
2509 if ( stream_.bufferSize != nframes ) {
\r
2510 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2511 error( RtAudioError::WARNING );
\r
2515 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2516 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2518 // Check if we were draining the stream and signal is finished.
\r
2519 if ( handle->drainCounter > 3 ) {
\r
2520 ThreadHandle threadId;
\r
2522 stream_.state = STREAM_STOPPING;
\r
2523 if ( handle->internalDrain == true )
\r
2524 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2526 pthread_cond_signal( &handle->condition );
\r
2530 // Invoke user callback first, to get fresh output data.
\r
2531 if ( handle->drainCounter == 0 ) {
\r
2532 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2533 double streamTime = getStreamTime();
\r
2534 RtAudioStreamStatus status = 0;
\r
2535 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2536 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2537 handle->xrun[0] = false;
\r
2539 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2540 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2541 handle->xrun[1] = false;
\r
2543 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2544 stream_.bufferSize, streamTime, status, info->userData );
\r
2545 if ( cbReturnValue == 2 ) {
\r
2546 stream_.state = STREAM_STOPPING;
\r
2547 handle->drainCounter = 2;
\r
2549 pthread_create( &id, NULL, jackStopStream, info );
\r
2552 else if ( cbReturnValue == 1 ) {
\r
2553 handle->drainCounter = 1;
\r
2554 handle->internalDrain = true;
\r
2558 jack_default_audio_sample_t *jackbuffer;
\r
2559 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2560 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2562 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memset( jackbuffer, 0, bufferBytes );
\r
2570 else if ( stream_.doConvertBuffer[0] ) {
\r
2572 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2574 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2575 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2576 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2579 else { // no buffer conversion
\r
2580 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2581 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2582 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2587 // Don't bother draining input
\r
2588 if ( handle->drainCounter ) {
\r
2589 handle->drainCounter++;
\r
2593 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2595 if ( stream_.doConvertBuffer[1] ) {
\r
2596 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2597 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2598 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2602 else { // no buffer conversion
\r
2603 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2604 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2605 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2611 RtApi::tickStreamTime();
\r
2614 //******************** End of __UNIX_JACK__ *********************//
\r
2617 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2619 // The ASIO API is designed around a callback scheme, so this
\r
2620 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2621 // Jack. The primary constraint with ASIO is that it only allows
\r
2622 // access to a single driver at a time. Thus, it is not possible to
\r
2623 // have more than one simultaneous RtAudio stream.
\r
2625 // This implementation also requires a number of external ASIO files
\r
2626 // and a few global variables. The ASIO callback scheme does not
\r
2627 // allow for the passing of user data, so we must create a global
\r
2628 // pointer to our callbackInfo structure.
\r
2630 // On unix systems, we make use of a pthread condition variable.
\r
2631 // Since there is no equivalent in Windows, I hacked something based
\r
2632 // on information found in
\r
2633 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2635 #include "asiosys.h"
\r
2637 #include "iasiothiscallresolver.h"
\r
2638 #include "asiodrivers.h"
\r
2641 static AsioDrivers drivers;
\r
2642 static ASIOCallbacks asioCallbacks;
\r
2643 static ASIODriverInfo driverInfo;
\r
2644 static CallbackInfo *asioCallbackInfo;
\r
2645 static bool asioXRun;
\r
2647 struct AsioHandle {
\r
2648 int drainCounter; // Tracks callback counts when draining
\r
2649 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2650 ASIOBufferInfo *bufferInfos;
\r
2654 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2657 // Function declarations (definitions at end of section)
\r
2658 static const char* getAsioErrorString( ASIOError result );
\r
2659 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2660 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2662 RtApiAsio :: RtApiAsio()
\r
2664 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2665 // CoInitialize beforehand, but it must be for appartment threading
\r
2666 // (in which case, CoInitilialize will return S_FALSE here).
\r
2667 coInitialized_ = false;
\r
2668 HRESULT hr = CoInitialize( NULL );
\r
2669 if ( FAILED(hr) ) {
\r
2670 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2671 error( RtAudioError::WARNING );
\r
2673 coInitialized_ = true;
\r
2675 drivers.removeCurrentDriver();
\r
2676 driverInfo.asioVersion = 2;
\r
2678 // See note in DirectSound implementation about GetDesktopWindow().
\r
2679 driverInfo.sysRef = GetForegroundWindow();
\r
2682 RtApiAsio :: ~RtApiAsio()
\r
2684 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2685 if ( coInitialized_ ) CoUninitialize();
\r
2688 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2690 return (unsigned int) drivers.asioGetNumDev();
\r
2693 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2695 RtAudio::DeviceInfo info;
\r
2696 info.probed = false;
\r
2699 unsigned int nDevices = getDeviceCount();
\r
2700 if ( nDevices == 0 ) {
\r
2701 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2702 error( RtAudioError::INVALID_USE );
\r
2706 if ( device >= nDevices ) {
\r
2707 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2708 error( RtAudioError::INVALID_USE );
\r
2712 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2713 if ( stream_.state != STREAM_CLOSED ) {
\r
2714 if ( device >= devices_.size() ) {
\r
2715 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2716 error( RtAudioError::WARNING );
\r
2719 return devices_[ device ];
\r
2722 char driverName[32];
\r
2723 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2724 if ( result != ASE_OK ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 info.name = driverName;
\r
2733 if ( !drivers.loadDriver( driverName ) ) {
\r
2734 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2735 errorText_ = errorStream_.str();
\r
2736 error( RtAudioError::WARNING );
\r
2740 result = ASIOInit( &driverInfo );
\r
2741 if ( result != ASE_OK ) {
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 // Determine the device channel information.
\r
2749 long inputChannels, outputChannels;
\r
2750 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2751 if ( result != ASE_OK ) {
\r
2752 drivers.removeCurrentDriver();
\r
2753 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2754 errorText_ = errorStream_.str();
\r
2755 error( RtAudioError::WARNING );
\r
2759 info.outputChannels = outputChannels;
\r
2760 info.inputChannels = inputChannels;
\r
2761 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2762 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2764 // Determine the supported sample rates.
\r
2765 info.sampleRates.clear();
\r
2766 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2767 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2768 if ( result == ASE_OK )
\r
2769 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2772 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2773 ASIOChannelInfo channelInfo;
\r
2774 channelInfo.channel = 0;
\r
2775 channelInfo.isInput = true;
\r
2776 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2777 result = ASIOGetChannelInfo( &channelInfo );
\r
2778 if ( result != ASE_OK ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.nativeFormats = 0;
\r
2787 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2788 info.nativeFormats |= RTAUDIO_SINT16;
\r
2789 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2790 info.nativeFormats |= RTAUDIO_SINT32;
\r
2791 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2792 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2793 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2794 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2795 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2796 info.nativeFormats |= RTAUDIO_SINT24;
\r
2798 if ( info.outputChannels > 0 )
\r
2799 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2800 if ( info.inputChannels > 0 )
\r
2801 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2803 info.probed = true;
\r
2804 drivers.removeCurrentDriver();
\r
2808 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2810 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2811 object->callbackEvent( index );
\r
2814 void RtApiAsio :: saveDeviceInfo( void )
\r
2818 unsigned int nDevices = getDeviceCount();
\r
2819 devices_.resize( nDevices );
\r
2820 for ( unsigned int i=0; i<nDevices; i++ )
\r
2821 devices_[i] = getDeviceInfo( i );
\r
2824 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2825 unsigned int firstChannel, unsigned int sampleRate,
\r
2826 RtAudioFormat format, unsigned int *bufferSize,
\r
2827 RtAudio::StreamOptions *options )
\r
2829 // For ASIO, a duplex stream MUST use the same driver.
\r
2830 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2831 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2835 char driverName[32];
\r
2836 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2837 if ( result != ASE_OK ) {
\r
2838 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2839 errorText_ = errorStream_.str();
\r
2843 // Only load the driver once for duplex stream.
\r
2844 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2845 // The getDeviceInfo() function will not work when a stream is open
\r
2846 // because ASIO does not allow multiple devices to run at the same
\r
2847 // time. Thus, we'll probe the system before opening a stream and
\r
2848 // save the results for use by getDeviceInfo().
\r
2849 this->saveDeviceInfo();
\r
2851 if ( !drivers.loadDriver( driverName ) ) {
\r
2852 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2853 errorText_ = errorStream_.str();
\r
2857 result = ASIOInit( &driverInfo );
\r
2858 if ( result != ASE_OK ) {
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2865 // Check the device channel count.
\r
2866 long inputChannels, outputChannels;
\r
2867 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2871 errorText_ = errorStream_.str();
\r
2875 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2876 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2877 drivers.removeCurrentDriver();
\r
2878 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2879 errorText_ = errorStream_.str();
\r
2882 stream_.nDeviceChannels[mode] = channels;
\r
2883 stream_.nUserChannels[mode] = channels;
\r
2884 stream_.channelOffset[mode] = firstChannel;
\r
2886 // Verify the sample rate is supported.
\r
2887 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2888 if ( result != ASE_OK ) {
\r
2889 drivers.removeCurrentDriver();
\r
2890 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2891 errorText_ = errorStream_.str();
\r
2895 // Get the current sample rate
\r
2896 ASIOSampleRate currentRate;
\r
2897 result = ASIOGetSampleRate( ¤tRate );
\r
2898 if ( result != ASE_OK ) {
\r
2899 drivers.removeCurrentDriver();
\r
2900 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2901 errorText_ = errorStream_.str();
\r
2905 // Set the sample rate only if necessary
\r
2906 if ( currentRate != sampleRate ) {
\r
2907 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2908 if ( result != ASE_OK ) {
\r
2909 drivers.removeCurrentDriver();
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2916 // Determine the driver data type.
\r
2917 ASIOChannelInfo channelInfo;
\r
2918 channelInfo.channel = 0;
\r
2919 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2920 else channelInfo.isInput = true;
\r
2921 result = ASIOGetChannelInfo( &channelInfo );
\r
2922 if ( result != ASE_OK ) {
\r
2923 drivers.removeCurrentDriver();
\r
2924 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2925 errorText_ = errorStream_.str();
\r
2929 // Assuming WINDOWS host is always little-endian.
\r
2930 stream_.doByteSwap[mode] = false;
\r
2931 stream_.userFormat = format;
\r
2932 stream_.deviceFormat[mode] = 0;
\r
2933 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2935 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2937 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2938 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2939 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2941 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2942 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2943 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2945 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2946 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2947 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2949 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2950 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2951 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2954 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2955 drivers.removeCurrentDriver();
\r
2956 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2957 errorText_ = errorStream_.str();
\r
2961 // Set the buffer size. For a duplex stream, this will end up
\r
2962 // setting the buffer size based on the input constraints, which
\r
2964 long minSize, maxSize, preferSize, granularity;
\r
2965 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2966 if ( result != ASE_OK ) {
\r
2967 drivers.removeCurrentDriver();
\r
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2969 errorText_ = errorStream_.str();
\r
2973 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2974 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2975 else if ( granularity == -1 ) {
\r
2976 // Make sure bufferSize is a power of two.
\r
2977 int log2_of_min_size = 0;
\r
2978 int log2_of_max_size = 0;
\r
2980 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2981 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2982 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2985 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2986 int min_delta_num = log2_of_min_size;
\r
2988 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2989 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2990 if (current_delta < min_delta) {
\r
2991 min_delta = current_delta;
\r
2992 min_delta_num = i;
\r
2996 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2997 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2998 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3000 else if ( granularity != 0 ) {
\r
3001 // Set to an even multiple of granularity, rounding up.
\r
3002 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3005 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
3006 drivers.removeCurrentDriver();
\r
3007 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3011 stream_.bufferSize = *bufferSize;
\r
3012 stream_.nBuffers = 2;
\r
3014 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3015 else stream_.userInterleaved = true;
\r
3017 // ASIO always uses non-interleaved buffers.
\r
3018 stream_.deviceInterleaved[mode] = false;
\r
3020 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3021 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3022 if ( handle == 0 ) {
\r
3024 handle = new AsioHandle;
\r
3026 catch ( std::bad_alloc& ) {
\r
3027 //if ( handle == NULL ) {
\r
3028 drivers.removeCurrentDriver();
\r
3029 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3032 handle->bufferInfos = 0;
\r
3034 // Create a manual-reset event.
\r
3035 handle->condition = CreateEvent( NULL, // no security
\r
3036 TRUE, // manual-reset
\r
3037 FALSE, // non-signaled initially
\r
3038 NULL ); // unnamed
\r
3039 stream_.apiHandle = (void *) handle;
\r
3042 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3043 // and output separately, we'll have to dispose of previously
\r
3044 // created output buffers for a duplex stream.
\r
3045 long inputLatency, outputLatency;
\r
3046 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3047 ASIODisposeBuffers();
\r
3048 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3051 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3052 bool buffersAllocated = false;
\r
3053 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3054 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3055 if ( handle->bufferInfos == NULL ) {
\r
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3057 errorText_ = errorStream_.str();
\r
3061 ASIOBufferInfo *infos;
\r
3062 infos = handle->bufferInfos;
\r
3063 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3064 infos->isInput = ASIOFalse;
\r
3065 infos->channelNum = i + stream_.channelOffset[0];
\r
3066 infos->buffers[0] = infos->buffers[1] = 0;
\r
3068 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3069 infos->isInput = ASIOTrue;
\r
3070 infos->channelNum = i + stream_.channelOffset[1];
\r
3071 infos->buffers[0] = infos->buffers[1] = 0;
\r
3074 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3075 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3076 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3077 asioCallbacks.asioMessage = &asioMessages;
\r
3078 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3079 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3080 if ( result != ASE_OK ) {
\r
3081 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3082 errorText_ = errorStream_.str();
\r
3085 buffersAllocated = true;
\r
3087 // Set flags for buffer conversion.
\r
3088 stream_.doConvertBuffer[mode] = false;
\r
3089 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3090 stream_.doConvertBuffer[mode] = true;
\r
3091 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3092 stream_.nUserChannels[mode] > 1 )
\r
3093 stream_.doConvertBuffer[mode] = true;
\r
3095 // Allocate necessary internal buffers
\r
3096 unsigned long bufferBytes;
\r
3097 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3098 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3099 if ( stream_.userBuffer[mode] == NULL ) {
\r
3100 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3104 if ( stream_.doConvertBuffer[mode] ) {
\r
3106 bool makeBuffer = true;
\r
3107 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3108 if ( mode == INPUT ) {
\r
3109 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3110 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3111 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3115 if ( makeBuffer ) {
\r
3116 bufferBytes *= *bufferSize;
\r
3117 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3118 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3119 if ( stream_.deviceBuffer == NULL ) {
\r
3120 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3126 stream_.sampleRate = sampleRate;
\r
3127 stream_.device[mode] = device;
\r
3128 stream_.state = STREAM_STOPPED;
\r
3129 asioCallbackInfo = &stream_.callbackInfo;
\r
3130 stream_.callbackInfo.object = (void *) this;
\r
3131 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3132 // We had already set up an output stream.
\r
3133 stream_.mode = DUPLEX;
\r
3135 stream_.mode = mode;
\r
3137 // Determine device latencies
\r
3138 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3139 if ( result != ASE_OK ) {
\r
3140 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3141 errorText_ = errorStream_.str();
\r
3142 error( RtAudioError::WARNING); // warn but don't fail
\r
3145 stream_.latency[0] = outputLatency;
\r
3146 stream_.latency[1] = inputLatency;
\r
3149 // Setup the buffer conversion information structure. We don't use
\r
3150 // buffers to do channel offsets, so we override that parameter
\r
3152 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3157 if ( buffersAllocated )
\r
3158 ASIODisposeBuffers();
\r
3159 drivers.removeCurrentDriver();
\r
3162 CloseHandle( handle->condition );
\r
3163 if ( handle->bufferInfos )
\r
3164 free( handle->bufferInfos );
\r
3166 stream_.apiHandle = 0;
\r
3169 for ( int i=0; i<2; i++ ) {
\r
3170 if ( stream_.userBuffer[i] ) {
\r
3171 free( stream_.userBuffer[i] );
\r
3172 stream_.userBuffer[i] = 0;
\r
3176 if ( stream_.deviceBuffer ) {
\r
3177 free( stream_.deviceBuffer );
\r
3178 stream_.deviceBuffer = 0;
\r
3184 void RtApiAsio :: closeStream()
\r
3186 if ( stream_.state == STREAM_CLOSED ) {
\r
3187 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3188 error( RtAudioError::WARNING );
\r
3192 if ( stream_.state == STREAM_RUNNING ) {
\r
3193 stream_.state = STREAM_STOPPED;
\r
3196 ASIODisposeBuffers();
\r
3197 drivers.removeCurrentDriver();
\r
3199 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3201 CloseHandle( handle->condition );
\r
3202 if ( handle->bufferInfos )
\r
3203 free( handle->bufferInfos );
\r
3205 stream_.apiHandle = 0;
\r
3208 for ( int i=0; i<2; i++ ) {
\r
3209 if ( stream_.userBuffer[i] ) {
\r
3210 free( stream_.userBuffer[i] );
\r
3211 stream_.userBuffer[i] = 0;
\r
3215 if ( stream_.deviceBuffer ) {
\r
3216 free( stream_.deviceBuffer );
\r
3217 stream_.deviceBuffer = 0;
\r
3220 stream_.mode = UNINITIALIZED;
\r
3221 stream_.state = STREAM_CLOSED;
\r
// Cleared in startStream(); presumably guards against spawning duplicate
// stop threads — NOTE(review): confirm against the full file (it also looks
// like this should be file-static).
bool stopThreadCalled = false;
\r
3226 void RtApiAsio :: startStream()
\r
3229 if ( stream_.state == STREAM_RUNNING ) {
\r
3230 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3231 error( RtAudioError::WARNING );
\r
3235 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3236 ASIOError result = ASIOStart();
\r
3237 if ( result != ASE_OK ) {
\r
3238 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3239 errorText_ = errorStream_.str();
\r
3243 handle->drainCounter = 0;
\r
3244 handle->internalDrain = false;
\r
3245 ResetEvent( handle->condition );
\r
3246 stream_.state = STREAM_RUNNING;
\r
3250 stopThreadCalled = false;
\r
3252 if ( result == ASE_OK ) return;
\r
3253 error( RtAudioError::SYSTEM_ERROR );
\r
3256 void RtApiAsio :: stopStream()
\r
3259 if ( stream_.state == STREAM_STOPPED ) {
\r
3260 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3261 error( RtAudioError::WARNING );
\r
3265 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3266 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3267 if ( handle->drainCounter == 0 ) {
\r
3268 handle->drainCounter = 2;
\r
3269 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3273 stream_.state = STREAM_STOPPED;
\r
3275 ASIOError result = ASIOStop();
\r
3276 if ( result != ASE_OK ) {
\r
3277 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3278 errorText_ = errorStream_.str();
\r
3281 if ( result == ASE_OK ) return;
\r
3282 error( RtAudioError::SYSTEM_ERROR );
\r
3285 void RtApiAsio :: abortStream()
\r
3288 if ( stream_.state == STREAM_STOPPED ) {
\r
3289 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3290 error( RtAudioError::WARNING );
\r
3294 // The following lines were commented-out because some behavior was
\r
3295 // noted where the device buffers need to be zeroed to avoid
\r
3296 // continuing sound, even when the device buffers are completely
\r
3297 // disposed. So now, calling abort is the same as calling stop.
\r
3298 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3299 // handle->drainCounter = 2;
\r
3303 // This function will be called by a spawned thread when the user
\r
3304 // callback function signals that the stream should be stopped or
\r
3305 // aborted. It is necessary to handle it this way because the
\r
3306 // callbackEvent() function must return before the ASIOStop()
\r
3307 // function will return.
\r
3308 static unsigned __stdcall asioStopStream( void *ptr )
\r
3310 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3311 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3313 object->stopStream();
\r
3314 _endthreadex( 0 );
\r
3318 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3320 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3321 if ( stream_.state == STREAM_CLOSED ) {
\r
3322 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3323 error( RtAudioError::WARNING );
\r
3327 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3328 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3330 // Check if we were draining the stream and signal if finished.
\r
3331 if ( handle->drainCounter > 3 ) {
\r
3333 stream_.state = STREAM_STOPPING;
\r
3334 if ( handle->internalDrain == false )
\r
3335 SetEvent( handle->condition );
\r
3336 else { // spawn a thread to stop the stream
\r
3337 unsigned threadId;
\r
3338 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3339 &stream_.callbackInfo, 0, &threadId );
\r
3344 // Invoke user callback to get fresh output data UNLESS we are
\r
3345 // draining stream.
\r
3346 if ( handle->drainCounter == 0 ) {
\r
3347 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3348 double streamTime = getStreamTime();
\r
3349 RtAudioStreamStatus status = 0;
\r
3350 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3351 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3354 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3355 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3358 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3359 stream_.bufferSize, streamTime, status, info->userData );
\r
3360 if ( cbReturnValue == 2 ) {
\r
3361 stream_.state = STREAM_STOPPING;
\r
3362 handle->drainCounter = 2;
\r
3363 unsigned threadId;
\r
3364 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3365 &stream_.callbackInfo, 0, &threadId );
\r
3368 else if ( cbReturnValue == 1 ) {
\r
3369 handle->drainCounter = 1;
\r
3370 handle->internalDrain = true;
\r
3374 unsigned int nChannels, bufferBytes, i, j;
\r
3375 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3378 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3380 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3382 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3383 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3384 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3388 else if ( stream_.doConvertBuffer[0] ) {
\r
3390 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3391 if ( stream_.doByteSwap[0] )
\r
3392 byteSwapBuffer( stream_.deviceBuffer,
\r
3393 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3394 stream_.deviceFormat[0] );
\r
3396 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3397 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3398 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3399 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3405 if ( stream_.doByteSwap[0] )
\r
3406 byteSwapBuffer( stream_.userBuffer[0],
\r
3407 stream_.bufferSize * stream_.nUserChannels[0],
\r
3408 stream_.userFormat );
\r
3410 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3411 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3412 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3413 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3419 // Don't bother draining input
\r
3420 if ( handle->drainCounter ) {
\r
3421 handle->drainCounter++;
\r
3425 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3427 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3429 if (stream_.doConvertBuffer[1]) {
\r
3431 // Always interleave ASIO input data.
\r
3432 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3433 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3434 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3435 handle->bufferInfos[i].buffers[bufferIndex],
\r
3439 if ( stream_.doByteSwap[1] )
\r
3440 byteSwapBuffer( stream_.deviceBuffer,
\r
3441 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3442 stream_.deviceFormat[1] );
\r
3443 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3447 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3448 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3449 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3450 handle->bufferInfos[i].buffers[bufferIndex],
\r
3455 if ( stream_.doByteSwap[1] )
\r
3456 byteSwapBuffer( stream_.userBuffer[1],
\r
3457 stream_.bufferSize * stream_.nUserChannels[1],
\r
3458 stream_.userFormat );
\r
3463 // The following call was suggested by Malte Clasen. While the API
\r
3464 // documentation indicates it should not be required, some device
\r
3465 // drivers apparently do not function correctly without it.
\r
3466 ASIOOutputReady();
\r
3468 RtApi::tickStreamTime();
\r
3472 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3474 // The ASIO documentation says that this usually only happens during
\r
3475 // external sync. Audio processing is not stopped by the driver,
\r
3476 // actual sample rate might not have even changed, maybe only the
\r
3477 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3480 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3482 object->stopStream();
\r
3484 catch ( RtAudioError &exception ) {
\r
3485 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3489 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3492 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3496 switch( selector ) {
\r
3497 case kAsioSelectorSupported:
\r
3498 if ( value == kAsioResetRequest
\r
3499 || value == kAsioEngineVersion
\r
3500 || value == kAsioResyncRequest
\r
3501 || value == kAsioLatenciesChanged
\r
3502 // The following three were added for ASIO 2.0, you don't
\r
3503 // necessarily have to support them.
\r
3504 || value == kAsioSupportsTimeInfo
\r
3505 || value == kAsioSupportsTimeCode
\r
3506 || value == kAsioSupportsInputMonitor)
\r
3509 case kAsioResetRequest:
\r
3510 // Defer the task and perform the reset of the driver during the
\r
3511 // next "safe" situation. You cannot reset the driver right now,
\r
3512 // as this code is called from the driver. Reset the driver is
\r
3513 // done by completely destruct is. I.e. ASIOStop(),
\r
3514 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3516 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3519 case kAsioResyncRequest:
\r
3520 // This informs the application that the driver encountered some
\r
3521 // non-fatal data loss. It is used for synchronization purposes
\r
3522 // of different media. Added mainly to work around the Win16Mutex
\r
3523 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3524 // which could lose data because the Mutex was held too long by
\r
3525 // another thread. However a driver can issue it in other
\r
3526 // situations, too.
\r
3527 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3531 case kAsioLatenciesChanged:
\r
3532 // This will inform the host application that the drivers were
\r
3533 // latencies changed. Beware, it this does not mean that the
\r
3534 // buffer sizes have changed! You might need to update internal
\r
3536 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3539 case kAsioEngineVersion:
\r
3540 // Return the supported ASIO version of the host application. If
\r
3541 // a host application does not implement this selector, ASIO 1.0
\r
3542 // is assumed by the driver.
\r
3545 case kAsioSupportsTimeInfo:
\r
3546 // Informs the driver whether the
\r
3547 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3548 // For compatibility with ASIO 1.0 drivers the host application
\r
3549 // should always support the "old" bufferSwitch method, too.
\r
3552 case kAsioSupportsTimeCode:
\r
3553 // Informs the driver whether application is interested in time
\r
3554 // code info. If an application does not need to know about time
\r
3555 // code, the driver has less work to do.
\r
3562 static const char* getAsioErrorString( ASIOError result )
\r
3567 const char*message;
\r
3570 static const Messages m[] =
\r
3572 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3573 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3574 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3575 { ASE_InvalidMode, "Invalid mode." },
\r
3576 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3577 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3578 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3581 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3582 if ( m[i].value == result ) return m[i].message;
\r
3584 return "Unknown error.";
\r
3587 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3591 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3593 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3594 // - Introduces support for the Windows WASAPI API
\r
3595 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3596 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3597 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3602 #include <audioclient.h>
\r
3604 #include <mmdeviceapi.h>
\r
3605 #include <functiondiscoverykeys_devpkey.h>
\r
3607 //=============================================================================
\r
// Release a COM object and clear the pointer.
// Fix: guard against NULL so the macro is safe on pointers that were never
// assigned (the previous expansion called Release() unconditionally).
// Kept as a plain if-statement expansion (not do/while) because existing
// call sites in this file invoke SAFE_RELEASE(...) without a trailing
// semicolon.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3616 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3618 //-----------------------------------------------------------------------------
\r
3620 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3621 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3622 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3623 // provide intermediate storage for read / write synchronization.
\r
3624 class WasapiBuffer
\r
3628 : buffer_( NULL ),
\r
3637 // sets the length of the internal ring buffer
\r
3638 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3641 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3643 bufferSize_ = bufferSize;
\r
3648 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3649 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3651 if ( !buffer || // incoming buffer is NULL
\r
3652 bufferSize == 0 || // incoming buffer has no data
\r
3653 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3658 unsigned int relOutIndex = outIndex_;
\r
3659 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3660 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3661 relOutIndex += bufferSize_;
\r
3664 // "in" index can end on the "out" index but cannot begin at it
\r
3665 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3666 return false; // not enough space between "in" index and "out" index
\r
3669 // copy buffer from external to internal
\r
3670 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3671 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3672 int fromInSize = bufferSize - fromZeroSize;
\r
3676 case RTAUDIO_SINT8:
\r
3677 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3678 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3680 case RTAUDIO_SINT16:
\r
3681 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3682 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3684 case RTAUDIO_SINT24:
\r
3685 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3686 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3688 case RTAUDIO_SINT32:
\r
3689 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3690 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3692 case RTAUDIO_FLOAT32:
\r
3693 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3694 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3696 case RTAUDIO_FLOAT64:
\r
3697 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3698 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3702 // update "in" index
\r
3703 inIndex_ += bufferSize;
\r
3704 inIndex_ %= bufferSize_;
\r
3709 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3710 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3712 if ( !buffer || // incoming buffer is NULL
\r
3713 bufferSize == 0 || // incoming buffer has no data
\r
3714 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3719 unsigned int relInIndex = inIndex_;
\r
3720 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3721 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3722 relInIndex += bufferSize_;
\r
3725 // "out" index can begin at and end on the "in" index
\r
3726 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3727 return false; // not enough space between "out" index and "in" index
\r
3730 // copy buffer from internal to external
\r
3731 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3732 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3733 int fromOutSize = bufferSize - fromZeroSize;
\r
3737 case RTAUDIO_SINT8:
\r
3738 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3739 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3741 case RTAUDIO_SINT16:
\r
3742 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3743 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3745 case RTAUDIO_SINT24:
\r
3746 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3747 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3749 case RTAUDIO_SINT32:
\r
3750 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3751 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3753 case RTAUDIO_FLOAT32:
\r
3754 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3755 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3757 case RTAUDIO_FLOAT64:
\r
3758 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3759 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3763 // update "out" index
\r
3764 outIndex_ += bufferSize;
\r
3765 outIndex_ %= bufferSize_;
\r
3772 unsigned int bufferSize_;
\r
3773 unsigned int inIndex_;
\r
3774 unsigned int outIndex_;
\r
3777 //-----------------------------------------------------------------------------
\r
3779 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3780 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3781 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3782 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3783 // one rate and its multiple.
\r
3784 void convertBufferWasapi( char* outBuffer,
\r
3785 const char* inBuffer,
\r
3786 const unsigned int& channelCount,
\r
3787 const unsigned int& inSampleRate,
\r
3788 const unsigned int& outSampleRate,
\r
3789 const unsigned int& inSampleCount,
\r
3790 unsigned int& outSampleCount,
\r
3791 const RtAudioFormat& format )
\r
3793 // calculate the new outSampleCount and relative sampleStep
\r
3794 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3795 float sampleStep = 1.0f / sampleRatio;
\r
3796 float inSampleFraction = 0.0f;
\r
3798 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3800 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3801 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3803 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3807 case RTAUDIO_SINT8:
\r
3808 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3810 case RTAUDIO_SINT16:
\r
3811 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3813 case RTAUDIO_SINT24:
\r
3814 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3816 case RTAUDIO_SINT32:
\r
3817 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3819 case RTAUDIO_FLOAT32:
\r
3820 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3822 case RTAUDIO_FLOAT64:
\r
3823 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3827 // jump to next in sample
\r
3828 inSampleFraction += sampleStep;
\r
3832 //-----------------------------------------------------------------------------
\r
3834 // A structure to hold various information related to the WASAPI implementation.
\r
3835 struct WasapiHandle
\r
3837 IAudioClient* captureAudioClient;
\r
3838 IAudioClient* renderAudioClient;
\r
3839 IAudioCaptureClient* captureClient;
\r
3840 IAudioRenderClient* renderClient;
\r
3841 HANDLE captureEvent;
\r
3842 HANDLE renderEvent;
\r
3845 : captureAudioClient( NULL ),
\r
3846 renderAudioClient( NULL ),
\r
3847 captureClient( NULL ),
\r
3848 renderClient( NULL ),
\r
3849 captureEvent( NULL ),
\r
3850 renderEvent( NULL ) {}
\r
3853 //=============================================================================
\r
3855 RtApiWasapi::RtApiWasapi()
\r
3856 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3858 // WASAPI can run either apartment or multi-threaded
\r
3859 HRESULT hr = CoInitialize( NULL );
\r
3860 if ( !FAILED( hr ) )
\r
3861 coInitialized_ = true;
\r
3863 // Instantiate device enumerator
\r
3864 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3865 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3866 ( void** ) &deviceEnumerator_ );
\r
3868 if ( FAILED( hr ) ) {
\r
3869 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3870 error( RtAudioError::DRIVER_ERROR );
\r
3874 //-----------------------------------------------------------------------------
\r
3876 RtApiWasapi::~RtApiWasapi()
\r
3878 if ( stream_.state != STREAM_CLOSED )
\r
3881 SAFE_RELEASE( deviceEnumerator_ );
\r
3883 // If this object previously called CoInitialize()
\r
3884 if ( coInitialized_ )
\r
3888 //=============================================================================
\r
3890 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3892 unsigned int captureDeviceCount = 0;
\r
3893 unsigned int renderDeviceCount = 0;
\r
3895 IMMDeviceCollection* captureDevices = NULL;
\r
3896 IMMDeviceCollection* renderDevices = NULL;
\r
3898 // Count capture devices
\r
3899 errorText_.clear();
\r
3900 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3901 if ( FAILED( hr ) ) {
\r
3902 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3906 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3907 if ( FAILED( hr ) ) {
\r
3908 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3912 // Count render devices
\r
3913 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3914 if ( FAILED( hr ) ) {
\r
3915 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3919 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3920 if ( FAILED( hr ) ) {
\r
3921 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3926 // release all references
\r
3927 SAFE_RELEASE( captureDevices );
\r
3928 SAFE_RELEASE( renderDevices );
\r
3930 if ( errorText_.empty() )
\r
3931 return captureDeviceCount + renderDeviceCount;
\r
3933 error( RtAudioError::DRIVER_ERROR );
\r
3937 //-----------------------------------------------------------------------------
\r
3939 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3941 RtAudio::DeviceInfo info;
\r
3942 unsigned int captureDeviceCount = 0;
\r
3943 unsigned int renderDeviceCount = 0;
\r
3944 std::wstring deviceName;
\r
3945 std::string defaultDeviceName;
\r
3946 bool isCaptureDevice = false;
\r
3948 PROPVARIANT deviceNameProp;
\r
3949 PROPVARIANT defaultDeviceNameProp;
\r
3951 IMMDeviceCollection* captureDevices = NULL;
\r
3952 IMMDeviceCollection* renderDevices = NULL;
\r
3953 IMMDevice* devicePtr = NULL;
\r
3954 IMMDevice* defaultDevicePtr = NULL;
\r
3955 IAudioClient* audioClient = NULL;
\r
3956 IPropertyStore* devicePropStore = NULL;
\r
3957 IPropertyStore* defaultDevicePropStore = NULL;
\r
3959 WAVEFORMATEX* deviceFormat = NULL;
\r
3960 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3963 info.probed = false;
\r
3965 // Count capture devices
\r
3966 errorText_.clear();
\r
3967 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3968 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3974 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3980 // Count render devices
\r
3981 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3987 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3988 if ( FAILED( hr ) ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3993 // validate device index
\r
3994 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3995 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3996 errorType = RtAudioError::INVALID_USE;
\r
4000 // determine whether index falls within capture or render devices
\r
4001 if ( device >= renderDeviceCount ) {
\r
4002 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4003 if ( FAILED( hr ) ) {
\r
4004 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4007 isCaptureDevice = true;
\r
4010 hr = renderDevices->Item( device, &devicePtr );
\r
4011 if ( FAILED( hr ) ) {
\r
4012 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4015 isCaptureDevice = false;
\r
4018 // get default device name
\r
4019 if ( isCaptureDevice ) {
\r
4020 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4021 if ( FAILED( hr ) ) {
\r
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4027 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4028 if ( FAILED( hr ) ) {
\r
4029 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4034 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4039 PropVariantInit( &defaultDeviceNameProp );
\r
4041 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4047 deviceName = defaultDeviceNameProp.pwszVal;
\r
4048 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4051 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4052 if ( FAILED( hr ) ) {
\r
4053 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4057 PropVariantInit( &deviceNameProp );
\r
4059 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4060 if ( FAILED( hr ) ) {
\r
4061 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4065 deviceName = deviceNameProp.pwszVal;
\r
4066 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4069 if ( isCaptureDevice ) {
\r
4070 info.isDefaultInput = info.name == defaultDeviceName;
\r
4071 info.isDefaultOutput = false;
\r
4074 info.isDefaultInput = false;
\r
4075 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4079 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4080 if ( FAILED( hr ) ) {
\r
4081 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4085 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4086 if ( FAILED( hr ) ) {
\r
4087 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4091 if ( isCaptureDevice ) {
\r
4092 info.inputChannels = deviceFormat->nChannels;
\r
4093 info.outputChannels = 0;
\r
4094 info.duplexChannels = 0;
\r
4097 info.inputChannels = 0;
\r
4098 info.outputChannels = deviceFormat->nChannels;
\r
4099 info.duplexChannels = 0;
\r
4103 info.sampleRates.clear();
\r
4105 // allow support for all sample rates as we have a built-in sample rate converter
\r
4106 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4107 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4111 info.nativeFormats = 0;
\r
4113 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4114 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4115 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4117 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4118 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4120 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4121 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4124 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4125 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4126 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4128 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT8;
\r
4131 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4132 info.nativeFormats |= RTAUDIO_SINT16;
\r
4134 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4135 info.nativeFormats |= RTAUDIO_SINT24;
\r
4137 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4138 info.nativeFormats |= RTAUDIO_SINT32;
\r
4143 info.probed = true;
\r
4146 // release all references
\r
4147 PropVariantClear( &deviceNameProp );
\r
4148 PropVariantClear( &defaultDeviceNameProp );
\r
4150 SAFE_RELEASE( captureDevices );
\r
4151 SAFE_RELEASE( renderDevices );
\r
4152 SAFE_RELEASE( devicePtr );
\r
4153 SAFE_RELEASE( defaultDevicePtr );
\r
4154 SAFE_RELEASE( audioClient );
\r
4155 SAFE_RELEASE( devicePropStore );
\r
4156 SAFE_RELEASE( defaultDevicePropStore );
\r
4158 CoTaskMemFree( deviceFormat );
\r
4159 CoTaskMemFree( closestMatchFormat );
\r
4161 if ( !errorText_.empty() )
\r
4162 error( errorType );
\r
4166 //-----------------------------------------------------------------------------
\r
4168 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4170 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4171 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4179 //-----------------------------------------------------------------------------
\r
4181 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4183 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4184 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4192 //-----------------------------------------------------------------------------
\r
4194 void RtApiWasapi::closeStream( void )
\r
4196 if ( stream_.state == STREAM_CLOSED ) {
\r
4197 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4198 error( RtAudioError::WARNING );
\r
4202 if ( stream_.state != STREAM_STOPPED )
\r
4205 // clean up stream memory
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4207 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4209 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4210 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4212 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4213 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4215 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4216 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4218 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4219 stream_.apiHandle = NULL;
\r
4221 for ( int i = 0; i < 2; i++ ) {
\r
4222 if ( stream_.userBuffer[i] ) {
\r
4223 free( stream_.userBuffer[i] );
\r
4224 stream_.userBuffer[i] = 0;
\r
4228 if ( stream_.deviceBuffer ) {
\r
4229 free( stream_.deviceBuffer );
\r
4230 stream_.deviceBuffer = 0;
\r
4233 // update stream state
\r
4234 stream_.state = STREAM_CLOSED;
\r
4237 //-----------------------------------------------------------------------------
\r
4239 void RtApiWasapi::startStream( void )
\r
4243 if ( stream_.state == STREAM_RUNNING ) {
\r
4244 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4245 error( RtAudioError::WARNING );
\r
4249 // update stream state
\r
4250 stream_.state = STREAM_RUNNING;
\r
4252 // create WASAPI stream thread
\r
4253 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4255 if ( !stream_.callbackInfo.thread ) {
\r
4256 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4257 error( RtAudioError::THREAD_ERROR );
\r
4260 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4261 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4265 //-----------------------------------------------------------------------------
\r
4267 void RtApiWasapi::stopStream( void )
\r
4271 if ( stream_.state == STREAM_STOPPED ) {
\r
4272 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4273 error( RtAudioError::WARNING );
\r
4277 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4278 stream_.state = STREAM_STOPPING;
\r
4280 // wait until stream thread is stopped
\r
4281 while( stream_.state != STREAM_STOPPED ) {
\r
4285 // Wait for the last buffer to play before stopping.
\r
4286 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4288 // stop capture client if applicable
\r
4289 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4290 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4291 if ( FAILED( hr ) ) {
\r
4292 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4293 error( RtAudioError::DRIVER_ERROR );
\r
4298 // stop render client if applicable
\r
4299 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4300 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4301 if ( FAILED( hr ) ) {
\r
4302 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4303 error( RtAudioError::DRIVER_ERROR );
\r
4308 // close thread handle
\r
4309 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4310 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4311 error( RtAudioError::THREAD_ERROR );
\r
4315 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4318 //-----------------------------------------------------------------------------
\r
4320 void RtApiWasapi::abortStream( void )
\r
4324 if ( stream_.state == STREAM_STOPPED ) {
\r
4325 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4326 error( RtAudioError::WARNING );
\r
4330 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4331 stream_.state = STREAM_STOPPING;
\r
4333 // wait until stream thread is stopped
\r
4334 while ( stream_.state != STREAM_STOPPED ) {
\r
4338 // stop capture client if applicable
\r
4339 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4340 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4341 if ( FAILED( hr ) ) {
\r
4342 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4343 error( RtAudioError::DRIVER_ERROR );
\r
4348 // stop render client if applicable
\r
4349 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4350 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4351 if ( FAILED( hr ) ) {
\r
4352 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4353 error( RtAudioError::DRIVER_ERROR );
\r
4358 // close thread handle
\r
4359 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4360 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4361 error( RtAudioError::THREAD_ERROR );
\r
4365 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4368 //-----------------------------------------------------------------------------
\r
4370 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4371 unsigned int firstChannel, unsigned int sampleRate,
\r
4372 RtAudioFormat format, unsigned int* bufferSize,
\r
4373 RtAudio::StreamOptions* options )
\r
4375 bool methodResult = FAILURE;
\r
4376 unsigned int captureDeviceCount = 0;
\r
4377 unsigned int renderDeviceCount = 0;
\r
4379 IMMDeviceCollection* captureDevices = NULL;
\r
4380 IMMDeviceCollection* renderDevices = NULL;
\r
4381 IMMDevice* devicePtr = NULL;
\r
4382 WAVEFORMATEX* deviceFormat = NULL;
\r
4383 unsigned int bufferBytes;
\r
4384 stream_.state = STREAM_STOPPED;
\r
4386 // create API Handle if not already created
\r
4387 if ( !stream_.apiHandle )
\r
4388 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4390 // Count capture devices
\r
4391 errorText_.clear();
\r
4392 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4393 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4399 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4400 if ( FAILED( hr ) ) {
\r
4401 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4405 // Count render devices
\r
4406 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4407 if ( FAILED( hr ) ) {
\r
4408 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4412 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4413 if ( FAILED( hr ) ) {
\r
4414 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4418 // validate device index
\r
4419 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4420 errorType = RtAudioError::INVALID_USE;
\r
4421 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4425 // determine whether index falls within capture or render devices
\r
4426 if ( device >= renderDeviceCount ) {
\r
4427 if ( mode != INPUT ) {
\r
4428 errorType = RtAudioError::INVALID_USE;
\r
4429 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4433 // retrieve captureAudioClient from devicePtr
\r
4434 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4436 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4437 if ( FAILED( hr ) ) {
\r
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4442 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4443 NULL, ( void** ) &captureAudioClient );
\r
4444 if ( FAILED( hr ) ) {
\r
4445 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4449 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4450 if ( FAILED( hr ) ) {
\r
4451 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4455 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4456 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4459 if ( mode != OUTPUT ) {
\r
4460 errorType = RtAudioError::INVALID_USE;
\r
4461 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4465 // retrieve renderAudioClient from devicePtr
\r
4466 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4468 hr = renderDevices->Item( device, &devicePtr );
\r
4469 if ( FAILED( hr ) ) {
\r
4470 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4474 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4475 NULL, ( void** ) &renderAudioClient );
\r
4476 if ( FAILED( hr ) ) {
\r
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4481 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4482 if ( FAILED( hr ) ) {
\r
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4487 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4488 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4491 // fill stream data
\r
4492 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4493 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4494 stream_.mode = DUPLEX;
\r
4497 stream_.mode = mode;
\r
4500 stream_.device[mode] = device;
\r
4501 stream_.doByteSwap[mode] = false;
\r
4502 stream_.sampleRate = sampleRate;
\r
4503 stream_.bufferSize = *bufferSize;
\r
4504 stream_.nBuffers = 1;
\r
4505 stream_.nUserChannels[mode] = channels;
\r
4506 stream_.channelOffset[mode] = firstChannel;
\r
4507 stream_.userFormat = format;
\r
4508 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4510 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4511 stream_.userInterleaved = false;
\r
4513 stream_.userInterleaved = true;
\r
4514 stream_.deviceInterleaved[mode] = true;
\r
4516 // Set flags for buffer conversion.
\r
4517 stream_.doConvertBuffer[mode] = false;
\r
4518 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4519 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4520 stream_.doConvertBuffer[mode] = true;
\r
4521 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4522 stream_.nUserChannels[mode] > 1 )
\r
4523 stream_.doConvertBuffer[mode] = true;
\r
4525 if ( stream_.doConvertBuffer[mode] )
\r
4526 setConvertInfo( mode, 0 );
\r
4528 // Allocate necessary internal buffers
\r
4529 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4531 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4532 if ( !stream_.userBuffer[mode] ) {
\r
4533 errorType = RtAudioError::MEMORY_ERROR;
\r
4534 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4538 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4539 stream_.callbackInfo.priority = 15;
\r
4541 stream_.callbackInfo.priority = 0;
\r
4543 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4544 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4546 methodResult = SUCCESS;
\r
4550 SAFE_RELEASE( captureDevices );
\r
4551 SAFE_RELEASE( renderDevices );
\r
4552 SAFE_RELEASE( devicePtr );
\r
4553 CoTaskMemFree( deviceFormat );
\r
4555 // if method failed, close the stream
\r
4556 if ( methodResult == FAILURE )
\r
4559 if ( !errorText_.empty() )
\r
4560 error( errorType );
\r
4561 return methodResult;
\r
4564 //=============================================================================
\r
4566 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4569 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4574 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4577 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4582 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4585 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4590 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream thread.  Runs on its own thread (launched via
// runWasapiThread) and loops until stream_.state becomes STREAM_STOPPING.
// Per iteration it: (1) pulls a callback-sized buffer from the capture ring
// buffer and converts sample rate/format to the user's settings, (2) invokes
// the user callback, (3) converts the callback output back to the device
// rate/format and pushes it into the render ring buffer, and (4) services the
// WASAPI capture/render clients (GetBuffer/ReleaseBuffer), blocking on the
// capture/render events when no data is ready.
// NOTE(review): this chunk is a lossy extraction -- several original lines
// (opening/closing braces, "goto Exit;" statements, the Exit: label, else
// clauses) are missing from view.  Code below is left byte-identical.
4592 void RtApiWasapi::wasapiThread()
\r
4594 // as this is a new thread, we must CoInitialize it
\r
4595 CoInitialize( NULL );
\r
// Unpack the per-stream COM interfaces and events stashed in the api handle.
4599 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4600 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4601 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4602 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4603 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4604 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4606 WAVEFORMATEX* captureFormat = NULL;
\r
4607 WAVEFORMATEX* renderFormat = NULL;
\r
// Device-rate / user-rate ratios; 0 until the matching client is opened.
4608 float captureSrRatio = 0.0f;
\r
4609 float renderSrRatio = 0.0f;
\r
4610 WasapiBuffer captureBuffer;
\r
4611 WasapiBuffer renderBuffer;
\r
4613 // declare local stream variables
\r
4614 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4615 BYTE* streamBuffer = NULL;
\r
4616 unsigned long captureFlags = 0;
\r
4617 unsigned int bufferFrameCount = 0;
\r
4618 unsigned int numFramesPadding = 0;
\r
4619 unsigned int convBufferSize = 0;
\r
4620 bool callbackPushed = false;
\r
4621 bool callbackPulled = false;
\r
4622 bool callbackStopped = false;
\r
4623 int callbackResult = 0;
\r
4625 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4626 char* convBuffer = NULL;
\r
4627 unsigned int convBuffSize = 0;
\r
4628 unsigned int deviceBuffSize = 0;
\r
4630 errorText_.clear();
\r
4631 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4633 // Attempt to assign "Pro Audio" characteristic to thread
\r
// Loaded dynamically so the build does not hard-link against avrt.lib.
4634 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4636 DWORD taskIndex = 0;
\r
4637 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4638 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4639 FreeLibrary( AvrtDll );
\r
4642 // start capture stream if applicable
\r
4643 if ( captureAudioClient ) {
\r
4644 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4645 if ( FAILED( hr ) ) {
\r
4646 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4650 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4652 // initialize capture stream according to desire buffer size
\r
4653 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
// REFERENCE_TIME is in 100-ns units, hence the 1e7 / samples-per-sec scale.
4654 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
// captureClient is NULL on first entry; reused if the stream was restarted.
4656 if ( !captureClient ) {
\r
4657 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4658 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4659 desiredBufferPeriod,
\r
4660 desiredBufferPeriod,
\r
4663 if ( FAILED( hr ) ) {
\r
4664 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4668 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4669 ( void** ) &captureClient );
\r
4670 if ( FAILED( hr ) ) {
\r
4671 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4675 // configure captureEvent to trigger on every available capture buffer
\r
4676 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4677 if ( !captureEvent ) {
\r
4678 errorType = RtAudioError::SYSTEM_ERROR;
\r
4679 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4683 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4684 if ( FAILED( hr ) ) {
\r
4685 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
// Persist the client/event in the api handle so stop/close can release them.
4689 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4690 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4693 unsigned int inBufferSize = 0;
\r
4694 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4695 if ( FAILED( hr ) ) {
\r
4696 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4700 // scale outBufferSize according to stream->user sample rate ratio
\r
4701 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4702 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4704 // set captureBuffer size
\r
// Ring buffer holds one device period plus one callback period of samples.
4705 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4707 // reset the capture stream
\r
4708 hr = captureAudioClient->Reset();
\r
4709 if ( FAILED( hr ) ) {
\r
4710 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4714 // start the capture stream
\r
4715 hr = captureAudioClient->Start();
\r
4716 if ( FAILED( hr ) ) {
\r
4717 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4722 // start render stream if applicable
\r
// Mirror of the capture setup above, for the output direction.
4723 if ( renderAudioClient ) {
\r
4724 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4725 if ( FAILED( hr ) ) {
\r
4726 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4730 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4732 // initialize render stream according to desire buffer size
\r
4733 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4734 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4736 if ( !renderClient ) {
\r
4737 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4738 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4739 desiredBufferPeriod,
\r
4740 desiredBufferPeriod,
\r
4743 if ( FAILED( hr ) ) {
\r
4744 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4748 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4749 ( void** ) &renderClient );
\r
4750 if ( FAILED( hr ) ) {
\r
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4755 // configure renderEvent to trigger on every available render buffer
\r
4756 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4757 if ( !renderEvent ) {
\r
4758 errorType = RtAudioError::SYSTEM_ERROR;
\r
4759 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4763 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4764 if ( FAILED( hr ) ) {
\r
4765 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4769 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4770 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4773 unsigned int outBufferSize = 0;
\r
4774 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4775 if ( FAILED( hr ) ) {
\r
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4780 // scale inBufferSize according to user->stream sample rate ratio
\r
4781 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4782 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4784 // set renderBuffer size
\r
4785 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4787 // reset the render stream
\r
4788 hr = renderAudioClient->Reset();
\r
4789 if ( FAILED( hr ) ) {
\r
4790 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4794 // start the render stream
\r
4795 hr = renderAudioClient->Start();
\r
4796 if ( FAILED( hr ) ) {
\r
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
// Size the conversion and device buffers for the worst case of the active
// direction(s): device-rate frames * device channels * bytes per sample.
4802 if ( stream_.mode == INPUT ) {
\r
4803 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4804 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4806 else if ( stream_.mode == OUTPUT ) {
\r
4807 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4808 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4810 else if ( stream_.mode == DUPLEX ) {
\r
4811 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4812 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4813 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4814 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4817 convBuffer = ( char* ) malloc( convBuffSize );
\r
4818 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4819 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4820 errorType = RtAudioError::MEMORY_ERROR;
\r
4821 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4825 // stream process loop
\r
4826 while ( stream_.state != STREAM_STOPPING ) {
\r
4827 if ( !callbackPulled ) {
\r
4830 // 1. Pull callback buffer from inputBuffer
\r
4831 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4832 // Convert callback buffer to user format
\r
4834 if ( captureAudioClient ) {
\r
4835 // Pull callback buffer from inputBuffer
\r
4836 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4837 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4838 stream_.deviceFormat[INPUT] );
\r
4840 if ( callbackPulled ) {
\r
4841 // Convert callback buffer to user sample rate
\r
4842 convertBufferWasapi( stream_.deviceBuffer,
\r
4844 stream_.nDeviceChannels[INPUT],
\r
4845 captureFormat->nSamplesPerSec,
\r
4846 stream_.sampleRate,
\r
4847 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4849 stream_.deviceFormat[INPUT] );
\r
4851 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4852 // Convert callback buffer to user format
\r
4853 convertBuffer( stream_.userBuffer[INPUT],
\r
4854 stream_.deviceBuffer,
\r
4855 stream_.convertInfo[INPUT] );
\r
4858 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4859 memcpy( stream_.userBuffer[INPUT],
\r
4860 stream_.deviceBuffer,
\r
4861 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4866 // if there is no capture stream, set callbackPulled flag
\r
4867 callbackPulled = true;
\r
4870 // Execute Callback
\r
4871 // ================
\r
4872 // 1. Execute user callback method
\r
4873 // 2. Handle return value from callback
\r
4875 // if callback has not requested the stream to stop
\r
4876 if ( callbackPulled && !callbackStopped ) {
\r
4877 // Execute user callback method
\r
4878 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4879 stream_.userBuffer[INPUT],
\r
4880 stream_.bufferSize,
\r
4882 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4883 stream_.callbackInfo.userData );
\r
4885 // Handle return value from callback
\r
// A return of 1 requests a drain-and-stop; 2 requests an immediate abort.
// Either way the stop must run on ANOTHER thread, since this thread cannot
// join itself -- hence the short-lived helper threads spawned below.
4886 if ( callbackResult == 1 ) {
\r
4887 // instantiate a thread to stop this thread
\r
4888 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4889 if ( !threadHandle ) {
\r
4890 errorType = RtAudioError::THREAD_ERROR;
\r
4891 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4894 else if ( !CloseHandle( threadHandle ) ) {
\r
4895 errorType = RtAudioError::THREAD_ERROR;
\r
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4900 callbackStopped = true;
\r
4902 else if ( callbackResult == 2 ) {
\r
4903 // instantiate a thread to stop this thread
\r
4904 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4905 if ( !threadHandle ) {
\r
4906 errorType = RtAudioError::THREAD_ERROR;
\r
4907 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4910 else if ( !CloseHandle( threadHandle ) ) {
\r
4911 errorType = RtAudioError::THREAD_ERROR;
\r
4912 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4916 callbackStopped = true;
\r
4921 // Callback Output
\r
4922 // ===============
\r
4923 // 1. Convert callback buffer to stream format
\r
4924 // 2. Convert callback buffer to stream sample rate and channel count
\r
4925 // 3. Push callback buffer into outputBuffer
\r
4927 if ( renderAudioClient && callbackPulled ) {
\r
4928 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4929 // Convert callback buffer to stream format
\r
4930 convertBuffer( stream_.deviceBuffer,
\r
4931 stream_.userBuffer[OUTPUT],
\r
4932 stream_.convertInfo[OUTPUT] );
\r
4936 // Convert callback buffer to stream sample rate
\r
4937 convertBufferWasapi( convBuffer,
\r
4938 stream_.deviceBuffer,
\r
4939 stream_.nDeviceChannels[OUTPUT],
\r
4940 stream_.sampleRate,
\r
4941 renderFormat->nSamplesPerSec,
\r
4942 stream_.bufferSize,
\r
4944 stream_.deviceFormat[OUTPUT] );
\r
4946 // Push callback buffer into outputBuffer
\r
4947 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4948 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4949 stream_.deviceFormat[OUTPUT] );
\r
4952 // if there is no render stream, set callbackPushed flag
\r
4953 callbackPushed = true;
\r
4958 // 1. Get capture buffer from stream
\r
4959 // 2. Push capture buffer into inputBuffer
\r
4960 // 3. If 2. was successful: Release capture buffer
\r
4962 if ( captureAudioClient ) {
\r
4963 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4964 if ( !callbackPulled ) {
\r
4965 WaitForSingleObject( captureEvent, INFINITE );
\r
4968 // Get capture buffer from stream
\r
4969 hr = captureClient->GetBuffer( &streamBuffer,
\r
4970 &bufferFrameCount,
\r
4971 &captureFlags, NULL, NULL );
\r
4972 if ( FAILED( hr ) ) {
\r
4973 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4977 if ( bufferFrameCount != 0 ) {
\r
4978 // Push capture buffer into inputBuffer
\r
4979 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4980 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4981 stream_.deviceFormat[INPUT] ) )
\r
4983 // Release capture buffer
\r
4984 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4985 if ( FAILED( hr ) ) {
\r
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4992 // Inform WASAPI that capture was unsuccessful
\r
4993 hr = captureClient->ReleaseBuffer( 0 );
\r
4994 if ( FAILED( hr ) ) {
\r
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5002 // Inform WASAPI that capture was unsuccessful
\r
5003 hr = captureClient->ReleaseBuffer( 0 );
\r
5004 if ( FAILED( hr ) ) {
\r
5005 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5013 // 1. Get render buffer from stream
\r
5014 // 2. Pull next buffer from outputBuffer
\r
5015 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5016 // Release render buffer
\r
5018 if ( renderAudioClient ) {
\r
5019 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5020 if ( callbackPulled && !callbackPushed ) {
\r
5021 WaitForSingleObject( renderEvent, INFINITE );
\r
5024 // Get render buffer from stream
\r
5025 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5026 if ( FAILED( hr ) ) {
\r
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5031 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5032 if ( FAILED( hr ) ) {
\r
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
// Only the unpadded portion of the render buffer may be written this pass.
5037 bufferFrameCount -= numFramesPadding;
\r
5039 if ( bufferFrameCount != 0 ) {
\r
5040 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5041 if ( FAILED( hr ) ) {
\r
5042 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5046 // Pull next buffer from outputBuffer
\r
5047 // Fill render buffer with next buffer
\r
5048 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5049 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5050 stream_.deviceFormat[OUTPUT] ) )
\r
5052 // Release render buffer
\r
5053 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5054 if ( FAILED( hr ) ) {
\r
5055 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5061 // Inform WASAPI that render was unsuccessful
\r
5062 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5063 if ( FAILED( hr ) ) {
\r
5064 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5071 // Inform WASAPI that render was unsuccessful
\r
5072 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5073 if ( FAILED( hr ) ) {
\r
5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5080 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5081 if ( callbackPushed ) {
\r
5082 callbackPulled = false;
\r
5085 // tick stream time
\r
5086 RtApi::tickStreamTime();
\r
// Cleanup (original "Exit:" label not visible in this extraction): release
// the mix formats and conversion buffer, then report any recorded error.
5091 CoTaskMemFree( captureFormat );
\r
5092 CoTaskMemFree( renderFormat );
\r
5094 free ( convBuffer );
\r
5098 // update stream state
\r
5099 stream_.state = STREAM_STOPPED;
\r
5101 if ( errorText_.empty() )
\r
5104 error( errorType );
\r
5107 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5111 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5113 // Modified by Robin Davies, October 2005
\r
5114 // - Improvements to DirectX pointer chasing.
\r
5115 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5116 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5117 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5118 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5120 #include <dsound.h>
\r
5121 #include <assert.h>
\r
5122 #include <algorithm>
\r
5124 #if defined(__MINGW32__)
\r
5125 // missing from latest mingw winapi
\r
5126 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5127 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5128 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5129 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5132 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5134 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5135 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5138 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5140 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5141 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5142 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5143 return pointer >= earlierPointer && pointer < laterPointer;
\r
5146 // A structure to hold various information related to the DirectSound
\r
5147 // API implementation.
\r
// NOTE(review): the opening "struct DsHandle {" line and several members the
// initializer below references (id[2], buffer[2], xrun[2]) are missing from
// this extraction; the visible members are left byte-identical.
5149 unsigned int drainCounter; // Tracks callback counts when draining
\r
5150 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Per-direction state: index 0 = playback, index 1 = capture (by convention
// elsewhere in this file -- confirm against the full struct definition).
5154 UINT bufferPointer[2];
\r
5155 DWORD dsBufferSize[2];
\r
5156 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
// Default constructor: zero the counters and clear all per-direction state.
5160 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5163 // Declarations for utility functions, callbacks, and structures
\r
5164 // specific to the DirectSound implementation.
\r
// Enumeration callback passed to DirectSound(Capture)Enumerate; records each
// discovered device into the DsProbeData handed over as lpContext.
5165 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5166 LPCTSTR description,
\r
5168 LPVOID lpContext );
\r
// Maps a DirectSound error code to a human-readable description string.
5170 static const char* getErrorString( int code );
\r
// Entry point for the DirectSound buffer-servicing/callback thread.
5172 static unsigned __stdcall callbackHandler( void *ptr );
\r
// DsDevice default initializer: device not yet (re)found by enumeration;
// neither the output [0] nor the capture [1] GUID is valid yet.
5181 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed through the enumeration callback: which direction is being
// probed and where to record the results.  (The isInput member line is not
// visible in this extraction.)
5184 struct DsProbeData {
\r
5186 std::vector<struct DsDevice>* dsDevices;
\r
5189 RtApiDs :: RtApiDs()
\r
5191 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5192 // accept whatever the mainline chose for a threading model.
\r
5193 coInitialized_ = false;
\r
5194 HRESULT hr = CoInitialize( NULL );
\r
5195 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5198 RtApiDs :: ~RtApiDs()
\r
5200 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5201 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5204 // The DirectSound default output is always the first device.
\r
5205 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5210 // The DirectSound default input is always the first input device,
\r
5211 // which is the first capture device enumerated.
\r
5212 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5217 unsigned int RtApiDs :: getDeviceCount( void )
\r
5219 // Set query flag for previously found devices to false, so that we
\r
5220 // can check for any devices that have disappeared.
\r
5221 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5222 dsDevices[i].found = false;
\r
5224 // Query DirectSound devices.
\r
5225 struct DsProbeData probeInfo;
\r
5226 probeInfo.isInput = false;
\r
5227 probeInfo.dsDevices = &dsDevices;
\r
5228 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5229 if ( FAILED( result ) ) {
\r
5230 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5231 errorText_ = errorStream_.str();
\r
5232 error( RtAudioError::WARNING );
\r
5235 // Query DirectSoundCapture devices.
\r
5236 probeInfo.isInput = true;
\r
5237 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5238 if ( FAILED( result ) ) {
\r
5239 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5240 errorText_ = errorStream_.str();
\r
5241 error( RtAudioError::WARNING );
\r
5244 // Clean out any devices that may have disappeared.
\r
5245 std::vector< int > indices;
\r
5246 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5247 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5248 //unsigned int nErased = 0;
\r
5249 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5250 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5251 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5253 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe a DirectSound device (by RtAudio index) and fill an
// RtAudio::DeviceInfo with its channel counts, supported sample rates and
// native sample formats.  Output capabilities are probed first; capture
// capabilities second (the original code reaches the capture section via a
// "probeInput" label that is not visible in this extraction).
// NOTE(review): lossy extraction -- opening/closing braces, the probeInput
// label and several early "return info;" lines are missing from view.  Code
// below is left byte-identical.
5256 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5258 RtAudio::DeviceInfo info;
\r
// Start pessimistic: probed stays false unless a probe section completes.
5259 info.probed = false;
\r
5261 if ( dsDevices.size() == 0 ) {
\r
5262 // Force a query of all devices
\r
5264 if ( dsDevices.size() == 0 ) {
\r
5265 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5266 error( RtAudioError::INVALID_USE );
\r
5271 if ( device >= dsDevices.size() ) {
\r
5272 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5273 error( RtAudioError::INVALID_USE );
\r
// validId[0] == output GUID; skip the output probe if this device has none.
5278 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5280 LPDIRECTSOUND output;
\r
5282 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5283 if ( FAILED( result ) ) {
\r
5284 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5285 errorText_ = errorStream_.str();
\r
5286 error( RtAudioError::WARNING );
\r
// dwSize must be set before GetCaps, per the DirectSound API contract.
5290 outCaps.dwSize = sizeof( outCaps );
\r
5291 result = output->GetCaps( &outCaps );
\r
5292 if ( FAILED( result ) ) {
\r
5293 output->Release();
\r
5294 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5295 errorText_ = errorStream_.str();
\r
5296 error( RtAudioError::WARNING );
\r
5300 // Get output channel information.
\r
5301 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5303 // Get sample rate information.
\r
5304 info.sampleRates.clear();
\r
5305 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5306 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5307 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5308 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5311 // Get format information.
\r
5312 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5313 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5315 output->Release();
\r
5317 if ( getDefaultOutputDevice() == device )
\r
5318 info.isDefaultOutput = true;
\r
// No capture GUID: this is an output-only device; finish with what we have.
5320 if ( dsDevices[ device ].validId[1] == false ) {
\r
5321 info.name = dsDevices[ device ].name;
\r
5322 info.probed = true;
\r
// --- capture probe (reached via the probeInput label, not visible here) ---
5328 LPDIRECTSOUNDCAPTURE input;
\r
5329 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5330 if ( FAILED( result ) ) {
\r
5331 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5332 errorText_ = errorStream_.str();
\r
5333 error( RtAudioError::WARNING );
\r
5338 inCaps.dwSize = sizeof( inCaps );
\r
5339 result = input->GetCaps( &inCaps );
\r
5340 if ( FAILED( result ) ) {
\r
5342 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5343 errorText_ = errorStream_.str();
\r
5344 error( RtAudioError::WARNING );
\r
5348 // Get input channel information.
\r
5349 info.inputChannels = inCaps.dwChannels;
\r
5351 // Get sample rate and format information.
\r
// dwFormats is a WAVE_FORMAT_* bitmask encoding rate x channels x width;
// stereo (S) flags are checked for >= 2 channels, mono (M) for 1 channel.
5352 std::vector<unsigned int> rates;
\r
5353 if ( inCaps.dwChannels >= 2 ) {
\r
5354 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5355 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5356 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5359 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5360 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
// Prefer 16-bit rates when available; fall back to 8-bit-only rates.
5363 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5364 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5365 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5369 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5371 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5376 else if ( inCaps.dwChannels == 1 ) {
\r
5377 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5386 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5388 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5392 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5394 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5399 else info.inputChannels = 0; // technically, this would be an error
\r
5403 if ( info.inputChannels == 0 ) return info;
\r
5405 // Copy the supported rates to the info structure but avoid duplication.
\r
5407 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5409 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5410 if ( rates[i] == info.sampleRates[j] ) {
\r
5415 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5417 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5419 // If device opens for both playback and capture, we determine the channels.
\r
5420 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5421 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
// Device 0 is always the default capture device for DirectSound.
5423 if ( device == 0 ) info.isDefaultInput = true;
\r
5425 // Copy name and return.
\r
5426 info.name = dsDevices[ device ].name;
\r
5427 info.probed = true;
\r
// NOTE(review): this chunk is a corrupted extraction -- each line carries a
// stray source-line-number prefix and a trailing CR artifact, and several
// original lines (closing braces, "goto error;" statements, the trailing
// "return FAILURE;") are missing.  Only comments are added below; every
// surviving token is left untouched.
//
// Opens DirectSound device `device` for the given mode (OUTPUT or INPUT):
//  1. validates the channel request (DS supports at most 2 channels/device)
//     and the device id / direction support;
//  2. creates the DirectSound (or DirectSoundCapture) object and negotiates
//     an 8- or 16-bit PCM wave format against the device capabilities;
//  3. creates, locks and zeroes the secondary (playback) or capture buffer,
//     growing the device buffer to cover nBuffers * bufferSize of lead time;
//  4. fills in stream_ (channels, formats, conversion flags), allocates the
//     user/device conversion buffers and the per-stream DsHandle, and spawns
//     the callback thread on the first successful open.
// Returns true (SUCCESS) on success; the (partially missing) error path
// releases all acquired COM objects and buffers and returns FAILURE.
5431 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5432 unsigned int firstChannel, unsigned int sampleRate,
\r
5433 RtAudioFormat format, unsigned int *bufferSize,
\r
5434 RtAudio::StreamOptions *options )
\r
// DirectSound cannot open more than two channels on a single device.
5436 if ( channels + firstChannel > 2 ) {
\r
5437 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
// Sanity checks: these should have been caught by the public API already.
5441 size_t nDevices = dsDevices.size();
\r
5442 if ( nDevices == 0 ) {
\r
5443 // This should not happen because a check is made before this function is called.
\r
5444 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5448 if ( device >= nDevices ) {
\r
5449 // This should not happen because a check is made before this function is called.
\r
5450 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// validId[0] is the output (render) id, validId[1] the input (capture) id.
5454 if ( mode == OUTPUT ) {
\r
5455 if ( dsDevices[ device ].validId[0] == false ) {
\r
5456 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5457 errorText_ = errorStream_.str();
\r
5461 else { // mode == INPUT
\r
5462 if ( dsDevices[ device ].validId[1] == false ) {
\r
5463 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5464 errorText_ = errorStream_.str();
\r
5469 // According to a note in PortAudio, using GetDesktopWindow()
\r
5470 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5471 // that occur when the application's window is not the foreground
\r
5472 // window. Also, if the application window closes before the
\r
5473 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5474 // problems when using GetDesktopWindow() but it seems fine now
\r
5475 // (January 2010). I'll leave it commented here.
\r
5476 // HWND hWnd = GetForegroundWindow();
\r
5477 HWND hWnd = GetDesktopWindow();
\r
5479 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5480 // two. This is a judgement call and a value of two is probably too
\r
5481 // low for capture, but it should work for playback.
\r
5483 if ( options ) nBuffers = options->numberOfBuffers;
\r
5484 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5485 if ( nBuffers < 2 ) nBuffers = 3;
\r
5487 // Check the lower range of the user-specified buffer size and set
\r
5488 // (arbitrarily) to a lower bound of 32.
\r
5489 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5491 // Create the wave format structure. The data format setting will
\r
5492 // be determined later.
\r
5493 WAVEFORMATEX waveFormat;
\r
5494 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5495 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5496 waveFormat.nChannels = channels + firstChannel;
\r
5497 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5499 // Determine the device buffer size. By default, we'll use the value
\r
5500 // defined above (32K), but we will grow it to make allowances for
\r
5501 // very large software buffer sizes.
\r
5502 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5503 DWORD dsPointerLeadTime = 0;
\r
// ohandle/bhandle carry the COM object and buffer pointers into DsHandle below.
5505 void *ohandle = 0, *bhandle = 0;
\r
// ---------------- OUTPUT (playback) setup ----------------
5507 if ( mode == OUTPUT ) {
\r
5509 LPDIRECTSOUND output;
\r
5510 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5511 if ( FAILED( result ) ) {
\r
5512 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5513 errorText_ = errorStream_.str();
\r
5518 outCaps.dwSize = sizeof( outCaps );
\r
5519 result = output->GetCaps( &outCaps );
\r
5520 if ( FAILED( result ) ) {
\r
5521 output->Release();
\r
5522 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5523 errorText_ = errorStream_.str();
\r
5527 // Check channel information.
\r
5528 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5529 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5530 errorText_ = errorStream_.str();
\r
5534 // Check format information. Use 16-bit format unless not
\r
5535 // supported or user requests 8-bit.
\r
5536 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5537 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5538 waveFormat.wBitsPerSample = 16;
\r
5539 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5542 waveFormat.wBitsPerSample = 8;
\r
5543 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5545 stream_.userFormat = format;
\r
5547 // Update wave format structure and buffer information.
\r
5548 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5549 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5550 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5552 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5553 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5554 dsBufferSize *= 2;
\r
5556 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5557 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5558 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5559 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5560 if ( FAILED( result ) ) {
\r
5561 output->Release();
\r
5562 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5563 errorText_ = errorStream_.str();
\r
5567 // Even though we will write to the secondary buffer, we need to
\r
5568 // access the primary buffer to set the correct output format
\r
5569 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5570 // buffer description.
\r
5571 DSBUFFERDESC bufferDescription;
\r
5572 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5573 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5574 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5576 // Obtain the primary buffer
\r
5577 LPDIRECTSOUNDBUFFER buffer;
\r
5578 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5579 if ( FAILED( result ) ) {
\r
5580 output->Release();
\r
5581 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5582 errorText_ = errorStream_.str();
\r
5586 // Set the primary DS buffer sound format.
\r
5587 result = buffer->SetFormat( &waveFormat );
\r
5588 if ( FAILED( result ) ) {
\r
5589 output->Release();
\r
5590 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5591 errorText_ = errorStream_.str();
\r
5595 // Setup the secondary DS buffer description.
\r
5596 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5597 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5598 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5599 DSBCAPS_GLOBALFOCUS |
\r
5600 DSBCAPS_GETCURRENTPOSITION2 |
\r
5601 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5602 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5603 bufferDescription.lpwfxFormat = &waveFormat;
\r
5605 // Try to create the secondary DS buffer. If that doesn't work,
\r
5606 // try to use software mixing. Otherwise, there's a problem.
\r
5607 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5608 if ( FAILED( result ) ) {
\r
// Hardware mixing failed; retry once with software mixing before giving up.
5609 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5610 DSBCAPS_GLOBALFOCUS |
\r
5611 DSBCAPS_GETCURRENTPOSITION2 |
\r
5612 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5613 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5614 if ( FAILED( result ) ) {
\r
5615 output->Release();
\r
5616 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5617 errorText_ = errorStream_.str();
\r
5622 // Get the buffer size ... might be different from what we specified.
\r
5624 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5625 result = buffer->GetCaps( &dsbcaps );
\r
5626 if ( FAILED( result ) ) {
\r
5627 output->Release();
\r
5628 buffer->Release();
\r
5629 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5630 errorText_ = errorStream_.str();
\r
5634 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5636 // Lock the DS buffer
\r
5639 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5640 if ( FAILED( result ) ) {
\r
5641 output->Release();
\r
5642 buffer->Release();
\r
5643 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5644 errorText_ = errorStream_.str();
\r
5648 // Zero the DS buffer
\r
5649 ZeroMemory( audioPtr, dataLen );
\r
5651 // Unlock the DS buffer
\r
5652 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5653 if ( FAILED( result ) ) {
\r
5654 output->Release();
\r
5655 buffer->Release();
\r
5656 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5657 errorText_ = errorStream_.str();
\r
// Hand the COM object and secondary buffer off to the DsHandle below.
5661 ohandle = (void *) output;
\r
5662 bhandle = (void *) buffer;
\r
// ---------------- INPUT (capture) setup ----------------
5665 if ( mode == INPUT ) {
\r
5667 LPDIRECTSOUNDCAPTURE input;
\r
5668 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5669 if ( FAILED( result ) ) {
\r
5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5671 errorText_ = errorStream_.str();
\r
5676 inCaps.dwSize = sizeof( inCaps );
\r
5677 result = input->GetCaps( &inCaps );
\r
5678 if ( FAILED( result ) ) {
\r
5680 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5681 errorText_ = errorStream_.str();
\r
5685 // Check channel information.
\r
5686 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5687 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5691 // Check format information. Use 16-bit format unless user
\r
5692 // requests 8-bit.
\r
5693 DWORD deviceFormats;
\r
5694 if ( channels + firstChannel == 2 ) {
\r
// Stereo: test against the stereo 8-bit capability bits.
5695 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5696 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5697 waveFormat.wBitsPerSample = 8;
\r
5698 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5700 else { // assume 16-bit is supported
\r
5701 waveFormat.wBitsPerSample = 16;
\r
5702 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5705 else { // channel == 1
\r
// Mono: test against the mono 8-bit capability bits.
5706 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5707 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5708 waveFormat.wBitsPerSample = 8;
\r
5709 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5711 else { // assume 16-bit is supported
\r
5712 waveFormat.wBitsPerSample = 16;
\r
5713 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5716 stream_.userFormat = format;
\r
5718 // Update wave format structure and buffer information.
\r
5719 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5720 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5721 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5723 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5724 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5725 dsBufferSize *= 2;
\r
5727 // Setup the secondary DS buffer description.
\r
5728 DSCBUFFERDESC bufferDescription;
\r
5729 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5730 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5731 bufferDescription.dwFlags = 0;
\r
5732 bufferDescription.dwReserved = 0;
\r
5733 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5734 bufferDescription.lpwfxFormat = &waveFormat;
\r
5736 // Create the capture buffer.
\r
5737 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5738 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5739 if ( FAILED( result ) ) {
\r
5741 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5742 errorText_ = errorStream_.str();
\r
5746 // Get the buffer size ... might be different from what we specified.
\r
5747 DSCBCAPS dscbcaps;
\r
5748 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5749 result = buffer->GetCaps( &dscbcaps );
\r
5750 if ( FAILED( result ) ) {
\r
5752 buffer->Release();
\r
5753 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5754 errorText_ = errorStream_.str();
\r
5758 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5760 // NOTE: We could have a problem here if this is a duplex stream
\r
5761 // and the play and capture hardware buffer sizes are different
\r
5762 // (I'm actually not sure if that is a problem or not).
\r
5763 // Currently, we are not verifying that.
\r
5765 // Lock the capture buffer
\r
5768 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5769 if ( FAILED( result ) ) {
\r
5771 buffer->Release();
\r
5772 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5773 errorText_ = errorStream_.str();
\r
5777 // Zero the buffer
\r
5778 ZeroMemory( audioPtr, dataLen );
\r
5780 // Unlock the buffer
\r
5781 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5782 if ( FAILED( result ) ) {
\r
5784 buffer->Release();
\r
5785 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5786 errorText_ = errorStream_.str();
\r
// Hand the capture object and capture buffer off to the DsHandle below.
5790 ohandle = (void *) input;
\r
5791 bhandle = (void *) buffer;
\r
5794 // Set various stream parameters
\r
5795 DsHandle *handle = 0;
\r
5796 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5797 stream_.nUserChannels[mode] = channels;
\r
5798 stream_.bufferSize = *bufferSize;
\r
5799 stream_.channelOffset[mode] = firstChannel;
\r
5800 stream_.deviceInterleaved[mode] = true;
\r
5801 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5802 else stream_.userInterleaved = true;
\r
5804 // Set flag for buffer conversion
\r
// Conversion is needed on a channel-count, format, or interleaving mismatch
// between the user-facing and device-facing sides of the stream.
5805 stream_.doConvertBuffer[mode] = false;
\r
5806 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5807 stream_.doConvertBuffer[mode] = true;
\r
5808 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5809 stream_.doConvertBuffer[mode] = true;
\r
5810 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5811 stream_.nUserChannels[mode] > 1 )
\r
5812 stream_.doConvertBuffer[mode] = true;
\r
5814 // Allocate necessary internal buffers
\r
5815 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5816 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5817 if ( stream_.userBuffer[mode] == NULL ) {
\r
5818 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5822 if ( stream_.doConvertBuffer[mode] ) {
\r
5824 bool makeBuffer = true;
\r
5825 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5826 if ( mode == INPUT ) {
\r
// For duplex streams the device buffer can be shared if the output side
// already allocated one at least as large.
5827 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5828 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5829 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5833 if ( makeBuffer ) {
\r
5834 bufferBytes *= *bufferSize;
\r
5835 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5836 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5837 if ( stream_.deviceBuffer == NULL ) {
\r
5838 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5844 // Allocate our DsHandle structures for the stream.
\r
5845 if ( stream_.apiHandle == 0 ) {
\r
5847 handle = new DsHandle;
\r
5849 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this is the DS backend --
// looks like a copy/paste slip; confirm against upstream before changing.
5850 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5854 // Create a manual-reset event.
\r
// Used to signal stream-drain completion from the callback thread.
5855 handle->condition = CreateEvent( NULL, // no security
\r
5856 TRUE, // manual-reset
\r
5857 FALSE, // non-signaled initially
\r
5858 NULL ); // unnamed
\r
5859 stream_.apiHandle = (void *) handle;
\r
5862 handle = (DsHandle *) stream_.apiHandle;
\r
5863 handle->id[mode] = ohandle;
\r
5864 handle->buffer[mode] = bhandle;
\r
5865 handle->dsBufferSize[mode] = dsBufferSize;
\r
5866 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5868 stream_.device[mode] = device;
\r
5869 stream_.state = STREAM_STOPPED;
\r
5870 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5871 // We had already set up an output stream.
\r
5872 stream_.mode = DUPLEX;
\r
5874 stream_.mode = mode;
\r
5875 stream_.nBuffers = nBuffers;
\r
5876 stream_.sampleRate = sampleRate;
\r
5878 // Setup the buffer conversion information structure.
\r
5879 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5881 // Setup the callback thread.
\r
// Started only once per stream (first successful probeDeviceOpen call).
5882 if ( stream_.callbackInfo.isRunning == false ) {
\r
5883 unsigned threadId;
\r
5884 stream_.callbackInfo.isRunning = true;
\r
5885 stream_.callbackInfo.object = (void *) this;
\r
5886 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5887 &stream_.callbackInfo, 0, &threadId );
\r
5888 if ( stream_.callbackInfo.thread == 0 ) {
\r
5889 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5893 // Boost DS thread priority
\r
5894 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// -------- error path (label and some lines missing from this extraction) --------
// Releases whichever DirectSound objects/buffers were acquired, destroys the
// condition event and DsHandle, frees user/device buffers, and closes the stream.
5900 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5901 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5902 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5903 if ( buffer ) buffer->Release();
\r
5904 object->Release();
\r
5906 if ( handle->buffer[1] ) {
\r
5907 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5908 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5909 if ( buffer ) buffer->Release();
\r
5910 object->Release();
\r
5912 CloseHandle( handle->condition );
\r
5914 stream_.apiHandle = 0;
\r
5917 for ( int i=0; i<2; i++ ) {
\r
5918 if ( stream_.userBuffer[i] ) {
\r
5919 free( stream_.userBuffer[i] );
\r
5920 stream_.userBuffer[i] = 0;
\r
5924 if ( stream_.deviceBuffer ) {
\r
5925 free( stream_.deviceBuffer );
\r
5926 stream_.deviceBuffer = 0;
\r
5929 stream_.state = STREAM_CLOSED;
\r
// Closes an open stream: stops the callback thread, releases the
// DirectSound playback/capture objects and buffers held in the DsHandle,
// frees the conversion buffers, and marks the stream CLOSED.
// Emits a WARNING (and returns) if no stream is open.
// (Extraction note: brace-only lines are missing from this chunk;
// only comments were added, surviving tokens untouched.)
5933 void RtApiDs :: closeStream()
\r
5935 if ( stream_.state == STREAM_CLOSED ) {
\r
5936 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5937 error( RtAudioError::WARNING );
\r
5941 // Stop the callback thread.
\r
// Clearing isRunning makes the callback loop exit; then join and close it.
5942 stream_.callbackInfo.isRunning = false;
\r
5943 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5944 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5946 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release the output (render) side: secondary buffer first, then the
// DirectSound object itself.
5948 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5949 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5950 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5953 buffer->Release();
\r
5955 object->Release();
\r
// Release the input (capture) side likewise.
5957 if ( handle->buffer[1] ) {
\r
5958 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5959 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5962 buffer->Release();
\r
5964 object->Release();
\r
// Destroy the drain-signal event and the DsHandle itself.
5966 CloseHandle( handle->condition );
\r
5968 stream_.apiHandle = 0;
\r
// Free the per-direction user buffers and the shared device buffer.
5971 for ( int i=0; i<2; i++ ) {
\r
5972 if ( stream_.userBuffer[i] ) {
\r
5973 free( stream_.userBuffer[i] );
\r
5974 stream_.userBuffer[i] = 0;
\r
5978 if ( stream_.deviceBuffer ) {
\r
5979 free( stream_.deviceBuffer );
\r
5980 stream_.deviceBuffer = 0;
\r
5983 stream_.mode = UNINITIALIZED;
\r
5984 stream_.state = STREAM_CLOSED;
\r
// Starts a stopped stream: raises the multimedia timer resolution,
// primes duplex-mode preroll bookkeeping, starts the playback buffer
// looping (Play) and/or the capture buffer (Start), resets the drain
// state, and marks the stream RUNNING.  Emits a WARNING (and returns)
// if the stream is already running; raises SYSTEM_ERROR on DS failures.
// (Extraction note: brace-only lines are missing from this chunk;
// only comments were added, surviving tokens untouched.)
5987 void RtApiDs :: startStream()
\r
5990 if ( stream_.state == STREAM_RUNNING ) {
\r
5991 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5992 error( RtAudioError::WARNING );
\r
5996 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5998 // Increase scheduler frequency on lesser windows (a side-effect of
\r
5999 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6000 // this is already in effect.
\r
// NOTE: must be balanced by the timeEndPeriod( 1 ) call in stopStream().
6001 timeBeginPeriod( 1 );
\r
6003 buffersRolling = false;
\r
6004 duplexPrerollBytes = 0;
\r
6006 if ( stream_.mode == DUPLEX ) {
\r
6007 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6008 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6011 HRESULT result = 0;
\r
// Start the output (render) buffer looping.
6012 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6014 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6015 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6016 if ( FAILED( result ) ) {
\r
6017 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6018 errorText_ = errorStream_.str();
\r
// Start the input (capture) buffer looping.
6023 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6025 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6026 result = buffer->Start( DSCBSTART_LOOPING );
\r
6027 if ( FAILED( result ) ) {
\r
6028 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6029 errorText_ = errorStream_.str();
\r
// Reset the drain machinery so a prior stop/drain cannot leak into this run.
6034 handle->drainCounter = 0;
\r
6035 handle->internalDrain = false;
\r
6036 ResetEvent( handle->condition );
\r
6037 stream_.state = STREAM_RUNNING;
\r
6040 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stops a running stream.  For output/duplex it first lets the stream
// drain (drainCounter / condition-event handshake with the callback
// thread), then under the stream mutex stops each DirectSound buffer,
// locks it, zeroes its contents so a restart does not replay stale
// audio, unlocks it, and rewinds the internal buffer pointer.  Finally
// restores the scheduler timer period and reports SYSTEM_ERROR if any
// DS call failed.  Emits a WARNING if the stream is already stopped.
// (Extraction note: brace-only lines are missing from this chunk;
// only comments were added, surviving tokens untouched.)
6043 void RtApiDs :: stopStream()
\r
6046 if ( stream_.state == STREAM_STOPPED ) {
\r
6047 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6048 error( RtAudioError::WARNING );
\r
6052 HRESULT result = 0;
\r
6055 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// ---- output / duplex side ----
6056 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Ask the callback thread to drain and block until it signals completion.
6057 if ( handle->drainCounter == 0 ) {
\r
6058 handle->drainCounter = 2;
\r
6059 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6062 stream_.state = STREAM_STOPPED;
\r
6064 MUTEX_LOCK( &stream_.mutex );
\r
6066 // Stop the buffer and clear memory
\r
6067 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6068 result = buffer->Stop();
\r
6069 if ( FAILED( result ) ) {
\r
6070 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6071 errorText_ = errorStream_.str();
\r
6075 // Lock the buffer and clear it so that if we start to play again,
\r
6076 // we won't have old data playing.
\r
6077 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6078 if ( FAILED( result ) ) {
\r
6079 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6080 errorText_ = errorStream_.str();
\r
6084 // Zero the DS buffer
\r
6085 ZeroMemory( audioPtr, dataLen );
\r
6087 // Unlock the DS buffer
\r
6088 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6089 if ( FAILED( result ) ) {
\r
6090 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6091 errorText_ = errorStream_.str();
\r
6095 // If we start playing again, we must begin at beginning of buffer.
\r
6096 handle->bufferPointer[0] = 0;
\r
// ---- input / duplex side ----
6099 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6100 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6104 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex is already held from the output branch above.
6106 if ( stream_.mode != DUPLEX )
\r
6107 MUTEX_LOCK( &stream_.mutex );
\r
6109 result = buffer->Stop();
\r
6110 if ( FAILED( result ) ) {
\r
6111 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6112 errorText_ = errorStream_.str();
\r
6116 // Lock the buffer and clear it so that if we start to play again,
\r
6117 // we won't have old data playing.
\r
6118 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6119 if ( FAILED( result ) ) {
\r
6120 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6121 errorText_ = errorStream_.str();
\r
6125 // Zero the DS buffer
\r
6126 ZeroMemory( audioPtr, dataLen );
\r
6128 // Unlock the DS buffer
\r
6129 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6130 if ( FAILED( result ) ) {
\r
6131 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6132 errorText_ = errorStream_.str();
\r
6136 // If we start recording again, we must begin at beginning of buffer.
\r
6137 handle->bufferPointer[1] = 0;
\r
// Balance the timeBeginPeriod( 1 ) issued in startStream().
6141 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6142 MUTEX_UNLOCK( &stream_.mutex );
\r
6144 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts a running stream without draining: sets drainCounter past the
// "user requested" state so the callback thread stops immediately instead
// of playing out queued data.  Emits a WARNING if already stopped.
// (Extraction note: the trailing lines of this function -- presumably a
// stopStream() call and the closing brace -- are missing from this chunk;
// only comments were added, surviving tokens untouched.)
6147 void RtApiDs :: abortStream()
\r
6150 if ( stream_.state == STREAM_STOPPED ) {
\r
6151 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6152 error( RtAudioError::WARNING );
\r
6156 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// drainCounter == 2 signals "stop requested by user" to the callback loop.
6157 handle->drainCounter = 2;
\r
6162 void RtApiDs :: callbackEvent()
\r
6164 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6165 Sleep( 50 ); // sleep 50 milliseconds
\r
6169 if ( stream_.state == STREAM_CLOSED ) {
\r
6170 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6171 error( RtAudioError::WARNING );
\r
6175 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6176 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6178 // Check if we were draining the stream and signal is finished.
\r
6179 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6181 stream_.state = STREAM_STOPPING;
\r
6182 if ( handle->internalDrain == false )
\r
6183 SetEvent( handle->condition );
\r
6189 // Invoke user callback to get fresh output data UNLESS we are
\r
6190 // draining stream.
\r
6191 if ( handle->drainCounter == 0 ) {
\r
6192 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6193 double streamTime = getStreamTime();
\r
6194 RtAudioStreamStatus status = 0;
\r
6195 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6196 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6197 handle->xrun[0] = false;
\r
6199 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6200 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6201 handle->xrun[1] = false;
\r
6203 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6204 stream_.bufferSize, streamTime, status, info->userData );
\r
6205 if ( cbReturnValue == 2 ) {
\r
6206 stream_.state = STREAM_STOPPING;
\r
6207 handle->drainCounter = 2;
\r
6211 else if ( cbReturnValue == 1 ) {
\r
6212 handle->drainCounter = 1;
\r
6213 handle->internalDrain = true;
\r
6218 DWORD currentWritePointer, safeWritePointer;
\r
6219 DWORD currentReadPointer, safeReadPointer;
\r
6220 UINT nextWritePointer;
\r
6222 LPVOID buffer1 = NULL;
\r
6223 LPVOID buffer2 = NULL;
\r
6224 DWORD bufferSize1 = 0;
\r
6225 DWORD bufferSize2 = 0;
\r
6230 MUTEX_LOCK( &stream_.mutex );
\r
6231 if ( stream_.state == STREAM_STOPPED ) {
\r
6232 MUTEX_UNLOCK( &stream_.mutex );
\r
6236 if ( buffersRolling == false ) {
\r
6237 if ( stream_.mode == DUPLEX ) {
\r
6238 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6240 // It takes a while for the devices to get rolling. As a result,
\r
6241 // there's no guarantee that the capture and write device pointers
\r
6242 // will move in lockstep. Wait here for both devices to start
\r
6243 // rolling, and then set our buffer pointers accordingly.
\r
6244 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6245 // bytes later than the write buffer.
\r
6247 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6248 // take place between the two GetCurrentPosition calls... but I'm
\r
6249 // really not sure how to solve the problem. Temporarily boost to
\r
6250 // Realtime priority, maybe; but I'm not sure what priority the
\r
6251 // DirectSound service threads run at. We *should* be roughly
\r
6252 // within a ms or so of correct.
\r
6254 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6255 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6257 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6259 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6260 if ( FAILED( result ) ) {
\r
6261 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6262 errorText_ = errorStream_.str();
\r
6263 error( RtAudioError::SYSTEM_ERROR );
\r
6266 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6267 if ( FAILED( result ) ) {
\r
6268 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6269 errorText_ = errorStream_.str();
\r
6270 error( RtAudioError::SYSTEM_ERROR );
\r
6274 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6275 if ( FAILED( result ) ) {
\r
6276 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6277 errorText_ = errorStream_.str();
\r
6278 error( RtAudioError::SYSTEM_ERROR );
\r
6281 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6282 if ( FAILED( result ) ) {
\r
6283 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6284 errorText_ = errorStream_.str();
\r
6285 error( RtAudioError::SYSTEM_ERROR );
\r
6288 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6292 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6294 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6295 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6296 handle->bufferPointer[1] = safeReadPointer;
\r
6298 else if ( stream_.mode == OUTPUT ) {
\r
6300 // Set the proper nextWritePosition after initial startup.
\r
6301 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6302 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6303 if ( FAILED( result ) ) {
\r
6304 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6305 errorText_ = errorStream_.str();
\r
6306 error( RtAudioError::SYSTEM_ERROR );
\r
6309 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6310 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6313 buffersRolling = true;
\r
6316 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6318 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6320 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6321 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6322 bufferBytes *= formatBytes( stream_.userFormat );
\r
6323 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6326 // Setup parameters and do buffer conversion if necessary.
\r
6327 if ( stream_.doConvertBuffer[0] ) {
\r
6328 buffer = stream_.deviceBuffer;
\r
6329 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6330 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6331 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6334 buffer = stream_.userBuffer[0];
\r
6335 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6336 bufferBytes *= formatBytes( stream_.userFormat );
\r
6339 // No byte swapping necessary in DirectSound implementation.
\r
6341 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6342 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6344 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6345 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6347 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6348 nextWritePointer = handle->bufferPointer[0];
\r
6350 DWORD endWrite, leadPointer;
\r
6352 // Find out where the read and "safe write" pointers are.
\r
6353 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6354 if ( FAILED( result ) ) {
\r
6355 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6356 errorText_ = errorStream_.str();
\r
6357 error( RtAudioError::SYSTEM_ERROR );
\r
6361 // We will copy our output buffer into the region between
\r
6362 // safeWritePointer and leadPointer. If leadPointer is not
\r
6363 // beyond the next endWrite position, wait until it is.
\r
6364 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6365 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6366 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6367 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6368 endWrite = nextWritePointer + bufferBytes;
\r
6370 // Check whether the entire write region is behind the play pointer.
\r
6371 if ( leadPointer >= endWrite ) break;
\r
6373 // If we are here, then we must wait until the leadPointer advances
\r
6374 // beyond the end of our next write region. We use the
\r
6375 // Sleep() function to suspend operation until that happens.
\r
6376 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6377 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6378 if ( millis < 1.0 ) millis = 1.0;
\r
6379 Sleep( (DWORD) millis );
\r
6382 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6383 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6384 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6385 handle->xrun[0] = true;
\r
6386 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6387 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6388 handle->bufferPointer[0] = nextWritePointer;
\r
6389 endWrite = nextWritePointer + bufferBytes;
\r
6392 // Lock free space in the buffer
\r
6393 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6394 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6395 if ( FAILED( result ) ) {
\r
6396 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6397 errorText_ = errorStream_.str();
\r
6398 error( RtAudioError::SYSTEM_ERROR );
\r
6402 // Copy our buffer into the DS buffer
\r
6403 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6404 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6406 // Update our buffer offset and unlock sound buffer
\r
6407 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6408 if ( FAILED( result ) ) {
\r
6409 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6410 errorText_ = errorStream_.str();
\r
6411 error( RtAudioError::SYSTEM_ERROR );
\r
6414 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6415 handle->bufferPointer[0] = nextWritePointer;
\r
6418 // Don't bother draining input
\r
6419 if ( handle->drainCounter ) {
\r
6420 handle->drainCounter++;
\r
6424 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6426 // Setup parameters.
\r
6427 if ( stream_.doConvertBuffer[1] ) {
\r
6428 buffer = stream_.deviceBuffer;
\r
6429 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6430 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6433 buffer = stream_.userBuffer[1];
\r
6434 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6435 bufferBytes *= formatBytes( stream_.userFormat );
\r
6438 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6439 long nextReadPointer = handle->bufferPointer[1];
\r
6440 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6442 // Find out where the write and "safe read" pointers are.
\r
6443 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6444 if ( FAILED( result ) ) {
\r
6445 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6446 errorText_ = errorStream_.str();
\r
6447 error( RtAudioError::SYSTEM_ERROR );
\r
6451 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6452 DWORD endRead = nextReadPointer + bufferBytes;
\r
6454 // Handling depends on whether we are INPUT or DUPLEX.
\r
6455 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6456 // then a wait here will drag the write pointers into the forbidden zone.
\r
6458 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6459 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6460 // practical way to sync up the read and write pointers reliably, given the
\r
6461 // the very complex relationship between phase and increment of the read and write
\r
6464 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6465 // provide a pre-roll period of 0.5 seconds in which we return
\r
6466 // zeros from the read buffer while the pointers sync up.
\r
6468 if ( stream_.mode == DUPLEX ) {
\r
6469 if ( safeReadPointer < endRead ) {
\r
6470 if ( duplexPrerollBytes <= 0 ) {
\r
6471 // Pre-roll time over. Be more agressive.
\r
6472 int adjustment = endRead-safeReadPointer;
\r
6474 handle->xrun[1] = true;
\r
6476 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6477 // and perform fine adjustments later.
\r
6478 // - small adjustments: back off by twice as much.
\r
6479 if ( adjustment >= 2*bufferBytes )
\r
6480 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6482 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6484 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6488 // In pre=roll time. Just do it.
\r
6489 nextReadPointer = safeReadPointer - bufferBytes;
\r
6490 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6492 endRead = nextReadPointer + bufferBytes;
\r
6495 else { // mode == INPUT
\r
6496 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6497 // See comments for playback.
\r
6498 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6499 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6500 if ( millis < 1.0 ) millis = 1.0;
\r
6501 Sleep( (DWORD) millis );
\r
6503 // Wake up and find out where we are now.
\r
6504 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6505 if ( FAILED( result ) ) {
\r
6506 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6507 errorText_ = errorStream_.str();
\r
6508 error( RtAudioError::SYSTEM_ERROR );
\r
6512 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6516 // Lock free space in the buffer
\r
6517 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6518 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6519 if ( FAILED( result ) ) {
\r
6520 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6521 errorText_ = errorStream_.str();
\r
6522 error( RtAudioError::SYSTEM_ERROR );
\r
6526 if ( duplexPrerollBytes <= 0 ) {
\r
6527 // Copy our buffer into the DS buffer
\r
6528 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6529 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6532 memset( buffer, 0, bufferSize1 );
\r
6533 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6534 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6537 // Update our buffer offset and unlock sound buffer
\r
6538 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6539 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6540 if ( FAILED( result ) ) {
\r
6541 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6542 errorText_ = errorStream_.str();
\r
6543 error( RtAudioError::SYSTEM_ERROR );
\r
6546 handle->bufferPointer[1] = nextReadPointer;
\r
6548 // No byte swapping necessary in DirectSound implementation.
\r
6550 // If necessary, convert 8-bit data from unsigned to signed.
\r
6551 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6552 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6554 // Do buffer conversion if necessary.
\r
6555 if ( stream_.doConvertBuffer[1] )
\r
6556 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6560 MUTEX_UNLOCK( &stream_.mutex );
\r
6561 RtApi::tickStreamTime();
\r
6564 // Definitions for utility functions and callbacks
\r
6565 // specific to the DirectSound implementation.
\r
// Worker-thread entry point (_beginthreadex-style signature) for the
// DirectSound stream. Loops invoking the API object's callbackEvent() until
// another thread clears CallbackInfo::isRunning, then ends the thread.
// NOTE(review): the embedded original line numbers below are non-contiguous —
// this chunk is missing lines (opening/closing braces and the final return);
// the visible code is preserved byte-for-byte.
6567 static unsigned __stdcall callbackHandler( void *ptr )

// Recover the CallbackInfo passed at thread creation and the owning RtApiDs.
6569 CallbackInfo *info = (CallbackInfo *) ptr;

6570 RtApiDs *object = (RtApiDs *) info->object;

// Cache a pointer to the run flag; polled once per loop iteration.
6571 bool* isRunning = &info->isRunning;

6573 while ( *isRunning == true ) {

6574 object->callbackEvent();

// CRT-aware thread termination with exit code 0.
6577 _endthreadex( 0 );
\r
6581 #include "tchar.h"
\r
// Convert a Windows TCHAR string to a std::string. In UNICODE builds the
// wide string is transcoded to UTF-8 via WideCharToMultiByte; otherwise the
// narrow string is copied directly.
// NOTE(review): embedded original line numbers are non-contiguous — the
// #else/#endif and final return are missing from this chunk.
6583 static std::string convertTChar( LPCTSTR name )

6585 #if defined( UNICODE ) || defined( _UNICODE )

// First call (NULL buffer) sizes the output, including the terminating NUL.
6586 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);

// length-1 payload characters: exclude the NUL counted above.
6587 std::string s( length-1, '\0' );

6588 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);

6590 std::string s( name );
\r
// DirectSound device-enumeration callback. For each enumerated device it
// verifies the device is usable (capture: reports channels and formats;
// render: primary mono or stereo capability), then records the device name
// and GUID in the shared dsDevices vector carried in via lpContext — either
// updating an existing entry with the same name or appending a new one.
// Returning TRUE continues enumeration.
// NOTE(review): embedded original line numbers are non-contiguous — some
// braces/else lines and local declarations (hr, caps, device) are missing
// from this chunk; visible code is preserved byte-for-byte.
6596 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

6597 LPCTSTR description,

6598 LPCTSTR /*module*/,

6599 LPVOID lpContext )

// Unpack the probe context: direction flag plus the device list to fill.
6601 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;

6602 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;

6605 bool validDevice = false;

// Capture-side probe: open the device and check its capture capabilities.
6606 if ( probeInfo.isInput == true ) {

6608 LPDIRECTSOUNDCAPTURE object;

6610 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );

// Unusable device: skip it but keep enumerating.
6611 if ( hr != DS_OK ) return TRUE;

6613 caps.dwSize = sizeof(caps);

6614 hr = object->GetCaps( &caps );

6615 if ( hr == DS_OK ) {

6616 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )

6617 validDevice = true;

6619 object->Release();

// Render-side probe: require a primary mono or stereo output capability.
6623 LPDIRECTSOUND object;

6624 hr = DirectSoundCreate( lpguid, &object, NULL );

6625 if ( hr != DS_OK ) return TRUE;

6627 caps.dwSize = sizeof(caps);

6628 hr = object->GetCaps( &caps );

6629 if ( hr == DS_OK ) {

6630 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )

6631 validDevice = true;

6633 object->Release();

6636 // If good device, then save its name and guid.

6637 std::string name = convertTChar( description );

6638 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )

// A NULL GUID denotes the system default device.
6639 if ( lpguid == NULL )

6640 name = "Default Device";

6641 if ( validDevice ) {

// Merge with an existing entry of the same name: record the GUID for this
// direction (id/validId index 1 = capture, 0 = render).
6642 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {

6643 if ( dsDevices[i].name == name ) {

6644 dsDevices[i].found = true;

6645 if ( probeInfo.isInput ) {

6646 dsDevices[i].id[1] = lpguid;

6647 dsDevices[i].validId[1] = true;

6650 dsDevices[i].id[0] = lpguid;

6651 dsDevices[i].validId[0] = true;

// No entry with this name yet: create and append a fresh DsDevice record.
6658 device.name = name;

6659 device.found = true;

6660 if ( probeInfo.isInput ) {

6661 device.id[1] = lpguid;

6662 device.validId[1] = true;

6665 device.id[0] = lpguid;

6666 device.validId[0] = true;

6668 dsDevices.push_back( device );
\r
// Map a DirectSound error code (DSERR_*) to a short human-readable string
// for use in RtApiDs error messages. Unknown codes fall through to a
// generic message.
// NOTE(review): embedded original line numbers are non-contiguous — the
// switch statement scaffolding and closing braces are missing from this
// chunk; visible code is preserved byte-for-byte.
6674 static const char* getErrorString( int code )

6678 case DSERR_ALLOCATED:

6679 return "Already allocated";

6681 case DSERR_CONTROLUNAVAIL:

6682 return "Control unavailable";

6684 case DSERR_INVALIDPARAM:

6685 return "Invalid parameter";

6687 case DSERR_INVALIDCALL:

6688 return "Invalid call";

6690 case DSERR_GENERIC:

6691 return "Generic error";

6693 case DSERR_PRIOLEVELNEEDED:

6694 return "Priority level needed";

6696 case DSERR_OUTOFMEMORY:

6697 return "Out of memory";

6699 case DSERR_BADFORMAT:

6700 return "The sample rate or the channel format is not supported";

6702 case DSERR_UNSUPPORTED:

6703 return "Not supported";

6705 case DSERR_NODRIVER:

6706 return "No driver";

6708 case DSERR_ALREADYINITIALIZED:

6709 return "Already initialized";

6711 case DSERR_NOAGGREGATION:

6712 return "No aggregation";

6714 case DSERR_BUFFERLOST:

6715 return "Buffer lost";

6717 case DSERR_OTHERAPPHASPRIO:

6718 return "Another application already has priority";

6720 case DSERR_UNINITIALIZED:

6721 return "Uninitialized";

// Default: code not in the table above.
6724 return "DirectSound unknown error";
\r
6727 //******************** End of __WINDOWS_DS__ *********************//
\r
6731 #if defined(__LINUX_ALSA__)
\r
6733 #include <alsa/asoundlib.h>
\r
6734 #include <unistd.h>
\r
6736 // A structure to hold various information related to the ALSA API
\r
6737 // implementation.
\r
// Per-stream bookkeeping for the ALSA backend.
// NOTE(review): embedded original line numbers are non-contiguous — some
// members (e.g. the xrun flags set in the constructor) and the closing brace
// are missing from this chunk; visible code is preserved byte-for-byte.
6738 struct AlsaHandle {

// PCM handles; by RtAudio convention index 0 is playback, 1 is capture —
// TODO confirm against the full struct definition.
6739 snd_pcm_t *handles[2];

// Presumably true when the playback/capture PCMs are linked for duplex
// operation — verify against the full implementation.
6740 bool synchronized;

// Condition variable used with the runnable flag initialized below.
6742 pthread_cond_t runnable_cv;

// Constructor initializer list: stream starts unsynchronized, not runnable,
// with both xrun flags cleared.
6746 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6749 static void *alsaCallbackHandler( void * ptr );
\r
// Default constructor: no ALSA state is acquired until a stream is opened.
6751 RtApiAlsa :: RtApiAlsa()

6753 // Nothing to do here.
\r
// Destructor: ensure any open stream is closed so ALSA resources are released.
6756 RtApiAlsa :: ~RtApiAlsa()

6758 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count available ALSA PCM devices: walk every sound card with
// snd_card_next(), open its control interface ("hw:N"), and enumerate PCM
// subdevices; finally the "default" virtual device is counted if it opens.
// Control-open and next-device failures are reported as warnings and the
// card is skipped.
// NOTE(review): embedded original line numbers are non-contiguous — the
// nDevices increments, loop braces, and final return are missing from this
// chunk; visible code is preserved byte-for-byte.
6761 unsigned int RtApiAlsa :: getDeviceCount( void )

6763 unsigned nDevices = 0;

6764 int result, subdevice, card;

6766 snd_ctl_t *handle;

6768 // Count cards and devices

// card is set to the first card index (or -1 if none).
6770 snd_card_next( &card );

6771 while ( card >= 0 ) {

6772 sprintf( name, "hw:%d", card );

6773 result = snd_ctl_open( &handle, name, 0 );

6774 if ( result < 0 ) {

6775 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";

6776 errorText_ = errorStream_.str();

6777 error( RtAudioError::WARNING );

// Enumerate PCM devices on this card; subdevice < 0 means no more.
6782 result = snd_ctl_pcm_next_device( handle, &subdevice );

6783 if ( result < 0 ) {

6784 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

6785 errorText_ = errorStream_.str();

6786 error( RtAudioError::WARNING );

6789 if ( subdevice < 0 )

6794 snd_ctl_close( handle );

6795 snd_card_next( &card );

// Count the "default" device too, if it can be opened.
6798 result = snd_ctl_open( &handle, "default", 0 );

6799 if (result == 0) {

6801 snd_ctl_close( handle );
\r
// Probe one ALSA device (by RtAudio's flat index) and fill a DeviceInfo:
// locate the card/subdevice name, then open it for playback and capture to
// determine channel counts, supported sample rates, and native data formats.
// Failures along the way are reported as warnings and the partially-filled
// info is returned with probed left false.
// NOTE(review): embedded original line numbers are non-contiguous — loop
// braces, increments, `return info;` statements, and the final `return` are
// missing from this chunk; visible code is preserved byte-for-byte.
6807 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )

6809 RtAudio::DeviceInfo info;

6810 info.probed = false;

6812 unsigned nDevices = 0;

6813 int result, subdevice, card;

6815 snd_ctl_t *chandle;

6817 // Count cards and devices

// Same card walk as getDeviceCount(), but here we also capture the
// "hw:card,subdevice" name string for the requested index.
6820 snd_card_next( &card );

6821 while ( card >= 0 ) {

6822 sprintf( name, "hw:%d", card );

6823 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );

6824 if ( result < 0 ) {

6825 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";

6826 errorText_ = errorStream_.str();

6827 error( RtAudioError::WARNING );

6832 result = snd_ctl_pcm_next_device( chandle, &subdevice );

6833 if ( result < 0 ) {

6834 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

6835 errorText_ = errorStream_.str();

6836 error( RtAudioError::WARNING );

6839 if ( subdevice < 0 ) break;

6840 if ( nDevices == device ) {

6841 sprintf( name, "hw:%d,%d", card, subdevice );

6847 snd_ctl_close( chandle );

6848 snd_card_next( &card );

// The "default" virtual device takes the last index.
6851 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );

6852 if ( result == 0 ) {

6853 if ( nDevices == device ) {

6854 strcpy( name, "default" );

// Validate the requested index against what we just counted.
6860 if ( nDevices == 0 ) {

6861 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";

6862 error( RtAudioError::INVALID_USE );

6866 if ( device >= nDevices ) {

6867 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";

6868 error( RtAudioError::INVALID_USE );

6874 // If a stream is already open, we cannot probe the stream devices.

6875 // Thus, use the saved results.

6876 if ( stream_.state != STREAM_CLOSED &&

6877 ( stream_.device[0] == device || stream_.device[1] == device ) ) {

6878 snd_ctl_close( chandle );

6879 if ( device >= devices_.size() ) {

6880 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";

6881 error( RtAudioError::WARNING );

6884 return devices_[ device ];

// Stack-allocated (alloca) ALSA parameter structures for the probes below.
6887 int openMode = SND_PCM_ASYNC;

6888 snd_pcm_stream_t stream;

6889 snd_pcm_info_t *pcminfo;

6890 snd_pcm_info_alloca( &pcminfo );

6891 snd_pcm_t *phandle;

6892 snd_pcm_hw_params_t *params;

// NOTE(review): "¶ms" below is mojibake for "&params" (HTML-entity
// corruption of "&para"); needs repair at the file level.
6893 snd_pcm_hw_params_alloca( ¶ms );

6895 // First try for playback unless default device (which has subdev -1)

6896 stream = SND_PCM_STREAM_PLAYBACK;

6897 snd_pcm_info_set_stream( pcminfo, stream );

6898 if ( subdevice != -1 ) {

6899 snd_pcm_info_set_device( pcminfo, subdevice );

6900 snd_pcm_info_set_subdevice( pcminfo, 0 );

6902 result = snd_ctl_pcm_info( chandle, pcminfo );

6903 if ( result < 0 ) {

6904 // Device probably doesn't support playback.

6905 goto captureProbe;

// Open for playback (non-blocking) and read the max output channel count.
6909 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );

6910 if ( result < 0 ) {

6911 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

6912 errorText_ = errorStream_.str();

6913 error( RtAudioError::WARNING );

6914 goto captureProbe;

6917 // The device is open ... fill the parameter structure.

6918 result = snd_pcm_hw_params_any( phandle, params );

6919 if ( result < 0 ) {

6920 snd_pcm_close( phandle );

6921 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

6922 errorText_ = errorStream_.str();

6923 error( RtAudioError::WARNING );

6924 goto captureProbe;

6927 // Get output channel information.

6928 unsigned int value;

6929 result = snd_pcm_hw_params_get_channels_max( params, &value );

6930 if ( result < 0 ) {

6931 snd_pcm_close( phandle );

6932 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";

6933 errorText_ = errorStream_.str();

6934 error( RtAudioError::WARNING );

6935 goto captureProbe;

6937 info.outputChannels = value;

6938 snd_pcm_close( phandle );

// Capture probe (the captureProbe label sits in a line missing from this
// chunk): repeat the open/hw_params dance for the capture direction.
6941 stream = SND_PCM_STREAM_CAPTURE;

6942 snd_pcm_info_set_stream( pcminfo, stream );

6944 // Now try for capture unless default device (with subdev = -1)

6945 if ( subdevice != -1 ) {

6946 result = snd_ctl_pcm_info( chandle, pcminfo );

6947 snd_ctl_close( chandle );

6948 if ( result < 0 ) {

6949 // Device probably doesn't support capture.

6950 if ( info.outputChannels == 0 ) return info;

6951 goto probeParameters;

6955 snd_ctl_close( chandle );

6957 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

6958 if ( result < 0 ) {

6959 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

6960 errorText_ = errorStream_.str();

6961 error( RtAudioError::WARNING );

6962 if ( info.outputChannels == 0 ) return info;

6963 goto probeParameters;

6966 // The device is open ... fill the parameter structure.

6967 result = snd_pcm_hw_params_any( phandle, params );

6968 if ( result < 0 ) {

6969 snd_pcm_close( phandle );

6970 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

6971 errorText_ = errorStream_.str();

6972 error( RtAudioError::WARNING );

6973 if ( info.outputChannels == 0 ) return info;

6974 goto probeParameters;

6977 result = snd_pcm_hw_params_get_channels_max( params, &value );

6978 if ( result < 0 ) {

6979 snd_pcm_close( phandle );

6980 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";

6981 errorText_ = errorStream_.str();

6982 error( RtAudioError::WARNING );

6983 if ( info.outputChannels == 0 ) return info;

6984 goto probeParameters;

6986 info.inputChannels = value;

6987 snd_pcm_close( phandle );

6989 // If device opens for both playback and capture, we determine the channels.

6990 if ( info.outputChannels > 0 && info.inputChannels > 0 )

6991 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

6993 // ALSA doesn't provide default devices so we'll use the first available one.

6994 if ( device == 0 && info.outputChannels > 0 )

6995 info.isDefaultOutput = true;

6996 if ( device == 0 && info.inputChannels > 0 )

6997 info.isDefaultInput = true;

// probeParameters section (label line missing from this chunk): reopen in
// the direction with more channels and enumerate rates/formats.
7000 // At this point, we just need to figure out the supported data

7001 // formats and sample rates. We'll proceed by opening the device in

7002 // the direction with the maximum number of channels, or playback if

7003 // they are equal. This might limit our sample rate options, but so

7006 if ( info.outputChannels >= info.inputChannels )

7007 stream = SND_PCM_STREAM_PLAYBACK;

7009 stream = SND_PCM_STREAM_CAPTURE;

7010 snd_pcm_info_set_stream( pcminfo, stream );

7012 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

7013 if ( result < 0 ) {

7014 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

7015 errorText_ = errorStream_.str();

7016 error( RtAudioError::WARNING );

7020 // The device is open ... fill the parameter structure.

7021 result = snd_pcm_hw_params_any( phandle, params );

7022 if ( result < 0 ) {

7023 snd_pcm_close( phandle );

7024 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

7025 errorText_ = errorStream_.str();

7026 error( RtAudioError::WARNING );

7030 // Test our discrete set of sample rate values.

7031 info.sampleRates.clear();

7032 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {

7033 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )

7034 info.sampleRates.push_back( SAMPLE_RATES[i] );

7036 if ( info.sampleRates.size() == 0 ) {

7037 snd_pcm_close( phandle );

7038 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";

7039 errorText_ = errorStream_.str();

7040 error( RtAudioError::WARNING );

7044 // Probe the supported data formats ... we don't care about endian-ness just yet

7045 snd_pcm_format_t format;

7046 info.nativeFormats = 0;

7047 format = SND_PCM_FORMAT_S8;

7048 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7049 info.nativeFormats |= RTAUDIO_SINT8;

7050 format = SND_PCM_FORMAT_S16;

7051 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7052 info.nativeFormats |= RTAUDIO_SINT16;

7053 format = SND_PCM_FORMAT_S24;

7054 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7055 info.nativeFormats |= RTAUDIO_SINT24;

7056 format = SND_PCM_FORMAT_S32;

7057 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7058 info.nativeFormats |= RTAUDIO_SINT32;

7059 format = SND_PCM_FORMAT_FLOAT;

7060 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7061 info.nativeFormats |= RTAUDIO_FLOAT32;

7062 format = SND_PCM_FORMAT_FLOAT64;

7063 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

7064 info.nativeFormats |= RTAUDIO_FLOAT64;

7066 // Check that we have at least one supported format

7067 if ( info.nativeFormats == 0 ) {

7068 snd_pcm_close( phandle );

7069 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";

7070 errorText_ = errorStream_.str();

7071 error( RtAudioError::WARNING );

7075 // Get the device name

// Build the friendly name "hw:cardname,subdevice" when the card name is
// available (cardname declaration is in a line missing from this chunk).
7077 result = snd_card_get_name( card, &cardname );

7078 if ( result >= 0 ) {

7079 sprintf( name, "hw:%s,%d", cardname, subdevice );

7084 // That's all ... close the device and return

7085 snd_pcm_close( phandle );

7086 info.probed = true;
\r
// Snapshot every device's DeviceInfo into devices_ so getDeviceInfo() can
// answer from the cache while a stream is open (probing an open device
// would fail).
7090 void RtApiAlsa :: saveDeviceInfo( void )

7094 unsigned int nDevices = getDeviceCount();

7095 devices_.resize( nDevices );

7096 for ( unsigned int i=0; i<nDevices; i++ )

7097 devices_[i] = getDeviceInfo( i );
\r
7100 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7101 unsigned int firstChannel, unsigned int sampleRate,
\r
7102 RtAudioFormat format, unsigned int *bufferSize,
\r
7103 RtAudio::StreamOptions *options )
\r
7106 #if defined(__RTAUDIO_DEBUG__)
\r
7107 snd_output_t *out;
\r
7108 snd_output_stdio_attach(&out, stderr, 0);
\r
7111 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7113 unsigned nDevices = 0;
\r
7114 int result, subdevice, card;
\r
7116 snd_ctl_t *chandle;
\r
7118 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7119 snprintf(name, sizeof(name), "%s", "default");
\r
7121 // Count cards and devices
\r
7123 snd_card_next( &card );
\r
7124 while ( card >= 0 ) {
\r
7125 sprintf( name, "hw:%d", card );
\r
7126 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7127 if ( result < 0 ) {
\r
7128 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7129 errorText_ = errorStream_.str();
\r
7134 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7135 if ( result < 0 ) break;
\r
7136 if ( subdevice < 0 ) break;
\r
7137 if ( nDevices == device ) {
\r
7138 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7139 snd_ctl_close( chandle );
\r
7144 snd_ctl_close( chandle );
\r
7145 snd_card_next( &card );
\r
7148 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7149 if ( result == 0 ) {
\r
7150 if ( nDevices == device ) {
\r
7151 strcpy( name, "default" );
\r
7157 if ( nDevices == 0 ) {
\r
7158 // This should not happen because a check is made before this function is called.
\r
7159 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7163 if ( device >= nDevices ) {
\r
7164 // This should not happen because a check is made before this function is called.
\r
7165 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7172 // The getDeviceInfo() function will not work for a device that is
\r
7173 // already open. Thus, we'll probe the system before opening a
\r
7174 // stream and save the results for use by getDeviceInfo().
\r
7175 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7176 this->saveDeviceInfo();
\r
7178 snd_pcm_stream_t stream;
\r
7179 if ( mode == OUTPUT )
\r
7180 stream = SND_PCM_STREAM_PLAYBACK;
\r
7182 stream = SND_PCM_STREAM_CAPTURE;
\r
7184 snd_pcm_t *phandle;
\r
7185 int openMode = SND_PCM_ASYNC;
\r
7186 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7187 if ( result < 0 ) {
\r
7188 if ( mode == OUTPUT )
\r
7189 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7191 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7192 errorText_ = errorStream_.str();
\r
7196 // Fill the parameter structure.
\r
7197 snd_pcm_hw_params_t *hw_params;
\r
7198 snd_pcm_hw_params_alloca( &hw_params );
\r
7199 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7200 if ( result < 0 ) {
\r
7201 snd_pcm_close( phandle );
\r
7202 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7203 errorText_ = errorStream_.str();
\r
7207 #if defined(__RTAUDIO_DEBUG__)
\r
7208 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7209 snd_pcm_hw_params_dump( hw_params, out );
\r
7212 // Set access ... check user preference.
\r
7213 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7214 stream_.userInterleaved = false;
\r
7215 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7216 if ( result < 0 ) {
\r
7217 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7218 stream_.deviceInterleaved[mode] = true;
\r
7221 stream_.deviceInterleaved[mode] = false;
\r
7224 stream_.userInterleaved = true;
\r
7225 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7226 if ( result < 0 ) {
\r
7227 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7228 stream_.deviceInterleaved[mode] = false;
\r
7231 stream_.deviceInterleaved[mode] = true;
\r
7234 if ( result < 0 ) {
\r
7235 snd_pcm_close( phandle );
\r
7236 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7237 errorText_ = errorStream_.str();
\r
7241 // Determine how to set the device format.
\r
7242 stream_.userFormat = format;
\r
7243 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7245 if ( format == RTAUDIO_SINT8 )
\r
7246 deviceFormat = SND_PCM_FORMAT_S8;
\r
7247 else if ( format == RTAUDIO_SINT16 )
\r
7248 deviceFormat = SND_PCM_FORMAT_S16;
\r
7249 else if ( format == RTAUDIO_SINT24 )
\r
7250 deviceFormat = SND_PCM_FORMAT_S24;
\r
7251 else if ( format == RTAUDIO_SINT32 )
\r
7252 deviceFormat = SND_PCM_FORMAT_S32;
\r
7253 else if ( format == RTAUDIO_FLOAT32 )
\r
7254 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7255 else if ( format == RTAUDIO_FLOAT64 )
\r
7256 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7258 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7259 stream_.deviceFormat[mode] = format;
\r
7263 // The user requested format is not natively supported by the device.
\r
7264 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7265 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7266 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7270 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7271 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7272 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7276 deviceFormat = SND_PCM_FORMAT_S32;
\r
7277 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7278 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7282 deviceFormat = SND_PCM_FORMAT_S24;
\r
7283 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7284 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7288 deviceFormat = SND_PCM_FORMAT_S16;
\r
7289 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7290 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7294 deviceFormat = SND_PCM_FORMAT_S8;
\r
7295 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7296 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7300 // If we get here, no supported format was found.
\r
7301 snd_pcm_close( phandle );
\r
7302 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7303 errorText_ = errorStream_.str();
\r
7307 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7308 if ( result < 0 ) {
\r
7309 snd_pcm_close( phandle );
\r
7310 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7311 errorText_ = errorStream_.str();
\r
7315 // Determine whether byte-swaping is necessary.
\r
7316 stream_.doByteSwap[mode] = false;
\r
7317 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7318 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7319 if ( result == 0 )
\r
7320 stream_.doByteSwap[mode] = true;
\r
7321 else if (result < 0) {
\r
7322 snd_pcm_close( phandle );
\r
7323 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7324 errorText_ = errorStream_.str();
\r
7329 // Set the sample rate.
\r
7330 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7331 if ( result < 0 ) {
\r
7332 snd_pcm_close( phandle );
\r
7333 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7334 errorText_ = errorStream_.str();
\r
7338 // Determine the number of channels for this device. We support a possible
\r
7339 // minimum device channel number > than the value requested by the user.
\r
7340 stream_.nUserChannels[mode] = channels;
\r
7341 unsigned int value;
\r
7342 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7343 unsigned int deviceChannels = value;
\r
7344 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7345 snd_pcm_close( phandle );
\r
7346 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7347 errorText_ = errorStream_.str();
\r
7351 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7352 if ( result < 0 ) {
\r
7353 snd_pcm_close( phandle );
\r
7354 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7355 errorText_ = errorStream_.str();
\r
7358 deviceChannels = value;
\r
7359 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7360 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7362 // Set the device channels.
\r
7363 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7364 if ( result < 0 ) {
\r
7365 snd_pcm_close( phandle );
\r
7366 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7367 errorText_ = errorStream_.str();
\r
7371 // Set the buffer (or period) size.
\r
7373 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7374 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7375 if ( result < 0 ) {
\r
7376 snd_pcm_close( phandle );
\r
7377 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7378 errorText_ = errorStream_.str();
\r
7381 *bufferSize = periodSize;
\r
7383 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7384 unsigned int periods = 0;
\r
7385 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7386 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7387 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7388 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7389 if ( result < 0 ) {
\r
7390 snd_pcm_close( phandle );
\r
7391 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7392 errorText_ = errorStream_.str();
\r
7396 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7397 // MUST be the same in both directions!
\r
7398 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7399 snd_pcm_close( phandle );
\r
7400 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7401 errorText_ = errorStream_.str();
\r
7405 stream_.bufferSize = *bufferSize;
\r
7407 // Install the hardware configuration
\r
7408 result = snd_pcm_hw_params( phandle, hw_params );
\r
7409 if ( result < 0 ) {
\r
7410 snd_pcm_close( phandle );
\r
7411 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7412 errorText_ = errorStream_.str();
\r
7416 #if defined(__RTAUDIO_DEBUG__)
\r
7417 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7418 snd_pcm_hw_params_dump( hw_params, out );
\r
7421 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7422 snd_pcm_sw_params_t *sw_params = NULL;
\r
7423 snd_pcm_sw_params_alloca( &sw_params );
\r
7424 snd_pcm_sw_params_current( phandle, sw_params );
\r
7425 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7426 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7427 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7429 // The following two settings were suggested by Theo Veenker
\r
7430 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7431 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7433 // here are two options for a fix
\r
7434 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7435 snd_pcm_uframes_t val;
\r
7436 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7437 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7439 result = snd_pcm_sw_params( phandle, sw_params );
\r
7440 if ( result < 0 ) {
\r
7441 snd_pcm_close( phandle );
\r
7442 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7443 errorText_ = errorStream_.str();
\r
7447 #if defined(__RTAUDIO_DEBUG__)
\r
7448 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7449 snd_pcm_sw_params_dump( sw_params, out );
\r
7452 // Set flags for buffer conversion
\r
7453 stream_.doConvertBuffer[mode] = false;
\r
7454 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7455 stream_.doConvertBuffer[mode] = true;
\r
7456 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7457 stream_.doConvertBuffer[mode] = true;
\r
7458 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7459 stream_.nUserChannels[mode] > 1 )
\r
7460 stream_.doConvertBuffer[mode] = true;
\r
7462 // Allocate the ApiHandle if necessary and then save.
\r
7463 AlsaHandle *apiInfo = 0;
\r
7464 if ( stream_.apiHandle == 0 ) {
\r
7466 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7468 catch ( std::bad_alloc& ) {
\r
7469 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7473 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7474 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7478 stream_.apiHandle = (void *) apiInfo;
\r
7479 apiInfo->handles[0] = 0;
\r
7480 apiInfo->handles[1] = 0;
\r
7483 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7485 apiInfo->handles[mode] = phandle;
\r
7488 // Allocate necessary internal buffers.
\r
7489 unsigned long bufferBytes;
\r
7490 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7491 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7492 if ( stream_.userBuffer[mode] == NULL ) {
\r
7493 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7497 if ( stream_.doConvertBuffer[mode] ) {
\r
7499 bool makeBuffer = true;
\r
7500 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7501 if ( mode == INPUT ) {
\r
7502 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7503 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7504 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7508 if ( makeBuffer ) {
\r
7509 bufferBytes *= *bufferSize;
\r
7510 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7511 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7512 if ( stream_.deviceBuffer == NULL ) {
\r
7513 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7519 stream_.sampleRate = sampleRate;
\r
7520 stream_.nBuffers = periods;
\r
7521 stream_.device[mode] = device;
\r
7522 stream_.state = STREAM_STOPPED;
\r
7524 // Setup the buffer conversion information structure.
\r
7525 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7527 // Setup thread if necessary.
\r
7528 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7529 // We had already set up an output stream.
\r
7530 stream_.mode = DUPLEX;
\r
7531 // Link the streams if possible.
\r
7532 apiInfo->synchronized = false;
\r
7533 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7534 apiInfo->synchronized = true;
\r
7536 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7537 error( RtAudioError::WARNING );
\r
7541 stream_.mode = mode;
\r
7543 // Setup callback thread.
\r
7544 stream_.callbackInfo.object = (void *) this;
\r
7546 // Set the thread attributes for joinable and realtime scheduling
\r
7547 // priority (optional). The higher priority will only take affect
\r
7548 // if the program is run as root or suid. Note, under Linux
\r
7549 // processes with CAP_SYS_NICE privilege, a user can change
\r
7550 // scheduling policy and priority (thus need not be root). See
\r
7551 // POSIX "capabilities".
\r
7552 pthread_attr_t attr;
\r
7553 pthread_attr_init( &attr );
\r
7554 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7556 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7557 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7558 // We previously attempted to increase the audio callback priority
\r
7559 // to SCHED_RR here via the attributes. However, while no errors
\r
7560 // were reported in doing so, it did not work. So, now this is
\r
7561 // done in the alsaCallbackHandler function.
\r
7562 stream_.callbackInfo.doRealtime = true;
\r
7563 int priority = options->priority;
\r
7564 int min = sched_get_priority_min( SCHED_RR );
\r
7565 int max = sched_get_priority_max( SCHED_RR );
\r
7566 if ( priority < min ) priority = min;
\r
7567 else if ( priority > max ) priority = max;
\r
7568 stream_.callbackInfo.priority = priority;
\r
7572 stream_.callbackInfo.isRunning = true;
\r
7573 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7574 pthread_attr_destroy( &attr );
\r
7576 stream_.callbackInfo.isRunning = false;
\r
7577 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7586 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7587 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7588 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7590 stream_.apiHandle = 0;
\r
7593 if ( phandle) snd_pcm_close( phandle );
\r
7595 for ( int i=0; i<2; i++ ) {
\r
7596 if ( stream_.userBuffer[i] ) {
\r
7597 free( stream_.userBuffer[i] );
\r
7598 stream_.userBuffer[i] = 0;
\r
7602 if ( stream_.deviceBuffer ) {
\r
7603 free( stream_.deviceBuffer );
\r
7604 stream_.deviceBuffer = 0;
\r
7607 stream_.state = STREAM_CLOSED;
\r
7611 void RtApiAlsa :: closeStream()
\r
7613 if ( stream_.state == STREAM_CLOSED ) {
\r
7614 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7615 error( RtAudioError::WARNING );
\r
7619 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7620 stream_.callbackInfo.isRunning = false;
\r
7621 MUTEX_LOCK( &stream_.mutex );
\r
7622 if ( stream_.state == STREAM_STOPPED ) {
\r
7623 apiInfo->runnable = true;
\r
7624 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7626 MUTEX_UNLOCK( &stream_.mutex );
\r
7627 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7629 if ( stream_.state == STREAM_RUNNING ) {
\r
7630 stream_.state = STREAM_STOPPED;
\r
7631 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7632 snd_pcm_drop( apiInfo->handles[0] );
\r
7633 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7634 snd_pcm_drop( apiInfo->handles[1] );
\r
7638 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7639 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7640 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7642 stream_.apiHandle = 0;
\r
7645 for ( int i=0; i<2; i++ ) {
\r
7646 if ( stream_.userBuffer[i] ) {
\r
7647 free( stream_.userBuffer[i] );
\r
7648 stream_.userBuffer[i] = 0;
\r
7652 if ( stream_.deviceBuffer ) {
\r
7653 free( stream_.deviceBuffer );
\r
7654 stream_.deviceBuffer = 0;
\r
7657 stream_.mode = UNINITIALIZED;
\r
7658 stream_.state = STREAM_CLOSED;
\r
7661 void RtApiAlsa :: startStream()
\r
7663 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7666 if ( stream_.state == STREAM_RUNNING ) {
\r
7667 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7668 error( RtAudioError::WARNING );
\r
7672 MUTEX_LOCK( &stream_.mutex );
\r
7675 snd_pcm_state_t state;
\r
7676 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7677 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7678 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7679 state = snd_pcm_state( handle[0] );
\r
7680 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7681 result = snd_pcm_prepare( handle[0] );
\r
7682 if ( result < 0 ) {
\r
7683 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7684 errorText_ = errorStream_.str();
\r
7690 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7691 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7692 state = snd_pcm_state( handle[1] );
\r
7693 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7694 result = snd_pcm_prepare( handle[1] );
\r
7695 if ( result < 0 ) {
\r
7696 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7697 errorText_ = errorStream_.str();
\r
7703 stream_.state = STREAM_RUNNING;
\r
7706 apiInfo->runnable = true;
\r
7707 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7708 MUTEX_UNLOCK( &stream_.mutex );
\r
7710 if ( result >= 0 ) return;
\r
7711 error( RtAudioError::SYSTEM_ERROR );
\r
7714 void RtApiAlsa :: stopStream()
\r
7717 if ( stream_.state == STREAM_STOPPED ) {
\r
7718 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7719 error( RtAudioError::WARNING );
\r
7723 stream_.state = STREAM_STOPPED;
\r
7724 MUTEX_LOCK( &stream_.mutex );
\r
7727 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7728 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7729 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7730 if ( apiInfo->synchronized )
\r
7731 result = snd_pcm_drop( handle[0] );
\r
7733 result = snd_pcm_drain( handle[0] );
\r
7734 if ( result < 0 ) {
\r
7735 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7736 errorText_ = errorStream_.str();
\r
7741 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7742 result = snd_pcm_drop( handle[1] );
\r
7743 if ( result < 0 ) {
\r
7744 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7745 errorText_ = errorStream_.str();
\r
7751 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7752 MUTEX_UNLOCK( &stream_.mutex );
\r
7754 if ( result >= 0 ) return;
\r
7755 error( RtAudioError::SYSTEM_ERROR );
\r
7758 void RtApiAlsa :: abortStream()
\r
7761 if ( stream_.state == STREAM_STOPPED ) {
\r
7762 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7763 error( RtAudioError::WARNING );
\r
7767 stream_.state = STREAM_STOPPED;
\r
7768 MUTEX_LOCK( &stream_.mutex );
\r
7771 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7772 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7773 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7774 result = snd_pcm_drop( handle[0] );
\r
7775 if ( result < 0 ) {
\r
7776 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7777 errorText_ = errorStream_.str();
\r
7782 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7783 result = snd_pcm_drop( handle[1] );
\r
7784 if ( result < 0 ) {
\r
7785 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7786 errorText_ = errorStream_.str();
\r
7792 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7793 MUTEX_UNLOCK( &stream_.mutex );
\r
7795 if ( result >= 0 ) return;
\r
7796 error( RtAudioError::SYSTEM_ERROR );
\r
7799 void RtApiAlsa :: callbackEvent()
\r
7801 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7802 if ( stream_.state == STREAM_STOPPED ) {
\r
7803 MUTEX_LOCK( &stream_.mutex );
\r
7804 while ( !apiInfo->runnable )
\r
7805 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7807 if ( stream_.state != STREAM_RUNNING ) {
\r
7808 MUTEX_UNLOCK( &stream_.mutex );
\r
7811 MUTEX_UNLOCK( &stream_.mutex );
\r
7814 if ( stream_.state == STREAM_CLOSED ) {
\r
7815 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7816 error( RtAudioError::WARNING );
\r
7820 int doStopStream = 0;
\r
7821 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7822 double streamTime = getStreamTime();
\r
7823 RtAudioStreamStatus status = 0;
\r
7824 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7825 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7826 apiInfo->xrun[0] = false;
\r
7828 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7829 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7830 apiInfo->xrun[1] = false;
\r
7832 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7833 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7835 if ( doStopStream == 2 ) {
\r
7840 MUTEX_LOCK( &stream_.mutex );
\r
7842 // The state might change while waiting on a mutex.
\r
7843 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7848 snd_pcm_t **handle;
\r
7849 snd_pcm_sframes_t frames;
\r
7850 RtAudioFormat format;
\r
7851 handle = (snd_pcm_t **) apiInfo->handles;
\r
7853 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7855 // Setup parameters.
\r
7856 if ( stream_.doConvertBuffer[1] ) {
\r
7857 buffer = stream_.deviceBuffer;
\r
7858 channels = stream_.nDeviceChannels[1];
\r
7859 format = stream_.deviceFormat[1];
\r
7862 buffer = stream_.userBuffer[1];
\r
7863 channels = stream_.nUserChannels[1];
\r
7864 format = stream_.userFormat;
\r
7867 // Read samples from device in interleaved/non-interleaved format.
\r
7868 if ( stream_.deviceInterleaved[1] )
\r
7869 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7871 void *bufs[channels];
\r
7872 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7873 for ( int i=0; i<channels; i++ )
\r
7874 bufs[i] = (void *) (buffer + (i * offset));
\r
7875 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7878 if ( result < (int) stream_.bufferSize ) {
\r
7879 // Either an error or overrun occured.
\r
7880 if ( result == -EPIPE ) {
\r
7881 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7882 if ( state == SND_PCM_STATE_XRUN ) {
\r
7883 apiInfo->xrun[1] = true;
\r
7884 result = snd_pcm_prepare( handle[1] );
\r
7885 if ( result < 0 ) {
\r
7886 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7887 errorText_ = errorStream_.str();
\r
7891 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7892 errorText_ = errorStream_.str();
\r
7896 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7897 errorText_ = errorStream_.str();
\r
7899 error( RtAudioError::WARNING );
\r
7903 // Do byte swapping if necessary.
\r
7904 if ( stream_.doByteSwap[1] )
\r
7905 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7907 // Do buffer conversion if necessary.
\r
7908 if ( stream_.doConvertBuffer[1] )
\r
7909 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7911 // Check stream latency
\r
7912 result = snd_pcm_delay( handle[1], &frames );
\r
7913 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7918 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7920 // Setup parameters and do buffer conversion if necessary.
\r
7921 if ( stream_.doConvertBuffer[0] ) {
\r
7922 buffer = stream_.deviceBuffer;
\r
7923 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7924 channels = stream_.nDeviceChannels[0];
\r
7925 format = stream_.deviceFormat[0];
\r
7928 buffer = stream_.userBuffer[0];
\r
7929 channels = stream_.nUserChannels[0];
\r
7930 format = stream_.userFormat;
\r
7933 // Do byte swapping if necessary.
\r
7934 if ( stream_.doByteSwap[0] )
\r
7935 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7937 // Write samples to device in interleaved/non-interleaved format.
\r
7938 if ( stream_.deviceInterleaved[0] )
\r
7939 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7941 void *bufs[channels];
\r
7942 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7943 for ( int i=0; i<channels; i++ )
\r
7944 bufs[i] = (void *) (buffer + (i * offset));
\r
7945 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7948 if ( result < (int) stream_.bufferSize ) {
\r
7949 // Either an error or underrun occured.
\r
7950 if ( result == -EPIPE ) {
\r
7951 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7952 if ( state == SND_PCM_STATE_XRUN ) {
\r
7953 apiInfo->xrun[0] = true;
\r
7954 result = snd_pcm_prepare( handle[0] );
\r
7955 if ( result < 0 ) {
\r
7956 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7957 errorText_ = errorStream_.str();
\r
7961 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7962 errorText_ = errorStream_.str();
\r
7966 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7967 errorText_ = errorStream_.str();
\r
7969 error( RtAudioError::WARNING );
\r
7973 // Check stream latency
\r
7974 result = snd_pcm_delay( handle[0], &frames );
\r
7975 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7979 MUTEX_UNLOCK( &stream_.mutex );
\r
7981 RtApi::tickStreamTime();
\r
7982 if ( doStopStream == 1 ) this->stopStream();
\r
7985 static void *alsaCallbackHandler( void *ptr )
\r
7987 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7988 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7989 bool *isRunning = &info->isRunning;
\r
7991 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7992 if ( &info->doRealtime ) {
\r
7993 pthread_t tID = pthread_self(); // ID of this thread
\r
7994 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7995 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7999 while ( *isRunning == true ) {
\r
8000 pthread_testcancel();
\r
8001 object->callbackEvent();
\r
8004 pthread_exit( NULL );
\r
8007 //******************** End of __LINUX_ALSA__ *********************//
\r
8010 #if defined(__LINUX_PULSE__)
\r
8012 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8013 // and Tristan Matthews.
\r
8015 #include <pulse/error.h>
\r
8016 #include <pulse/simple.h>
\r
// Sample rates advertised by the PulseAudio backend; zero-terminated.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8022 struct rtaudio_pa_format_mapping_t {
\r
8023 RtAudioFormat rtaudio_format;
\r
8024 pa_sample_format_t pa_format;
\r
8027 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8028 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8029 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8030 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8031 {0, PA_SAMPLE_INVALID}};
\r
8033 struct PulseAudioHandle {
\r
8034 pa_simple *s_play;
\r
8037 pthread_cond_t runnable_cv;
\r
8039 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8042 RtApiPulse::~RtApiPulse()
\r
8044 if ( stream_.state != STREAM_CLOSED )
\r
8048 unsigned int RtApiPulse::getDeviceCount( void )
\r
8053 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8055 RtAudio::DeviceInfo info;
\r
8056 info.probed = true;
\r
8057 info.name = "PulseAudio";
\r
8058 info.outputChannels = 2;
\r
8059 info.inputChannels = 2;
\r
8060 info.duplexChannels = 2;
\r
8061 info.isDefaultOutput = true;
\r
8062 info.isDefaultInput = true;
\r
8064 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8065 info.sampleRates.push_back( *sr );
\r
8067 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8072 static void *pulseaudio_callback( void * user )
\r
8074 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8075 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8076 volatile bool *isRunning = &cbi->isRunning;
\r
8078 while ( *isRunning ) {
\r
8079 pthread_testcancel();
\r
8080 context->callbackEvent();
\r
8083 pthread_exit( NULL );
\r
8086 void RtApiPulse::closeStream( void )
\r
8088 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8090 stream_.callbackInfo.isRunning = false;
\r
8092 MUTEX_LOCK( &stream_.mutex );
\r
8093 if ( stream_.state == STREAM_STOPPED ) {
\r
8094 pah->runnable = true;
\r
8095 pthread_cond_signal( &pah->runnable_cv );
\r
8097 MUTEX_UNLOCK( &stream_.mutex );
\r
8099 pthread_join( pah->thread, 0 );
\r
8100 if ( pah->s_play ) {
\r
8101 pa_simple_flush( pah->s_play, NULL );
\r
8102 pa_simple_free( pah->s_play );
\r
8105 pa_simple_free( pah->s_rec );
\r
8107 pthread_cond_destroy( &pah->runnable_cv );
\r
8109 stream_.apiHandle = 0;
\r
8112 if ( stream_.userBuffer[0] ) {
\r
8113 free( stream_.userBuffer[0] );
\r
8114 stream_.userBuffer[0] = 0;
\r
8116 if ( stream_.userBuffer[1] ) {
\r
8117 free( stream_.userBuffer[1] );
\r
8118 stream_.userBuffer[1] = 0;
\r
8121 stream_.state = STREAM_CLOSED;
\r
8122 stream_.mode = UNINITIALIZED;
\r
8125 void RtApiPulse::callbackEvent( void )
\r
8127 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8129 if ( stream_.state == STREAM_STOPPED ) {
\r
8130 MUTEX_LOCK( &stream_.mutex );
\r
8131 while ( !pah->runnable )
\r
8132 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8134 if ( stream_.state != STREAM_RUNNING ) {
\r
8135 MUTEX_UNLOCK( &stream_.mutex );
\r
8138 MUTEX_UNLOCK( &stream_.mutex );
\r
8141 if ( stream_.state == STREAM_CLOSED ) {
\r
8142 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8143 "this shouldn't happen!";
\r
8144 error( RtAudioError::WARNING );
\r
8148 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8149 double streamTime = getStreamTime();
\r
8150 RtAudioStreamStatus status = 0;
\r
8151 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8152 stream_.bufferSize, streamTime, status,
\r
8153 stream_.callbackInfo.userData );
\r
8155 if ( doStopStream == 2 ) {
\r
8160 MUTEX_LOCK( &stream_.mutex );
\r
8161 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8162 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8164 if ( stream_.state != STREAM_RUNNING )
\r
8169 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8170 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8171 convertBuffer( stream_.deviceBuffer,
\r
8172 stream_.userBuffer[OUTPUT],
\r
8173 stream_.convertInfo[OUTPUT] );
\r
8174 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8175 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8177 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8178 formatBytes( stream_.userFormat );
\r
8180 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8181 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8182 pa_strerror( pa_error ) << ".";
\r
8183 errorText_ = errorStream_.str();
\r
8184 error( RtAudioError::WARNING );
\r
8188 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8189 if ( stream_.doConvertBuffer[INPUT] )
\r
8190 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8191 formatBytes( stream_.deviceFormat[INPUT] );
\r
8193 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8194 formatBytes( stream_.userFormat );
\r
8196 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8197 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8198 pa_strerror( pa_error ) << ".";
\r
8199 errorText_ = errorStream_.str();
\r
8200 error( RtAudioError::WARNING );
\r
8202 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8203 convertBuffer( stream_.userBuffer[INPUT],
\r
8204 stream_.deviceBuffer,
\r
8205 stream_.convertInfo[INPUT] );
\r
8210 MUTEX_UNLOCK( &stream_.mutex );
\r
8211 RtApi::tickStreamTime();
\r
8213 if ( doStopStream == 1 )
\r
8217 void RtApiPulse::startStream( void )
\r
8219 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8221 if ( stream_.state == STREAM_CLOSED ) {
\r
8222 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8223 error( RtAudioError::INVALID_USE );
\r
8226 if ( stream_.state == STREAM_RUNNING ) {
\r
8227 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8228 error( RtAudioError::WARNING );
\r
8232 MUTEX_LOCK( &stream_.mutex );
\r
8234 stream_.state = STREAM_RUNNING;
\r
8236 pah->runnable = true;
\r
8237 pthread_cond_signal( &pah->runnable_cv );
\r
8238 MUTEX_UNLOCK( &stream_.mutex );
\r
8241 void RtApiPulse::stopStream( void )
\r
8243 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8245 if ( stream_.state == STREAM_CLOSED ) {
\r
8246 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8247 error( RtAudioError::INVALID_USE );
\r
8250 if ( stream_.state == STREAM_STOPPED ) {
\r
8251 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8252 error( RtAudioError::WARNING );
\r
8256 stream_.state = STREAM_STOPPED;
\r
8257 MUTEX_LOCK( &stream_.mutex );
\r
8259 if ( pah && pah->s_play ) {
\r
8261 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8262 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8263 pa_strerror( pa_error ) << ".";
\r
8264 errorText_ = errorStream_.str();
\r
8265 MUTEX_UNLOCK( &stream_.mutex );
\r
8266 error( RtAudioError::SYSTEM_ERROR );
\r
8271 stream_.state = STREAM_STOPPED;
\r
8272 MUTEX_UNLOCK( &stream_.mutex );
\r
8275 void RtApiPulse::abortStream( void )
\r
8277 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8279 if ( stream_.state == STREAM_CLOSED ) {
\r
8280 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8281 error( RtAudioError::INVALID_USE );
\r
8284 if ( stream_.state == STREAM_STOPPED ) {
\r
8285 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8286 error( RtAudioError::WARNING );
\r
8290 stream_.state = STREAM_STOPPED;
\r
8291 MUTEX_LOCK( &stream_.mutex );
\r
8293 if ( pah && pah->s_play ) {
\r
8295 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8296 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8297 pa_strerror( pa_error ) << ".";
\r
8298 errorText_ = errorStream_.str();
\r
8299 MUTEX_UNLOCK( &stream_.mutex );
\r
8300 error( RtAudioError::SYSTEM_ERROR );
\r
8305 stream_.state = STREAM_STOPPED;
\r
8306 MUTEX_UNLOCK( &stream_.mutex );
\r
8309 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8310 unsigned int channels, unsigned int firstChannel,
\r
8311 unsigned int sampleRate, RtAudioFormat format,
\r
8312 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8314 PulseAudioHandle *pah = 0;
\r
8315 unsigned long bufferBytes = 0;
\r
8316 pa_sample_spec ss;
\r
8318 if ( device != 0 ) return false;
\r
8319 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8320 if ( channels != 1 && channels != 2 ) {
\r
8321 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8324 ss.channels = channels;
\r
8326 if ( firstChannel != 0 ) return false;
\r
8328 bool sr_found = false;
\r
8329 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8330 if ( sampleRate == *sr ) {
\r
8332 stream_.sampleRate = sampleRate;
\r
8333 ss.rate = sampleRate;
\r
8337 if ( !sr_found ) {
\r
8338 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8342 bool sf_found = 0;
\r
8343 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8344 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8345 if ( format == sf->rtaudio_format ) {
\r
8347 stream_.userFormat = sf->rtaudio_format;
\r
8348 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8349 ss.format = sf->pa_format;
\r
8353 if ( !sf_found ) { // Use internal data format conversion.
\r
8354 stream_.userFormat = format;
\r
8355 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8356 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8359 // Set other stream parameters.
\r
8360 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8361 else stream_.userInterleaved = true;
\r
8362 stream_.deviceInterleaved[mode] = true;
\r
8363 stream_.nBuffers = 1;
\r
8364 stream_.doByteSwap[mode] = false;
\r
8365 stream_.nUserChannels[mode] = channels;
\r
8366 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8367 stream_.channelOffset[mode] = 0;
\r
8368 std::string streamName = "RtAudio";
\r
8370 // Set flags for buffer conversion.
\r
8371 stream_.doConvertBuffer[mode] = false;
\r
8372 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8373 stream_.doConvertBuffer[mode] = true;
\r
8374 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8375 stream_.doConvertBuffer[mode] = true;
\r
8377 // Allocate necessary internal buffers.
\r
8378 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8379 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8380 if ( stream_.userBuffer[mode] == NULL ) {
\r
8381 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8384 stream_.bufferSize = *bufferSize;
\r
8386 if ( stream_.doConvertBuffer[mode] ) {
\r
8388 bool makeBuffer = true;
\r
8389 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8390 if ( mode == INPUT ) {
\r
8391 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8392 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8393 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8397 if ( makeBuffer ) {
\r
8398 bufferBytes *= *bufferSize;
\r
8399 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8400 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8401 if ( stream_.deviceBuffer == NULL ) {
\r
8402 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8408 stream_.device[mode] = device;
\r
8410 // Setup the buffer conversion information structure.
\r
8411 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8413 if ( !stream_.apiHandle ) {
\r
8414 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8416 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8420 stream_.apiHandle = pah;
\r
8421 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8422 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8426 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8429 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8432 pa_buffer_attr buffer_attr;
\r
8433 buffer_attr.fragsize = bufferBytes;
\r
8434 buffer_attr.maxlength = -1;
\r
8436 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8437 if ( !pah->s_rec ) {
\r
8438 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8443 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8444 if ( !pah->s_play ) {
\r
8445 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8453 if ( stream_.mode == UNINITIALIZED )
\r
8454 stream_.mode = mode;
\r
8455 else if ( stream_.mode == mode )
\r
8458 stream_.mode = DUPLEX;
\r
8460 if ( !stream_.callbackInfo.isRunning ) {
\r
8461 stream_.callbackInfo.object = this;
\r
8462 stream_.callbackInfo.isRunning = true;
\r
8463 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8464 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8469 stream_.state = STREAM_STOPPED;
\r
8473 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8474 pthread_cond_destroy( &pah->runnable_cv );
\r
8476 stream_.apiHandle = 0;
\r
8479 for ( int i=0; i<2; i++ ) {
\r
8480 if ( stream_.userBuffer[i] ) {
\r
8481 free( stream_.userBuffer[i] );
\r
8482 stream_.userBuffer[i] = 0;
\r
8486 if ( stream_.deviceBuffer ) {
\r
8487 free( stream_.deviceBuffer );
\r
8488 stream_.deviceBuffer = 0;
\r
8494 //******************** End of __LINUX_PULSE__ *********************//
\r
8497 #if defined(__LINUX_OSS__)
\r
8499 #include <unistd.h>
\r
8500 #include <sys/ioctl.h>
\r
8501 #include <unistd.h>
\r
8502 #include <fcntl.h>
\r
8503 #include <sys/soundcard.h>
\r
8504 #include <errno.h>
\r
8507 static void *ossCallbackHandler(void * ptr);
\r
8509 // A structure to hold various information related to the OSS API
\r
8510 // implementation.
\r
// Per-stream bookkeeping for the OSS backend.  The constructor at the
// bottom of this struct references xrun[] and triggered, so both members
// are required even though the corrupted source dropped their declarations.
struct OssHandle {
  int id[2];               // device file descriptors (playback, capture)
  bool xrun[2];            // over/underrun flags (playback, capture)
  bool triggered;          // true once output has been explicitly triggered
  pthread_cond_t runnable; // signals the callback thread to resume

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8521 RtApiOss :: RtApiOss()
\r
8523 // Nothing to do here.
\r
8526 RtApiOss :: ~RtApiOss()
\r
8528 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8531 unsigned int RtApiOss :: getDeviceCount( void )
\r
8533 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8534 if ( mixerfd == -1 ) {
\r
8535 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8536 error( RtAudioError::WARNING );
\r
8540 oss_sysinfo sysinfo;
\r
8541 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8543 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8544 error( RtAudioError::WARNING );
\r
8549 return sysinfo.numaudios;
\r
8552 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8554 RtAudio::DeviceInfo info;
\r
8555 info.probed = false;
\r
8557 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8558 if ( mixerfd == -1 ) {
\r
8559 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8560 error( RtAudioError::WARNING );
\r
8564 oss_sysinfo sysinfo;
\r
8565 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8566 if ( result == -1 ) {
\r
8568 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8569 error( RtAudioError::WARNING );
\r
8573 unsigned nDevices = sysinfo.numaudios;
\r
8574 if ( nDevices == 0 ) {
\r
8576 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8577 error( RtAudioError::INVALID_USE );
\r
8581 if ( device >= nDevices ) {
\r
8583 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8584 error( RtAudioError::INVALID_USE );
\r
8588 oss_audioinfo ainfo;
\r
8589 ainfo.dev = device;
\r
8590 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8592 if ( result == -1 ) {
\r
8593 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8594 errorText_ = errorStream_.str();
\r
8595 error( RtAudioError::WARNING );
\r
8600 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8601 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8602 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8603 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8604 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8607 // Probe data formats ... do for input
\r
8608 unsigned long mask = ainfo.iformats;
\r
8609 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8610 info.nativeFormats |= RTAUDIO_SINT16;
\r
8611 if ( mask & AFMT_S8 )
\r
8612 info.nativeFormats |= RTAUDIO_SINT8;
\r
8613 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8614 info.nativeFormats |= RTAUDIO_SINT32;
\r
8615 if ( mask & AFMT_FLOAT )
\r
8616 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8617 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8618 info.nativeFormats |= RTAUDIO_SINT24;
\r
8620 // Check that we have at least one supported format
\r
8621 if ( info.nativeFormats == 0 ) {
\r
8622 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8623 errorText_ = errorStream_.str();
\r
8624 error( RtAudioError::WARNING );
\r
8628 // Probe the supported sample rates.
\r
8629 info.sampleRates.clear();
\r
8630 if ( ainfo.nrates ) {
\r
8631 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8632 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8633 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8634 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8641 // Check min and max rate values;
\r
8642 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8643 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8644 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8648 if ( info.sampleRates.size() == 0 ) {
\r
8649 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8650 errorText_ = errorStream_.str();
\r
8651 error( RtAudioError::WARNING );
\r
8654 info.probed = true;
\r
8655 info.name = ainfo.name;
\r
8662 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8663 unsigned int firstChannel, unsigned int sampleRate,
\r
8664 RtAudioFormat format, unsigned int *bufferSize,
\r
8665 RtAudio::StreamOptions *options )
\r
8667 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8668 if ( mixerfd == -1 ) {
\r
8669 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8673 oss_sysinfo sysinfo;
\r
8674 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8675 if ( result == -1 ) {
\r
8677 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8681 unsigned nDevices = sysinfo.numaudios;
\r
8682 if ( nDevices == 0 ) {
\r
8683 // This should not happen because a check is made before this function is called.
\r
8685 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8689 if ( device >= nDevices ) {
\r
8690 // This should not happen because a check is made before this function is called.
\r
8692 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8696 oss_audioinfo ainfo;
\r
8697 ainfo.dev = device;
\r
8698 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8700 if ( result == -1 ) {
\r
8701 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8702 errorText_ = errorStream_.str();
\r
8706 // Check if device supports input or output
\r
8707 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8708 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8709 if ( mode == OUTPUT )
\r
8710 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8712 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8713 errorText_ = errorStream_.str();
\r
8718 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8719 if ( mode == OUTPUT )
\r
8720 flags |= O_WRONLY;
\r
8721 else { // mode == INPUT
\r
8722 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8723 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8724 close( handle->id[0] );
\r
8725 handle->id[0] = 0;
\r
8726 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8727 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8728 errorText_ = errorStream_.str();
\r
8731 // Check that the number previously set channels is the same.
\r
8732 if ( stream_.nUserChannels[0] != channels ) {
\r
8733 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8734 errorText_ = errorStream_.str();
\r
8740 flags |= O_RDONLY;
\r
8743 // Set exclusive access if specified.
\r
8744 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8746 // Try to open the device.
\r
8748 fd = open( ainfo.devnode, flags, 0 );
\r
8750 if ( errno == EBUSY )
\r
8751 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8753 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8754 errorText_ = errorStream_.str();
\r
8758 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8760 if ( flags | O_RDWR ) {
\r
8761 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8762 if ( result == -1) {
\r
8763 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8764 errorText_ = errorStream_.str();
\r
8770 // Check the device channel support.
\r
8771 stream_.nUserChannels[mode] = channels;
\r
8772 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8774 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8775 errorText_ = errorStream_.str();
\r
8779 // Set the number of channels.
\r
8780 int deviceChannels = channels + firstChannel;
\r
8781 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8782 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8784 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8785 errorText_ = errorStream_.str();
\r
8788 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8790 // Get the data format mask
\r
8792 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8793 if ( result == -1 ) {
\r
8795 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8796 errorText_ = errorStream_.str();
\r
8800 // Determine how to set the device format.
\r
8801 stream_.userFormat = format;
\r
8802 int deviceFormat = -1;
\r
8803 stream_.doByteSwap[mode] = false;
\r
8804 if ( format == RTAUDIO_SINT8 ) {
\r
8805 if ( mask & AFMT_S8 ) {
\r
8806 deviceFormat = AFMT_S8;
\r
8807 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8810 else if ( format == RTAUDIO_SINT16 ) {
\r
8811 if ( mask & AFMT_S16_NE ) {
\r
8812 deviceFormat = AFMT_S16_NE;
\r
8813 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8815 else if ( mask & AFMT_S16_OE ) {
\r
8816 deviceFormat = AFMT_S16_OE;
\r
8817 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8818 stream_.doByteSwap[mode] = true;
\r
8821 else if ( format == RTAUDIO_SINT24 ) {
\r
8822 if ( mask & AFMT_S24_NE ) {
\r
8823 deviceFormat = AFMT_S24_NE;
\r
8824 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8826 else if ( mask & AFMT_S24_OE ) {
\r
8827 deviceFormat = AFMT_S24_OE;
\r
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8829 stream_.doByteSwap[mode] = true;
\r
8832 else if ( format == RTAUDIO_SINT32 ) {
\r
8833 if ( mask & AFMT_S32_NE ) {
\r
8834 deviceFormat = AFMT_S32_NE;
\r
8835 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8837 else if ( mask & AFMT_S32_OE ) {
\r
8838 deviceFormat = AFMT_S32_OE;
\r
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8840 stream_.doByteSwap[mode] = true;
\r
8844 if ( deviceFormat == -1 ) {
\r
8845 // The user requested format is not natively supported by the device.
\r
8846 if ( mask & AFMT_S16_NE ) {
\r
8847 deviceFormat = AFMT_S16_NE;
\r
8848 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8850 else if ( mask & AFMT_S32_NE ) {
\r
8851 deviceFormat = AFMT_S32_NE;
\r
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8854 else if ( mask & AFMT_S24_NE ) {
\r
8855 deviceFormat = AFMT_S24_NE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8858 else if ( mask & AFMT_S16_OE ) {
\r
8859 deviceFormat = AFMT_S16_OE;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8861 stream_.doByteSwap[mode] = true;
\r
8863 else if ( mask & AFMT_S32_OE ) {
\r
8864 deviceFormat = AFMT_S32_OE;
\r
8865 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8866 stream_.doByteSwap[mode] = true;
\r
8868 else if ( mask & AFMT_S24_OE ) {
\r
8869 deviceFormat = AFMT_S24_OE;
\r
8870 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8871 stream_.doByteSwap[mode] = true;
\r
8873 else if ( mask & AFMT_S8) {
\r
8874 deviceFormat = AFMT_S8;
\r
8875 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8879 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8880 // This really shouldn't happen ...
\r
8882 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8883 errorText_ = errorStream_.str();
\r
8887 // Set the data format.
\r
8888 int temp = deviceFormat;
\r
8889 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8890 if ( result == -1 || deviceFormat != temp ) {
\r
8892 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8893 errorText_ = errorStream_.str();
\r
8897 // Attempt to set the buffer size. According to OSS, the minimum
\r
8898 // number of buffers is two. The supposed minimum buffer size is 16
\r
8899 // bytes, so that will be our lower bound. The argument to this
\r
8900 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8901 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8902 // We'll check the actual value used near the end of the setup
\r
8904 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8905 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8907 if ( options ) buffers = options->numberOfBuffers;
\r
8908 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8909 if ( buffers < 2 ) buffers = 3;
\r
8910 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8911 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8912 if ( result == -1 ) {
\r
8914 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8915 errorText_ = errorStream_.str();
\r
8918 stream_.nBuffers = buffers;
\r
8920 // Save buffer size (in sample frames).
\r
8921 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8922 stream_.bufferSize = *bufferSize;
\r
8924 // Set the sample rate.
\r
8925 int srate = sampleRate;
\r
8926 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8927 if ( result == -1 ) {
\r
8929 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8930 errorText_ = errorStream_.str();
\r
8934 // Verify the sample rate setup worked.
\r
8935 if ( abs( srate - sampleRate ) > 100 ) {
\r
8937 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8938 errorText_ = errorStream_.str();
\r
8941 stream_.sampleRate = sampleRate;
\r
8943 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8944 // We're doing duplex setup here.
\r
8945 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8946 stream_.nDeviceChannels[0] = deviceChannels;
\r
8949 // Set interleaving parameters.
\r
8950 stream_.userInterleaved = true;
\r
8951 stream_.deviceInterleaved[mode] = true;
\r
8952 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8953 stream_.userInterleaved = false;
\r
8955 // Set flags for buffer conversion
\r
8956 stream_.doConvertBuffer[mode] = false;
\r
8957 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8958 stream_.doConvertBuffer[mode] = true;
\r
8959 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8960 stream_.doConvertBuffer[mode] = true;
\r
8961 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8962 stream_.nUserChannels[mode] > 1 )
\r
8963 stream_.doConvertBuffer[mode] = true;
\r
8965 // Allocate the stream handles if necessary and then save.
\r
8966 if ( stream_.apiHandle == 0 ) {
\r
8968 handle = new OssHandle;
\r
8970 catch ( std::bad_alloc& ) {
\r
8971 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8975 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8976 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8980 stream_.apiHandle = (void *) handle;
\r
8983 handle = (OssHandle *) stream_.apiHandle;
\r
8985 handle->id[mode] = fd;
\r
8987 // Allocate necessary internal buffers.
\r
8988 unsigned long bufferBytes;
\r
8989 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8990 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8991 if ( stream_.userBuffer[mode] == NULL ) {
\r
8992 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8996 if ( stream_.doConvertBuffer[mode] ) {
\r
8998 bool makeBuffer = true;
\r
8999 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9000 if ( mode == INPUT ) {
\r
9001 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9002 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9003 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9007 if ( makeBuffer ) {
\r
9008 bufferBytes *= *bufferSize;
\r
9009 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9010 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9011 if ( stream_.deviceBuffer == NULL ) {
\r
9012 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9018 stream_.device[mode] = device;
\r
9019 stream_.state = STREAM_STOPPED;
\r
9021 // Setup the buffer conversion information structure.
\r
9022 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9024 // Setup thread if necessary.
\r
9025 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9026 // We had already set up an output stream.
\r
9027 stream_.mode = DUPLEX;
\r
9028 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9031 stream_.mode = mode;
\r
9033 // Setup callback thread.
\r
9034 stream_.callbackInfo.object = (void *) this;
\r
9036 // Set the thread attributes for joinable and realtime scheduling
\r
9037 // priority. The higher priority will only take affect if the
\r
9038 // program is run as root or suid.
\r
9039 pthread_attr_t attr;
\r
9040 pthread_attr_init( &attr );
\r
9041 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9042 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9043 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9044 struct sched_param param;
\r
9045 int priority = options->priority;
\r
9046 int min = sched_get_priority_min( SCHED_RR );
\r
9047 int max = sched_get_priority_max( SCHED_RR );
\r
9048 if ( priority < min ) priority = min;
\r
9049 else if ( priority > max ) priority = max;
\r
9050 param.sched_priority = priority;
\r
9051 pthread_attr_setschedparam( &attr, ¶m );
\r
9052 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9055 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9057 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9060 stream_.callbackInfo.isRunning = true;
\r
9061 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9062 pthread_attr_destroy( &attr );
\r
9064 stream_.callbackInfo.isRunning = false;
\r
9065 errorText_ = "RtApiOss::error creating callback thread!";
\r
9074 pthread_cond_destroy( &handle->runnable );
\r
9075 if ( handle->id[0] ) close( handle->id[0] );
\r
9076 if ( handle->id[1] ) close( handle->id[1] );
\r
9078 stream_.apiHandle = 0;
\r
9081 for ( int i=0; i<2; i++ ) {
\r
9082 if ( stream_.userBuffer[i] ) {
\r
9083 free( stream_.userBuffer[i] );
\r
9084 stream_.userBuffer[i] = 0;
\r
9088 if ( stream_.deviceBuffer ) {
\r
9089 free( stream_.deviceBuffer );
\r
9090 stream_.deviceBuffer = 0;
\r
9096 void RtApiOss :: closeStream()
\r
9098 if ( stream_.state == STREAM_CLOSED ) {
\r
9099 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9100 error( RtAudioError::WARNING );
\r
9104 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9105 stream_.callbackInfo.isRunning = false;
\r
9106 MUTEX_LOCK( &stream_.mutex );
\r
9107 if ( stream_.state == STREAM_STOPPED )
\r
9108 pthread_cond_signal( &handle->runnable );
\r
9109 MUTEX_UNLOCK( &stream_.mutex );
\r
9110 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9112 if ( stream_.state == STREAM_RUNNING ) {
\r
9113 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9114 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9116 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9117 stream_.state = STREAM_STOPPED;
\r
9121 pthread_cond_destroy( &handle->runnable );
\r
9122 if ( handle->id[0] ) close( handle->id[0] );
\r
9123 if ( handle->id[1] ) close( handle->id[1] );
\r
9125 stream_.apiHandle = 0;
\r
9128 for ( int i=0; i<2; i++ ) {
\r
9129 if ( stream_.userBuffer[i] ) {
\r
9130 free( stream_.userBuffer[i] );
\r
9131 stream_.userBuffer[i] = 0;
\r
9135 if ( stream_.deviceBuffer ) {
\r
9136 free( stream_.deviceBuffer );
\r
9137 stream_.deviceBuffer = 0;
\r
9140 stream_.mode = UNINITIALIZED;
\r
9141 stream_.state = STREAM_CLOSED;
\r
9144 void RtApiOss :: startStream()
\r
9147 if ( stream_.state == STREAM_RUNNING ) {
\r
9148 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9149 error( RtAudioError::WARNING );
\r
9153 MUTEX_LOCK( &stream_.mutex );
\r
9155 stream_.state = STREAM_RUNNING;
\r
9157 // No need to do anything else here ... OSS automatically starts
\r
9158 // when fed samples.
\r
9160 MUTEX_UNLOCK( &stream_.mutex );
\r
9162 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9163 pthread_cond_signal( &handle->runnable );
\r
9166 void RtApiOss :: stopStream()
\r
9169 if ( stream_.state == STREAM_STOPPED ) {
\r
9170 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9171 error( RtAudioError::WARNING );
\r
9175 MUTEX_LOCK( &stream_.mutex );
\r
9177 // The state might change while waiting on a mutex.
\r
9178 if ( stream_.state == STREAM_STOPPED ) {
\r
9179 MUTEX_UNLOCK( &stream_.mutex );
\r
9184 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9185 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9187 // Flush the output with zeros a few times.
\r
9190 RtAudioFormat format;
\r
9192 if ( stream_.doConvertBuffer[0] ) {
\r
9193 buffer = stream_.deviceBuffer;
\r
9194 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9195 format = stream_.deviceFormat[0];
\r
9198 buffer = stream_.userBuffer[0];
\r
9199 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9200 format = stream_.userFormat;
\r
9203 memset( buffer, 0, samples * formatBytes(format) );
\r
9204 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9205 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9206 if ( result == -1 ) {
\r
9207 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9208 error( RtAudioError::WARNING );
\r
9212 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9213 if ( result == -1 ) {
\r
9214 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9215 errorText_ = errorStream_.str();
\r
9218 handle->triggered = false;
\r
9221 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9222 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9223 if ( result == -1 ) {
\r
9224 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9225 errorText_ = errorStream_.str();
\r
9231 stream_.state = STREAM_STOPPED;
\r
9232 MUTEX_UNLOCK( &stream_.mutex );
\r
9234 if ( result != -1 ) return;
\r
9235 error( RtAudioError::SYSTEM_ERROR );
\r
9238 void RtApiOss :: abortStream()
\r
9241 if ( stream_.state == STREAM_STOPPED ) {
\r
9242 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9243 error( RtAudioError::WARNING );
\r
9247 MUTEX_LOCK( &stream_.mutex );
\r
9249 // The state might change while waiting on a mutex.
\r
9250 if ( stream_.state == STREAM_STOPPED ) {
\r
9251 MUTEX_UNLOCK( &stream_.mutex );
\r
9256 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9258 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9259 if ( result == -1 ) {
\r
9260 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9261 errorText_ = errorStream_.str();
\r
9264 handle->triggered = false;
\r
9267 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9268 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9269 if ( result == -1 ) {
\r
9270 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9271 errorText_ = errorStream_.str();
\r
9277 stream_.state = STREAM_STOPPED;
\r
9278 MUTEX_UNLOCK( &stream_.mutex );
\r
9280 if ( result != -1 ) return;
\r
9281 error( RtAudioError::SYSTEM_ERROR );
\r
9284 void RtApiOss :: callbackEvent()
\r
9286 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9287 if ( stream_.state == STREAM_STOPPED ) {
\r
9288 MUTEX_LOCK( &stream_.mutex );
\r
9289 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9290 if ( stream_.state != STREAM_RUNNING ) {
\r
9291 MUTEX_UNLOCK( &stream_.mutex );
\r
9294 MUTEX_UNLOCK( &stream_.mutex );
\r
9297 if ( stream_.state == STREAM_CLOSED ) {
\r
9298 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9299 error( RtAudioError::WARNING );
\r
9303 // Invoke user callback to get fresh output data.
\r
9304 int doStopStream = 0;
\r
9305 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9306 double streamTime = getStreamTime();
\r
9307 RtAudioStreamStatus status = 0;
\r
9308 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9309 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9310 handle->xrun[0] = false;
\r
9312 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9313 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9314 handle->xrun[1] = false;
\r
9316 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9317 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9318 if ( doStopStream == 2 ) {
\r
9319 this->abortStream();
\r
9323 MUTEX_LOCK( &stream_.mutex );
\r
9325 // The state might change while waiting on a mutex.
\r
9326 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9331 RtAudioFormat format;
\r
9333 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9335 // Setup parameters and do buffer conversion if necessary.
\r
9336 if ( stream_.doConvertBuffer[0] ) {
\r
9337 buffer = stream_.deviceBuffer;
\r
9338 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9339 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9340 format = stream_.deviceFormat[0];
\r
9343 buffer = stream_.userBuffer[0];
\r
9344 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9345 format = stream_.userFormat;
\r
9348 // Do byte swapping if necessary.
\r
9349 if ( stream_.doByteSwap[0] )
\r
9350 byteSwapBuffer( buffer, samples, format );
\r
9352 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9354 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9355 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9356 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9357 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9358 handle->triggered = true;
\r
9361 // Write samples to device.
\r
9362 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9364 if ( result == -1 ) {
\r
9365 // We'll assume this is an underrun, though there isn't a
\r
9366 // specific means for determining that.
\r
9367 handle->xrun[0] = true;
\r
9368 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9369 error( RtAudioError::WARNING );
\r
9370 // Continue on to input section.
\r
9374 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9376 // Setup parameters.
\r
9377 if ( stream_.doConvertBuffer[1] ) {
\r
9378 buffer = stream_.deviceBuffer;
\r
9379 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9380 format = stream_.deviceFormat[1];
\r
9383 buffer = stream_.userBuffer[1];
\r
9384 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9385 format = stream_.userFormat;
\r
9388 // Read samples from device.
\r
9389 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9391 if ( result == -1 ) {
\r
9392 // We'll assume this is an overrun, though there isn't a
\r
9393 // specific means for determining that.
\r
9394 handle->xrun[1] = true;
\r
9395 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9396 error( RtAudioError::WARNING );
\r
9400 // Do byte swapping if necessary.
\r
9401 if ( stream_.doByteSwap[1] )
\r
9402 byteSwapBuffer( buffer, samples, format );
\r
9404 // Do buffer conversion if necessary.
\r
9405 if ( stream_.doConvertBuffer[1] )
\r
9406 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9410 MUTEX_UNLOCK( &stream_.mutex );
\r
9412 RtApi::tickStreamTime();
\r
9413 if ( doStopStream == 1 ) this->stopStream();
\r
9416 static void *ossCallbackHandler( void *ptr )
\r
9418 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9419 RtApiOss *object = (RtApiOss *) info->object;
\r
9420 bool *isRunning = &info->isRunning;
\r
9422 while ( *isRunning == true ) {
\r
9423 pthread_testcancel();
\r
9424 object->callbackEvent();
\r
9427 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
// This method can be modified to control the behavior of error
// message printing.
\r
9442 void RtApi :: error( RtAudioError::Type type )
\r
9444 errorStream_.str(""); // clear the ostringstream
\r
9446 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9447 if ( errorCallback ) {
\r
9448 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9450 if ( firstErrorOccurred_ )
\r
9453 firstErrorOccurred_ = true;
\r
9454 const std::string errorMessage = errorText_;
\r
9456 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9457 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9461 errorCallback( type, errorMessage );
\r
9462 firstErrorOccurred_ = false;
\r
9466 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9467 std::cerr << '\n' << errorText_ << "\n\n";
\r
9468 else if ( type != RtAudioError::WARNING )
\r
9469 throw( RtAudioError( errorText_, type ) );
\r
9472 void RtApi :: verifyStream()
\r
9474 if ( stream_.state == STREAM_CLOSED ) {
\r
9475 errorText_ = "RtApi:: a stream is not open!";
\r
9476 error( RtAudioError::INVALID_USE );
\r
9480 void RtApi :: clearStreamInfo()
\r
9482 stream_.mode = UNINITIALIZED;
\r
9483 stream_.state = STREAM_CLOSED;
\r
9484 stream_.sampleRate = 0;
\r
9485 stream_.bufferSize = 0;
\r
9486 stream_.nBuffers = 0;
\r
9487 stream_.userFormat = 0;
\r
9488 stream_.userInterleaved = true;
\r
9489 stream_.streamTime = 0.0;
\r
9490 stream_.apiHandle = 0;
\r
9491 stream_.deviceBuffer = 0;
\r
9492 stream_.callbackInfo.callback = 0;
\r
9493 stream_.callbackInfo.userData = 0;
\r
9494 stream_.callbackInfo.isRunning = false;
\r
9495 stream_.callbackInfo.errorCallback = 0;
\r
9496 for ( int i=0; i<2; i++ ) {
\r
9497 stream_.device[i] = 11111;
\r
9498 stream_.doConvertBuffer[i] = false;
\r
9499 stream_.deviceInterleaved[i] = true;
\r
9500 stream_.doByteSwap[i] = false;
\r
9501 stream_.nUserChannels[i] = 0;
\r
9502 stream_.nDeviceChannels[i] = 0;
\r
9503 stream_.channelOffset[i] = 0;
\r
9504 stream_.deviceFormat[i] = 0;
\r
9505 stream_.latency[i] = 0;
\r
9506 stream_.userBuffer[i] = 0;
\r
9507 stream_.convertInfo[i].channels = 0;
\r
9508 stream_.convertInfo[i].inJump = 0;
\r
9509 stream_.convertInfo[i].outJump = 0;
\r
9510 stream_.convertInfo[i].inFormat = 0;
\r
9511 stream_.convertInfo[i].outFormat = 0;
\r
9512 stream_.convertInfo[i].inOffset.clear();
\r
9513 stream_.convertInfo[i].outOffset.clear();
\r
9517 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9519 if ( format == RTAUDIO_SINT16 )
\r
9521 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9523 else if ( format == RTAUDIO_FLOAT64 )
\r
9525 else if ( format == RTAUDIO_SINT24 )
\r
9527 else if ( format == RTAUDIO_SINT8 )
\r
9530 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9531 error( RtAudioError::WARNING );
\r
9536 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9538 if ( mode == INPUT ) { // convert device to user buffer
\r
9539 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9540 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9541 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9542 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9544 else { // convert user to device buffer
\r
9545 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9546 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9547 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9548 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9551 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9552 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9554 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9556 // Set up the interleave/deinterleave offsets.
\r
9557 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9558 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9559 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9560 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9561 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9562 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9563 stream_.convertInfo[mode].inJump = 1;
\r
9567 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9568 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9569 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9570 stream_.convertInfo[mode].outJump = 1;
\r
9574 else { // no (de)interleaving
\r
9575 if ( stream_.userInterleaved ) {
\r
9576 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9577 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9578 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9582 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9583 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9584 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9585 stream_.convertInfo[mode].inJump = 1;
\r
9586 stream_.convertInfo[mode].outJump = 1;
\r
9591 // Add channel offset.
\r
9592 if ( firstChannel > 0 ) {
\r
9593 if ( stream_.deviceInterleaved[mode] ) {
\r
9594 if ( mode == OUTPUT ) {
\r
9595 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9596 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9600 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9604 if ( mode == OUTPUT ) {
\r
9605 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9606 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9609 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9610 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9616 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9618 // This function does format conversion, input/output channel compensation, and
\r
9619 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9620 // the lower three bytes of a 32-bit integer.
\r
9622 // Clear our device buffer when in/out duplex device channels are different
\r
9623 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9624 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9625 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9628 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9630 Float64 *out = (Float64 *)outBuffer;
\r
9632 if (info.inFormat == RTAUDIO_SINT8) {
\r
9633 signed char *in = (signed char *)inBuffer;
\r
9634 scale = 1.0 / 127.5;
\r
9635 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9636 for (j=0; j<info.channels; j++) {
\r
9637 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9638 out[info.outOffset[j]] += 0.5;
\r
9639 out[info.outOffset[j]] *= scale;
\r
9641 in += info.inJump;
\r
9642 out += info.outJump;
\r
9645 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9646 Int16 *in = (Int16 *)inBuffer;
\r
9647 scale = 1.0 / 32767.5;
\r
9648 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9649 for (j=0; j<info.channels; j++) {
\r
9650 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9651 out[info.outOffset[j]] += 0.5;
\r
9652 out[info.outOffset[j]] *= scale;
\r
9654 in += info.inJump;
\r
9655 out += info.outJump;
\r
9658 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9659 Int24 *in = (Int24 *)inBuffer;
\r
9660 scale = 1.0 / 8388607.5;
\r
9661 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9662 for (j=0; j<info.channels; j++) {
\r
9663 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9664 out[info.outOffset[j]] += 0.5;
\r
9665 out[info.outOffset[j]] *= scale;
\r
9667 in += info.inJump;
\r
9668 out += info.outJump;
\r
9671 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9672 Int32 *in = (Int32 *)inBuffer;
\r
9673 scale = 1.0 / 2147483647.5;
\r
9674 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9675 for (j=0; j<info.channels; j++) {
\r
9676 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9677 out[info.outOffset[j]] += 0.5;
\r
9678 out[info.outOffset[j]] *= scale;
\r
9680 in += info.inJump;
\r
9681 out += info.outJump;
\r
9684 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9685 Float32 *in = (Float32 *)inBuffer;
\r
9686 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9687 for (j=0; j<info.channels; j++) {
\r
9688 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9690 in += info.inJump;
\r
9691 out += info.outJump;
\r
9694 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9695 // Channel compensation and/or (de)interleaving only.
\r
9696 Float64 *in = (Float64 *)inBuffer;
\r
9697 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9698 for (j=0; j<info.channels; j++) {
\r
9699 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9701 in += info.inJump;
\r
9702 out += info.outJump;
\r
9706 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9708 Float32 *out = (Float32 *)outBuffer;
\r
9710 if (info.inFormat == RTAUDIO_SINT8) {
\r
9711 signed char *in = (signed char *)inBuffer;
\r
9712 scale = (Float32) ( 1.0 / 127.5 );
\r
9713 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9714 for (j=0; j<info.channels; j++) {
\r
9715 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9716 out[info.outOffset[j]] += 0.5;
\r
9717 out[info.outOffset[j]] *= scale;
\r
9719 in += info.inJump;
\r
9720 out += info.outJump;
\r
9723 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9724 Int16 *in = (Int16 *)inBuffer;
\r
9725 scale = (Float32) ( 1.0 / 32767.5 );
\r
9726 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9727 for (j=0; j<info.channels; j++) {
\r
9728 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9729 out[info.outOffset[j]] += 0.5;
\r
9730 out[info.outOffset[j]] *= scale;
\r
9732 in += info.inJump;
\r
9733 out += info.outJump;
\r
9736 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9737 Int24 *in = (Int24 *)inBuffer;
\r
9738 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9740 for (j=0; j<info.channels; j++) {
\r
9741 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9742 out[info.outOffset[j]] += 0.5;
\r
9743 out[info.outOffset[j]] *= scale;
\r
9745 in += info.inJump;
\r
9746 out += info.outJump;
\r
9749 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9750 Int32 *in = (Int32 *)inBuffer;
\r
9751 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9755 out[info.outOffset[j]] += 0.5;
\r
9756 out[info.outOffset[j]] *= scale;
\r
9758 in += info.inJump;
\r
9759 out += info.outJump;
\r
9762 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9763 // Channel compensation and/or (de)interleaving only.
\r
9764 Float32 *in = (Float32 *)inBuffer;
\r
9765 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9766 for (j=0; j<info.channels; j++) {
\r
9767 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9769 in += info.inJump;
\r
9770 out += info.outJump;
\r
9773 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9774 Float64 *in = (Float64 *)inBuffer;
\r
9775 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9776 for (j=0; j<info.channels; j++) {
\r
9777 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9779 in += info.inJump;
\r
9780 out += info.outJump;
\r
9784 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9785 Int32 *out = (Int32 *)outBuffer;
\r
9786 if (info.inFormat == RTAUDIO_SINT8) {
\r
9787 signed char *in = (signed char *)inBuffer;
\r
9788 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9789 for (j=0; j<info.channels; j++) {
\r
9790 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9791 out[info.outOffset[j]] <<= 24;
\r
9793 in += info.inJump;
\r
9794 out += info.outJump;
\r
9797 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9798 Int16 *in = (Int16 *)inBuffer;
\r
9799 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9800 for (j=0; j<info.channels; j++) {
\r
9801 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9802 out[info.outOffset[j]] <<= 16;
\r
9804 in += info.inJump;
\r
9805 out += info.outJump;
\r
9808 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9809 Int24 *in = (Int24 *)inBuffer;
\r
9810 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9811 for (j=0; j<info.channels; j++) {
\r
9812 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9813 out[info.outOffset[j]] <<= 8;
\r
9815 in += info.inJump;
\r
9816 out += info.outJump;
\r
9819 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9820 // Channel compensation and/or (de)interleaving only.
\r
9821 Int32 *in = (Int32 *)inBuffer;
\r
9822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9823 for (j=0; j<info.channels; j++) {
\r
9824 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9826 in += info.inJump;
\r
9827 out += info.outJump;
\r
9830 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9831 Float32 *in = (Float32 *)inBuffer;
\r
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9833 for (j=0; j<info.channels; j++) {
\r
9834 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9840 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9841 Float64 *in = (Float64 *)inBuffer;
\r
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9843 for (j=0; j<info.channels; j++) {
\r
9844 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9851 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9852 Int24 *out = (Int24 *)outBuffer;
\r
9853 if (info.inFormat == RTAUDIO_SINT8) {
\r
9854 signed char *in = (signed char *)inBuffer;
\r
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9856 for (j=0; j<info.channels; j++) {
\r
9857 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9858 //out[info.outOffset[j]] <<= 16;
\r
9860 in += info.inJump;
\r
9861 out += info.outJump;
\r
9864 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9865 Int16 *in = (Int16 *)inBuffer;
\r
9866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9867 for (j=0; j<info.channels; j++) {
\r
9868 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9869 //out[info.outOffset[j]] <<= 8;
\r
9871 in += info.inJump;
\r
9872 out += info.outJump;
\r
9875 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9876 // Channel compensation and/or (de)interleaving only.
\r
9877 Int24 *in = (Int24 *)inBuffer;
\r
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9879 for (j=0; j<info.channels; j++) {
\r
9880 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9882 in += info.inJump;
\r
9883 out += info.outJump;
\r
9886 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9887 Int32 *in = (Int32 *)inBuffer;
\r
9888 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9889 for (j=0; j<info.channels; j++) {
\r
9890 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9891 //out[info.outOffset[j]] >>= 8;
\r
9893 in += info.inJump;
\r
9894 out += info.outJump;
\r
9897 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9898 Float32 *in = (Float32 *)inBuffer;
\r
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9900 for (j=0; j<info.channels; j++) {
\r
9901 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9903 in += info.inJump;
\r
9904 out += info.outJump;
\r
9907 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9908 Float64 *in = (Float64 *)inBuffer;
\r
9909 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9910 for (j=0; j<info.channels; j++) {
\r
9911 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9913 in += info.inJump;
\r
9914 out += info.outJump;
\r
9918 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9919 Int16 *out = (Int16 *)outBuffer;
\r
9920 if (info.inFormat == RTAUDIO_SINT8) {
\r
9921 signed char *in = (signed char *)inBuffer;
\r
9922 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9923 for (j=0; j<info.channels; j++) {
\r
9924 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9925 out[info.outOffset[j]] <<= 8;
\r
9927 in += info.inJump;
\r
9928 out += info.outJump;
\r
9931 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9932 // Channel compensation and/or (de)interleaving only.
\r
9933 Int16 *in = (Int16 *)inBuffer;
\r
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9935 for (j=0; j<info.channels; j++) {
\r
9936 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9938 in += info.inJump;
\r
9939 out += info.outJump;
\r
9942 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9943 Int24 *in = (Int24 *)inBuffer;
\r
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9945 for (j=0; j<info.channels; j++) {
\r
9946 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9953 Int32 *in = (Int32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9962 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9963 Float32 *in = (Float32 *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9968 in += info.inJump;
\r
9969 out += info.outJump;
\r
9972 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9973 Float64 *in = (Float64 *)inBuffer;
\r
9974 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9975 for (j=0; j<info.channels; j++) {
\r
9976 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9978 in += info.inJump;
\r
9979 out += info.outJump;
\r
9983 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9984 signed char *out = (signed char *)outBuffer;
\r
9985 if (info.inFormat == RTAUDIO_SINT8) {
\r
9986 // Channel compensation and/or (de)interleaving only.
\r
9987 signed char *in = (signed char *)inBuffer;
\r
9988 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9989 for (j=0; j<info.channels; j++) {
\r
9990 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9992 in += info.inJump;
\r
9993 out += info.outJump;
\r
9996 if (info.inFormat == RTAUDIO_SINT16) {
\r
9997 Int16 *in = (Int16 *)inBuffer;
\r
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9999 for (j=0; j<info.channels; j++) {
\r
10000 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10007 Int24 *in = (Int24 *)inBuffer;
\r
10008 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10009 for (j=0; j<info.channels; j++) {
\r
10010 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10012 in += info.inJump;
\r
10013 out += info.outJump;
\r
10016 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10017 Int32 *in = (Int32 *)inBuffer;
\r
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10019 for (j=0; j<info.channels; j++) {
\r
10020 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10022 in += info.inJump;
\r
10023 out += info.outJump;
\r
10026 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10027 Float32 *in = (Float32 *)inBuffer;
\r
10028 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10029 for (j=0; j<info.channels; j++) {
\r
10030 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10032 in += info.inJump;
\r
10033 out += info.outJump;
\r
10036 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10037 Float64 *in = (Float64 *)inBuffer;
\r
10038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10039 for (j=0; j<info.channels; j++) {
\r
10040 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10042 in += info.inJump;
\r
10043 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10053 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10055 register char val;
\r
10056 register char *ptr;
\r
10059 if ( format == RTAUDIO_SINT16 ) {
\r
10060 for ( unsigned int i=0; i<samples; i++ ) {
\r
10061 // Swap 1st and 2nd bytes.
\r
10063 *(ptr) = *(ptr+1);
\r
10066 // Increment 2 bytes.
\r
10070 else if ( format == RTAUDIO_SINT32 ||
\r
10071 format == RTAUDIO_FLOAT32 ) {
\r
10072 for ( unsigned int i=0; i<samples; i++ ) {
\r
10073 // Swap 1st and 4th bytes.
\r
10075 *(ptr) = *(ptr+3);
\r
10078 // Swap 2nd and 3rd bytes.
\r
10081 *(ptr) = *(ptr+1);
\r
10084 // Increment 3 more bytes.
\r
10088 else if ( format == RTAUDIO_SINT24 ) {
\r
10089 for ( unsigned int i=0; i<samples; i++ ) {
\r
10090 // Swap 1st and 3rd bytes.
\r
10092 *(ptr) = *(ptr+2);
\r
10095 // Increment 2 more bytes.
\r
10099 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10100 for ( unsigned int i=0; i<samples; i++ ) {
\r
10101 // Swap 1st and 8th bytes
\r
10103 *(ptr) = *(ptr+7);
\r
10106 // Swap 2nd and 7th bytes
\r
10109 *(ptr) = *(ptr+5);
\r
10112 // Swap 3rd and 6th bytes
\r
10115 *(ptr) = *(ptr+3);
\r
10118 // Swap 4th and 5th bytes
\r
10121 *(ptr) = *(ptr+1);
\r
10124 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r