/************************************************************************/
/*! \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1
\r
#include "RtAudio.h"

#include <algorithm>
#include <climits>
#include <cstdlib>
#include <cstring>
#include <iostream>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers so the rest of the code stays
// API-agnostic: Windows critical sections, pthread mutexes elsewhere,
// and no-op dummies for the dummy (no-API) build.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
73 // *************************************************** //
\r
75 // RtAudio definitions.
\r
77 // *************************************************** //
\r
79 std::string RtAudio :: getVersion( void ) throw()
\r
81 return RTAUDIO_VERSION;
\r
84 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
88 // The order here will control the order of RtAudio's API search in
\r
90 #if defined(__UNIX_JACK__)
\r
91 apis.push_back( UNIX_JACK );
\r
93 #if defined(__LINUX_ALSA__)
\r
94 apis.push_back( LINUX_ALSA );
\r
96 #if defined(__LINUX_PULSE__)
\r
97 apis.push_back( LINUX_PULSE );
\r
99 #if defined(__LINUX_OSS__)
\r
100 apis.push_back( LINUX_OSS );
\r
102 #if defined(__WINDOWS_ASIO__)
\r
103 apis.push_back( WINDOWS_ASIO );
\r
105 #if defined(__WINDOWS_WASAPI__)
\r
106 apis.push_back( WINDOWS_WASAPI );
\r
108 #if defined(__WINDOWS_DS__)
\r
109 apis.push_back( WINDOWS_DS );
\r
111 #if defined(__MACOSX_CORE__)
\r
112 apis.push_back( MACOSX_CORE );
\r
114 #if defined(__RTAUDIO_DUMMY__)
\r
115 apis.push_back( RTAUDIO_DUMMY );
\r
119 void RtAudio :: openRtApi( RtAudio::Api api )
\r
125 #if defined(__UNIX_JACK__)
\r
126 if ( api == UNIX_JACK )
\r
127 rtapi_ = new RtApiJack();
\r
129 #if defined(__LINUX_ALSA__)
\r
130 if ( api == LINUX_ALSA )
\r
131 rtapi_ = new RtApiAlsa();
\r
133 #if defined(__LINUX_PULSE__)
\r
134 if ( api == LINUX_PULSE )
\r
135 rtapi_ = new RtApiPulse();
\r
137 #if defined(__LINUX_OSS__)
\r
138 if ( api == LINUX_OSS )
\r
139 rtapi_ = new RtApiOss();
\r
141 #if defined(__WINDOWS_ASIO__)
\r
142 if ( api == WINDOWS_ASIO )
\r
143 rtapi_ = new RtApiAsio();
\r
145 #if defined(__WINDOWS_WASAPI__)
\r
146 if ( api == WINDOWS_WASAPI )
\r
147 rtapi_ = new RtApiWasapi();
\r
149 #if defined(__WINDOWS_DS__)
\r
150 if ( api == WINDOWS_DS )
\r
151 rtapi_ = new RtApiDs();
\r
153 #if defined(__MACOSX_CORE__)
\r
154 if ( api == MACOSX_CORE )
\r
155 rtapi_ = new RtApiCore();
\r
157 #if defined(__RTAUDIO_DUMMY__)
\r
158 if ( api == RTAUDIO_DUMMY )
\r
159 rtapi_ = new RtApiDummy();
\r
163 RtAudio :: RtAudio( RtAudio::Api api )
\r
167 if ( api != UNSPECIFIED ) {
\r
168 // Attempt to open the specified API.
\r
170 if ( rtapi_ ) return;
\r
172 // No compiled support for specified API value. Issue a debug
\r
173 // warning and continue as if no API was specified.
\r
174 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
177 // Iterate through the compiled APIs and return as soon as we find
\r
178 // one with at least one device or we reach the end of the list.
\r
179 std::vector< RtAudio::Api > apis;
\r
180 getCompiledApi( apis );
\r
181 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
182 openRtApi( apis[i] );
\r
183 if ( rtapi_->getDeviceCount() ) break;
\r
186 if ( rtapi_ ) return;
\r
188 // It should not be possible to get here because the preprocessor
\r
189 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
190 // API-specific definitions are passed to the compiler. But just in
\r
191 // case something weird happens, we'll thow an error.
\r
192 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
193 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
196 RtAudio :: ~RtAudio() throw()
\r
202 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
203 RtAudio::StreamParameters *inputParameters,
\r
204 RtAudioFormat format, unsigned int sampleRate,
\r
205 unsigned int *bufferFrames,
\r
206 RtAudioCallback callback, void *userData,
\r
207 RtAudio::StreamOptions *options,
\r
208 RtAudioErrorCallback errorCallback )
\r
210 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
211 sampleRate, bufferFrames, callback,
\r
212 userData, options, errorCallback );
\r
215 // *************************************************** //
\r
217 // Public RtApi definitions (see end of file for
\r
218 // private or protected utility functions).
\r
220 // *************************************************** //
\r
224 stream_.state = STREAM_CLOSED;
\r
225 stream_.mode = UNINITIALIZED;
\r
226 stream_.apiHandle = 0;
\r
227 stream_.userBuffer[0] = 0;
\r
228 stream_.userBuffer[1] = 0;
\r
229 MUTEX_INITIALIZE( &stream_.mutex );
\r
230 showWarnings_ = true;
\r
231 firstErrorOccurred_ = false;
\r
236 MUTEX_DESTROY( &stream_.mutex );
\r
239 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
240 RtAudio::StreamParameters *iParams,
\r
241 RtAudioFormat format, unsigned int sampleRate,
\r
242 unsigned int *bufferFrames,
\r
243 RtAudioCallback callback, void *userData,
\r
244 RtAudio::StreamOptions *options,
\r
245 RtAudioErrorCallback errorCallback )
\r
247 if ( stream_.state != STREAM_CLOSED ) {
\r
248 errorText_ = "RtApi::openStream: a stream is already open!";
\r
249 error( RtAudioError::INVALID_USE );
\r
253 // Clear stream information potentially left from a previously open stream.
\r
256 if ( oParams && oParams->nChannels < 1 ) {
\r
257 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 if ( iParams && iParams->nChannels < 1 ) {
\r
263 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 if ( oParams == NULL && iParams == NULL ) {
\r
269 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
270 error( RtAudioError::INVALID_USE );
\r
274 if ( formatBytes(format) == 0 ) {
\r
275 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
276 error( RtAudioError::INVALID_USE );
\r
280 unsigned int nDevices = getDeviceCount();
\r
281 unsigned int oChannels = 0;
\r
283 oChannels = oParams->nChannels;
\r
284 if ( oParams->deviceId >= nDevices ) {
\r
285 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
286 error( RtAudioError::INVALID_USE );
\r
291 unsigned int iChannels = 0;
\r
293 iChannels = iParams->nChannels;
\r
294 if ( iParams->deviceId >= nDevices ) {
\r
295 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
296 error( RtAudioError::INVALID_USE );
\r
303 if ( oChannels > 0 ) {
\r
305 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
306 sampleRate, format, bufferFrames, options );
\r
307 if ( result == false ) {
\r
308 error( RtAudioError::SYSTEM_ERROR );
\r
313 if ( iChannels > 0 ) {
\r
315 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
316 sampleRate, format, bufferFrames, options );
\r
317 if ( result == false ) {
\r
318 if ( oChannels > 0 ) closeStream();
\r
319 error( RtAudioError::SYSTEM_ERROR );
\r
324 stream_.callbackInfo.callback = (void *) callback;
\r
325 stream_.callbackInfo.userData = userData;
\r
326 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
328 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
329 stream_.state = STREAM_STOPPED;
\r
332 unsigned int RtApi :: getDefaultInputDevice( void )
\r
334 // Should be implemented in subclasses if possible.
\r
338 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
340 // Should be implemented in subclasses if possible.
\r
344 void RtApi :: closeStream( void )
\r
346 // MUST be implemented in subclasses!
\r
350 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
351 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
352 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
353 RtAudio::StreamOptions * /*options*/ )
\r
355 // MUST be implemented in subclasses!
\r
359 void RtApi :: tickStreamTime( void )
\r
361 // Subclasses that do not provide their own implementation of
\r
362 // getStreamTime should call this function once per buffer I/O to
\r
363 // provide basic stream time support.
\r
365 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
367 #if defined( HAVE_GETTIMEOFDAY )
\r
368 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
372 long RtApi :: getStreamLatency( void )
\r
376 long totalLatency = 0;
\r
377 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
378 totalLatency = stream_.latency[0];
\r
379 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
380 totalLatency += stream_.latency[1];
\r
382 return totalLatency;
\r
385 double RtApi :: getStreamTime( void )
\r
389 #if defined( HAVE_GETTIMEOFDAY )
\r
390 // Return a very accurate estimate of the stream time by
\r
391 // adding in the elapsed time since the last tick.
\r
392 struct timeval then;
\r
393 struct timeval now;
\r
395 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
396 return stream_.streamTime;
\r
398 gettimeofday( &now, NULL );
\r
399 then = stream_.lastTickTimestamp;
\r
400 return stream_.streamTime +
\r
401 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
402 (then.tv_sec + 0.000001 * then.tv_usec));
\r
404 return stream_.streamTime;
\r
408 void RtApi :: setStreamTime( double time )
\r
413 stream_.streamTime = time;
\r
416 unsigned int RtApi :: getStreamSampleRate( void )
\r
420 return stream_.sampleRate;
\r
424 // *************************************************** //
\r
426 // OS/API-specific methods.
\r
428 // *************************************************** //
\r
430 #if defined(__MACOSX_CORE__)
\r
432 // The OS X CoreAudio API is designed to use a separate callback
\r
433 // procedure for each of its audio devices. A single RtAudio duplex
\r
434 // stream using two different devices is supported here, though it
\r
435 // cannot be guaranteed to always behave correctly because we cannot
\r
436 // synchronize these two callbacks.
\r
438 // A property listener is installed for over/underrun information.
\r
439 // However, no functionality is currently provided to allow property
\r
440 // listeners to trigger user handlers because it is unclear what could
\r
441 // be done if a critical stream parameter (buffer size, sample rate,
\r
442 // device disconnect) notification arrived. The listeners entail
\r
443 // quite a bit of extra code and most likely, a user program wouldn't
\r
444 // be prepared for the result anyway. However, we do provide a flag
\r
445 // to the client callback function to inform of an over/underrun.
\r
447 // A structure to hold various information related to the CoreAudio API
\r
449 struct CoreHandle {
\r
450 AudioDeviceID id[2]; // device ids
\r
451 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
452 AudioDeviceIOProcID procId[2];
\r
454 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
455 UInt32 nStreams[2]; // number of streams to use
\r
457 char *deviceBuffer;
\r
458 pthread_cond_t condition;
\r
459 int drainCounter; // Tracks callback counts when draining
\r
460 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
463 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
466 RtApiCore:: RtApiCore()
\r
468 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
469 // This is a largely undocumented but absolutely necessary
\r
470 // requirement starting with OS-X 10.6. If not called, queries and
\r
471 // updates to various audio device properties are not handled
\r
473 CFRunLoopRef theRunLoop = NULL;
\r
474 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
475 kAudioObjectPropertyScopeGlobal,
\r
476 kAudioObjectPropertyElementMaster };
\r
477 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
478 if ( result != noErr ) {
\r
479 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
480 error( RtAudioError::WARNING );
\r
485 RtApiCore :: ~RtApiCore()
\r
487 // The subclass destructor gets called before the base class
\r
488 // destructor, so close an existing stream before deallocating
\r
489 // apiDeviceId memory.
\r
490 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
493 unsigned int RtApiCore :: getDeviceCount( void )
\r
495 // Find out how many audio devices there are, if any.
\r
497 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
498 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
499 if ( result != noErr ) {
\r
500 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
501 error( RtAudioError::WARNING );
\r
505 return dataSize / sizeof( AudioDeviceID );
\r
508 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
510 unsigned int nDevices = getDeviceCount();
\r
511 if ( nDevices <= 1 ) return 0;
\r
514 UInt32 dataSize = sizeof( AudioDeviceID );
\r
515 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
516 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
517 if ( result != noErr ) {
\r
518 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
519 error( RtAudioError::WARNING );
\r
523 dataSize *= nDevices;
\r
524 AudioDeviceID deviceList[ nDevices ];
\r
525 property.mSelector = kAudioHardwarePropertyDevices;
\r
526 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
527 if ( result != noErr ) {
\r
528 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
529 error( RtAudioError::WARNING );
\r
533 for ( unsigned int i=0; i<nDevices; i++ )
\r
534 if ( id == deviceList[i] ) return i;
\r
536 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
537 error( RtAudioError::WARNING );
\r
541 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
543 unsigned int nDevices = getDeviceCount();
\r
544 if ( nDevices <= 1 ) return 0;
\r
547 UInt32 dataSize = sizeof( AudioDeviceID );
\r
548 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
549 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
550 if ( result != noErr ) {
\r
551 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
552 error( RtAudioError::WARNING );
\r
556 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
557 AudioDeviceID deviceList[ nDevices ];
\r
558 property.mSelector = kAudioHardwarePropertyDevices;
\r
559 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
560 if ( result != noErr ) {
\r
561 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
562 error( RtAudioError::WARNING );
\r
566 for ( unsigned int i=0; i<nDevices; i++ )
\r
567 if ( id == deviceList[i] ) return i;
\r
569 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
570 error( RtAudioError::WARNING );
\r
574 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
576 RtAudio::DeviceInfo info;
\r
577 info.probed = false;
\r
580 unsigned int nDevices = getDeviceCount();
\r
581 if ( nDevices == 0 ) {
\r
582 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
583 error( RtAudioError::INVALID_USE );
\r
587 if ( device >= nDevices ) {
\r
588 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
589 error( RtAudioError::INVALID_USE );
\r
593 AudioDeviceID deviceList[ nDevices ];
\r
594 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
595 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
596 kAudioObjectPropertyScopeGlobal,
\r
597 kAudioObjectPropertyElementMaster };
\r
598 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
599 0, NULL, &dataSize, (void *) &deviceList );
\r
600 if ( result != noErr ) {
\r
601 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
602 error( RtAudioError::WARNING );
\r
606 AudioDeviceID id = deviceList[ device ];
\r
608 // Get the device name.
\r
610 CFStringRef cfname;
\r
611 dataSize = sizeof( CFStringRef );
\r
612 property.mSelector = kAudioObjectPropertyManufacturer;
\r
613 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
614 if ( result != noErr ) {
\r
615 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtAudioError::WARNING );
\r
621 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
622 int length = CFStringGetLength(cfname);
\r
623 char *mname = (char *)malloc(length * 3 + 1);
\r
624 #if defined( UNICODE ) || defined( _UNICODE )
\r
625 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
627 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
629 info.name.append( (const char *)mname, strlen(mname) );
\r
630 info.name.append( ": " );
\r
631 CFRelease( cfname );
\r
634 property.mSelector = kAudioObjectPropertyName;
\r
635 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
636 if ( result != noErr ) {
\r
637 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
638 errorText_ = errorStream_.str();
\r
639 error( RtAudioError::WARNING );
\r
643 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
644 length = CFStringGetLength(cfname);
\r
645 char *name = (char *)malloc(length * 3 + 1);
\r
646 #if defined( UNICODE ) || defined( _UNICODE )
\r
647 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
649 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
651 info.name.append( (const char *)name, strlen(name) );
\r
652 CFRelease( cfname );
\r
655 // Get the output stream "configuration".
\r
656 AudioBufferList *bufferList = nil;
\r
657 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
658 property.mScope = kAudioDevicePropertyScopeOutput;
\r
659 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
661 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
662 if ( result != noErr || dataSize == 0 ) {
\r
663 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
664 errorText_ = errorStream_.str();
\r
665 error( RtAudioError::WARNING );
\r
669 // Allocate the AudioBufferList.
\r
670 bufferList = (AudioBufferList *) malloc( dataSize );
\r
671 if ( bufferList == NULL ) {
\r
672 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
673 error( RtAudioError::WARNING );
\r
677 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 free( bufferList );
\r
680 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
681 errorText_ = errorStream_.str();
\r
682 error( RtAudioError::WARNING );
\r
686 // Get output channel information.
\r
687 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
688 for ( i=0; i<nStreams; i++ )
\r
689 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
690 free( bufferList );
\r
692 // Get the input stream "configuration".
\r
693 property.mScope = kAudioDevicePropertyScopeInput;
\r
694 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
695 if ( result != noErr || dataSize == 0 ) {
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Allocate the AudioBufferList.
\r
703 bufferList = (AudioBufferList *) malloc( dataSize );
\r
704 if ( bufferList == NULL ) {
\r
705 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
706 error( RtAudioError::WARNING );
\r
710 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
711 if (result != noErr || dataSize == 0) {
\r
712 free( bufferList );
\r
713 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
714 errorText_ = errorStream_.str();
\r
715 error( RtAudioError::WARNING );
\r
719 // Get input channel information.
\r
720 nStreams = bufferList->mNumberBuffers;
\r
721 for ( i=0; i<nStreams; i++ )
\r
722 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
723 free( bufferList );
\r
725 // If device opens for both playback and capture, we determine the channels.
\r
726 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
727 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
729 // Probe the device sample rates.
\r
730 bool isInput = false;
\r
731 if ( info.outputChannels == 0 ) isInput = true;
\r
733 // Determine the supported sample rates.
\r
734 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
735 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
736 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
737 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
738 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
739 errorText_ = errorStream_.str();
\r
740 error( RtAudioError::WARNING );
\r
744 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
745 AudioValueRange rangeList[ nRanges ];
\r
746 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
747 if ( result != kAudioHardwareNoError ) {
\r
748 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
749 errorText_ = errorStream_.str();
\r
750 error( RtAudioError::WARNING );
\r
754 // The sample rate reporting mechanism is a bit of a mystery. It
\r
755 // seems that it can either return individual rates or a range of
\r
756 // rates. I assume that if the min / max range values are the same,
\r
757 // then that represents a single supported rate and if the min / max
\r
758 // range values are different, the device supports an arbitrary
\r
759 // range of values (though there might be multiple ranges, so we'll
\r
760 // use the most conservative range).
\r
761 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
762 bool haveValueRange = false;
\r
763 info.sampleRates.clear();
\r
764 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
765 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
766 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
768 haveValueRange = true;
\r
769 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
770 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
774 if ( haveValueRange ) {
\r
775 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
776 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
777 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
781 // Sort and remove any redundant values
\r
782 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
783 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
785 if ( info.sampleRates.size() == 0 ) {
\r
786 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
787 errorText_ = errorStream_.str();
\r
788 error( RtAudioError::WARNING );
\r
792 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
793 // Thus, any other "physical" formats supported by the device are of
\r
794 // no interest to the client.
\r
795 info.nativeFormats = RTAUDIO_FLOAT32;
\r
797 if ( info.outputChannels > 0 )
\r
798 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
799 if ( info.inputChannels > 0 )
\r
800 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
802 info.probed = true;
\r
806 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
807 const AudioTimeStamp* /*inNow*/,
\r
808 const AudioBufferList* inInputData,
\r
809 const AudioTimeStamp* /*inInputTime*/,
\r
810 AudioBufferList* outOutputData,
\r
811 const AudioTimeStamp* /*inOutputTime*/,
\r
812 void* infoPointer )
\r
814 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
816 RtApiCore *object = (RtApiCore *) info->object;
\r
817 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
818 return kAudioHardwareUnspecifiedError;
\r
820 return kAudioHardwareNoError;
\r
823 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
825 const AudioObjectPropertyAddress properties[],
\r
826 void* handlePointer )
\r
828 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
829 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
830 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
831 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
832 handle->xrun[1] = true;
\r
834 handle->xrun[0] = true;
\r
838 return kAudioHardwareNoError;
\r
841 static OSStatus rateListener( AudioObjectID inDevice,
\r
842 UInt32 /*nAddresses*/,
\r
843 const AudioObjectPropertyAddress /*properties*/[],
\r
844 void* ratePointer )
\r
846 Float64 *rate = (Float64 *) ratePointer;
\r
847 UInt32 dataSize = sizeof( Float64 );
\r
848 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
849 kAudioObjectPropertyScopeGlobal,
\r
850 kAudioObjectPropertyElementMaster };
\r
851 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
852 return kAudioHardwareNoError;
\r
855 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
856 unsigned int firstChannel, unsigned int sampleRate,
\r
857 RtAudioFormat format, unsigned int *bufferSize,
\r
858 RtAudio::StreamOptions *options )
\r
861 unsigned int nDevices = getDeviceCount();
\r
862 if ( nDevices == 0 ) {
\r
863 // This should not happen because a check is made before this function is called.
\r
864 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
868 if ( device >= nDevices ) {
\r
869 // This should not happen because a check is made before this function is called.
\r
870 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
874 AudioDeviceID deviceList[ nDevices ];
\r
875 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
876 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
877 kAudioObjectPropertyScopeGlobal,
\r
878 kAudioObjectPropertyElementMaster };
\r
879 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
880 0, NULL, &dataSize, (void *) &deviceList );
\r
881 if ( result != noErr ) {
\r
882 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
886 AudioDeviceID id = deviceList[ device ];
\r
888 // Setup for stream mode.
\r
889 bool isInput = false;
\r
890 if ( mode == INPUT ) {
\r
892 property.mScope = kAudioDevicePropertyScopeInput;
\r
895 property.mScope = kAudioDevicePropertyScopeOutput;
\r
897 // Get the stream "configuration".
\r
898 AudioBufferList *bufferList = nil;
\r
900 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
901 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
902 if ( result != noErr || dataSize == 0 ) {
\r
903 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
904 errorText_ = errorStream_.str();
\r
908 // Allocate the AudioBufferList.
\r
909 bufferList = (AudioBufferList *) malloc( dataSize );
\r
910 if ( bufferList == NULL ) {
\r
911 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
915 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
916 if (result != noErr || dataSize == 0) {
\r
917 free( bufferList );
\r
918 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
919 errorText_ = errorStream_.str();
\r
923 // Search for one or more streams that contain the desired number of
\r
924 // channels. CoreAudio devices can have an arbitrary number of
\r
925 // streams and each stream can have an arbitrary number of channels.
\r
926 // For each stream, a single buffer of interleaved samples is
\r
927 // provided. RtAudio prefers the use of one stream of interleaved
\r
928 // data or multiple consecutive single-channel streams. However, we
\r
929 // now support multiple consecutive multi-channel streams of
\r
930 // interleaved data as well.
\r
931 UInt32 iStream, offsetCounter = firstChannel;
\r
932 UInt32 nStreams = bufferList->mNumberBuffers;
\r
933 bool monoMode = false;
\r
934 bool foundStream = false;
\r
936 // First check that the device supports the requested number of
\r
938 UInt32 deviceChannels = 0;
\r
939 for ( iStream=0; iStream<nStreams; iStream++ )
\r
940 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
942 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
943 free( bufferList );
\r
944 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
945 errorText_ = errorStream_.str();
\r
949 // Look for a single stream meeting our needs.
\r
950 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
951 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
952 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
953 if ( streamChannels >= channels + offsetCounter ) {
\r
954 firstStream = iStream;
\r
955 channelOffset = offsetCounter;
\r
956 foundStream = true;
\r
959 if ( streamChannels > offsetCounter ) break;
\r
960 offsetCounter -= streamChannels;
\r
963 // If we didn't find a single stream above, then we should be able
\r
964 // to meet the channel specification with multiple streams.
\r
965 if ( foundStream == false ) {
\r
967 offsetCounter = firstChannel;
\r
968 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
969 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
970 if ( streamChannels > offsetCounter ) break;
\r
971 offsetCounter -= streamChannels;
\r
974 firstStream = iStream;
\r
975 channelOffset = offsetCounter;
\r
976 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
978 if ( streamChannels > 1 ) monoMode = false;
\r
979 while ( channelCounter > 0 ) {
\r
980 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
981 if ( streamChannels > 1 ) monoMode = false;
\r
982 channelCounter -= streamChannels;
\r
987 free( bufferList );
\r
989 // Determine the buffer size.
\r
990 AudioValueRange bufferRange;
\r
991 dataSize = sizeof( AudioValueRange );
\r
992 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
993 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
995 if ( result != noErr ) {
\r
996 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
997 errorText_ = errorStream_.str();
\r
1001 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1002 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1003 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1005 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1006 // need to make this setting for the master channel.
\r
1007 UInt32 theSize = (UInt32) *bufferSize;
\r
1008 dataSize = sizeof( UInt32 );
\r
1009 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1010 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1012 if ( result != noErr ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1019 // MUST be the same in both directions!
\r
1020 *bufferSize = theSize;
\r
1021 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1022 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1023 errorText_ = errorStream_.str();
\r
1027 stream_.bufferSize = *bufferSize;
\r
1028 stream_.nBuffers = 1;
\r
1030 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1031 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1033 dataSize = sizeof( hog_pid );
\r
1034 property.mSelector = kAudioDevicePropertyHogMode;
\r
1035 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1042 if ( hog_pid != getpid() ) {
\r
1043 hog_pid = getpid();
\r
1044 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1045 if ( result != noErr ) {
\r
1046 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1047 errorText_ = errorStream_.str();
\r
1053 // Check and if necessary, change the sample rate for the device.
\r
1054 Float64 nominalRate;
\r
1055 dataSize = sizeof( Float64 );
\r
1056 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1057 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1058 if ( result != noErr ) {
\r
1059 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1060 errorText_ = errorStream_.str();
\r
1064 // Only change the sample rate if off by more than 1 Hz.
\r
1065 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1067 // Set a property listener for the sample rate change
\r
1068 Float64 reportedRate = 0.0;
\r
1069 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1070 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1071 if ( result != noErr ) {
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 nominalRate = (Float64) sampleRate;
\r
1078 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1079 if ( result != noErr ) {
\r
1080 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1082 errorText_ = errorStream_.str();
\r
1086 // Now wait until the reported nominal rate is what we just set.
\r
1087 UInt32 microCounter = 0;
\r
1088 while ( reportedRate != nominalRate ) {
\r
1089 microCounter += 5000;
\r
1090 if ( microCounter > 5000000 ) break;
\r
1094 // Remove the property listener.
\r
1095 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1097 if ( microCounter > 5000000 ) {
\r
1098 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1099 errorText_ = errorStream_.str();
\r
1104 // Now set the stream format for all streams. Also, check the
\r
1105 // physical format of the device and change that if necessary.
\r
1106 AudioStreamBasicDescription description;
\r
1107 dataSize = sizeof( AudioStreamBasicDescription );
\r
1108 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1109 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1110 if ( result != noErr ) {
\r
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1112 errorText_ = errorStream_.str();
\r
1116 // Set the sample rate and data format id. However, only make the
\r
1117 // change if the sample rate is not within 1.0 of the desired
\r
1118 // rate and the format is not linear pcm.
\r
1119 bool updateFormat = false;
\r
1120 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1121 description.mSampleRate = (Float64) sampleRate;
\r
1122 updateFormat = true;
\r
1125 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1126 description.mFormatID = kAudioFormatLinearPCM;
\r
1127 updateFormat = true;
\r
1130 if ( updateFormat ) {
\r
1131 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1132 if ( result != noErr ) {
\r
1133 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1134 errorText_ = errorStream_.str();
\r
1139 // Now check the physical format.
\r
1140 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1141 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1142 if ( result != noErr ) {
\r
1143 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1144 errorText_ = errorStream_.str();
\r
1148 //std::cout << "Current physical stream format:" << std::endl;
\r
1149 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1150 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1151 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1152 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1154 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1155 description.mFormatID = kAudioFormatLinearPCM;
\r
1156 //description.mSampleRate = (Float64) sampleRate;
\r
1157 AudioStreamBasicDescription testDescription = description;
\r
1158 UInt32 formatFlags;
\r
1160 // We'll try higher bit rates first and then work our way down.
\r
1161 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1164 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1165 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1166 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1167 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1168 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1169 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1170 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1171 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1172 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1173 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1175 bool setPhysicalFormat = false;
\r
1176 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1177 testDescription = description;
\r
1178 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1179 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1180 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1181 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1183 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1184 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1185 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1186 if ( result == noErr ) {
\r
1187 setPhysicalFormat = true;
\r
1188 //std::cout << "Updated physical stream format:" << std::endl;
\r
1189 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1190 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1191 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1192 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1197 if ( !setPhysicalFormat ) {
\r
1198 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1199 errorText_ = errorStream_.str();
\r
1202 } // done setting virtual/physical formats.
\r
1204 // Get the stream / device latency.
\r
1206 dataSize = sizeof( UInt32 );
\r
1207 property.mSelector = kAudioDevicePropertyLatency;
\r
1208 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1209 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1210 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1212 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1213 errorText_ = errorStream_.str();
\r
1214 error( RtAudioError::WARNING );
\r
1218 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1219 // always be presented in native-endian format, so we should never
\r
1220 // need to byte swap.
\r
1221 stream_.doByteSwap[mode] = false;
\r
1223 // From the CoreAudio documentation, PCM data must be supplied as
\r
1225 stream_.userFormat = format;
\r
1226 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1228 if ( streamCount == 1 )
\r
1229 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1230 else // multiple streams
\r
1231 stream_.nDeviceChannels[mode] = channels;
\r
1232 stream_.nUserChannels[mode] = channels;
\r
1233 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1234 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1235 else stream_.userInterleaved = true;
\r
1236 stream_.deviceInterleaved[mode] = true;
\r
1237 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1239 // Set flags for buffer conversion.
\r
1240 stream_.doConvertBuffer[mode] = false;
\r
1241 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1243 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1244 stream_.doConvertBuffer[mode] = true;
\r
1245 if ( streamCount == 1 ) {
\r
1246 if ( stream_.nUserChannels[mode] > 1 &&
\r
1247 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1248 stream_.doConvertBuffer[mode] = true;
\r
1250 else if ( monoMode && stream_.userInterleaved )
\r
1251 stream_.doConvertBuffer[mode] = true;
\r
1253 // Allocate our CoreHandle structure for the stream.
\r
1254 CoreHandle *handle = 0;
\r
1255 if ( stream_.apiHandle == 0 ) {
\r
1257 handle = new CoreHandle;
\r
1259 catch ( std::bad_alloc& ) {
\r
1260 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1264 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1265 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1268 stream_.apiHandle = (void *) handle;
\r
1271 handle = (CoreHandle *) stream_.apiHandle;
\r
1272 handle->iStream[mode] = firstStream;
\r
1273 handle->nStreams[mode] = streamCount;
\r
1274 handle->id[mode] = id;
\r
1276 // Allocate necessary internal buffers.
\r
1277 unsigned long bufferBytes;
\r
1278 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1279 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1280 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1281 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1282 if ( stream_.userBuffer[mode] == NULL ) {
\r
1283 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1287 // If possible, we will make use of the CoreAudio stream buffers as
\r
1288 // "device buffers". However, we can't do this if using multiple
\r
1290 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1292 bool makeBuffer = true;
\r
1293 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1294 if ( mode == INPUT ) {
\r
1295 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1296 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1297 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1301 if ( makeBuffer ) {
\r
1302 bufferBytes *= *bufferSize;
\r
1303 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1304 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1305 if ( stream_.deviceBuffer == NULL ) {
\r
1306 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1312 stream_.sampleRate = sampleRate;
\r
1313 stream_.device[mode] = device;
\r
1314 stream_.state = STREAM_STOPPED;
\r
1315 stream_.callbackInfo.object = (void *) this;
\r
1317 // Setup the buffer conversion information structure.
\r
1318 if ( stream_.doConvertBuffer[mode] ) {
\r
1319 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1320 else setConvertInfo( mode, channelOffset );
\r
1323 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1324 // Only one callback procedure per device.
\r
1325 stream_.mode = DUPLEX;
\r
1327 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1328 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1330 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1331 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1333 if ( result != noErr ) {
\r
1334 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1335 errorText_ = errorStream_.str();
\r
1338 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1339 stream_.mode = DUPLEX;
\r
1341 stream_.mode = mode;
\r
1344 // Setup the device property listener for over/underload.
\r
1345 property.mSelector = kAudioDeviceProcessorOverload;
\r
1346 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1347 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1353 pthread_cond_destroy( &handle->condition );
\r
1355 stream_.apiHandle = 0;
\r
1358 for ( int i=0; i<2; i++ ) {
\r
1359 if ( stream_.userBuffer[i] ) {
\r
1360 free( stream_.userBuffer[i] );
\r
1361 stream_.userBuffer[i] = 0;
\r
1365 if ( stream_.deviceBuffer ) {
\r
1366 free( stream_.deviceBuffer );
\r
1367 stream_.deviceBuffer = 0;
\r
1370 stream_.state = STREAM_CLOSED;
\r
// Close an open CoreAudio stream: stop any running device callback(s),
// destroy the registered IOProc(s), free internal buffers, and reset the
// stream bookkeeping to the CLOSED state.
// NOTE(review): this chunk is a lossy extract — gaps in the embedded
// numbering (e.g. 1378 -> 1382) mark dropped lines such as the early
// return after the warning, the #else/#endif of the version guards, and
// several closing braces.  Confirm against the canonical RtAudio source.
1374 void RtApiCore :: closeStream( void )
\r
// Closing a never-opened (or already-closed) stream is only a warning.
1376 if ( stream_.state == STREAM_CLOSED ) {
\r
1377 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1378 error( RtAudioError::WARNING );
\r
1382 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output-side callback (handle->id[0]).
1383 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1384 if ( stream_.state == STREAM_RUNNING )
\r
1385 AudioDeviceStop( handle->id[0], callbackHandler );
\r
// On OS X 10.5+ the IOProc is identified by the procId saved at open time.
1386 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1387 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1389 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1390 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Tear down the input side only when it is a distinct device; a DUPLEX
// stream on a single device shares one callback procedure.
1394 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1395 if ( stream_.state == STREAM_RUNNING )
\r
1396 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1397 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1398 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1400 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1401 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (malloc'd during device open).
1405 for ( int i=0; i<2; i++ ) {
\r
1406 if ( stream_.userBuffer[i] ) {
\r
1407 free( stream_.userBuffer[i] );
\r
1408 stream_.userBuffer[i] = 0;
\r
// Release the shared device (conversion) buffer, if one was allocated.
1412 if ( stream_.deviceBuffer ) {
\r
1413 free( stream_.deviceBuffer );
\r
1414 stream_.deviceBuffer = 0;
\r
1417 // Destroy pthread condition variable.
\r
1418 pthread_cond_destroy( &handle->condition );
\r
1420 stream_.apiHandle = 0;
\r
// Reset state so this object can be reused to open a new stream.
1422 stream_.mode = UNINITIALIZED;
\r
1423 stream_.state = STREAM_CLOSED;
\r
// Start the CoreAudio callback procedure(s) for an open stream.
// NOTE(review): lossy extract — numbering gaps mark dropped lines (e.g. the
// early return after the warning and several closing braces / an unlock
// label).  Confirm against the canonical RtAudio source.
1426 void RtApiCore :: startStream( void )
\r
// Starting an already-running stream is only a warning, not an error.
1429 if ( stream_.state == STREAM_RUNNING ) {
\r
1430 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1431 error( RtAudioError::WARNING );
\r
1435 OSStatus result = noErr;
\r
1436 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Start the output device's registered callback procedure.
1437 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1439 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1440 if ( result != noErr ) {
\r
1441 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1442 errorText_ = errorStream_.str();
\r
// Start the input device's callback only when it is a separate device;
// a single-device DUPLEX stream uses one callback for both directions.
1447 if ( stream_.mode == INPUT ||
\r
1448 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1450 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1451 if ( result != noErr ) {
\r
1452 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1453 errorText_ = errorStream_.str();
\r
// Reset the drain bookkeeping consumed by stopStream()/abortStream()
// and the audio callback before marking the stream running.
1458 handle->drainCounter = 0;
\r
1459 handle->internalDrain = false;
\r
1460 stream_.state = STREAM_RUNNING;
\r
1463 if ( result == noErr ) return;
\r
1464 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream, draining queued output first: the audio callback is
// asked (via drainCounter) to play out silence, and this thread blocks on
// the condition variable until the callback signals that draining is done.
// NOTE(review): lossy extract — numbering gaps mark dropped lines (early
// return after the warning, closing braces).  Confirm against the
// canonical RtAudio source.
1467 void RtApiCore :: stopStream( void )
\r
// Stopping an already-stopped stream is only a warning.
1470 if ( stream_.state == STREAM_STOPPED ) {
\r
1471 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1472 error( RtAudioError::WARNING );
\r
1476 OSStatus result = noErr;
\r
1477 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means this is an external stop request: set it to 2 so
// the callback writes zeros (see callbackEvent), then wait to be signaled.
// An internal drain (callback returned 1) already has drainCounter != 0.
1480 if ( handle->drainCounter == 0 ) {
\r
1481 handle->drainCounter = 2;
\r
// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by the
// caller — presumably locked in lines dropped by the extract; verify.
1482 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1485 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1486 if ( result != noErr ) {
\r
1487 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1488 errorText_ = errorStream_.str();
\r
// Stop the input-side callback only when it runs on a distinct device.
1493 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1495 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1496 if ( result != noErr ) {
\r
1497 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1498 errorText_ = errorStream_.str();
\r
1503 stream_.state = STREAM_STOPPED;
\r
1506 if ( result == noErr ) return;
\r
1507 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: request an immediate stop without draining the user's
// queued output data.
1510 void RtApiCore :: abortStream( void )
\r
// Aborting an already-stopped stream is only a warning.
1513 if ( stream_.state == STREAM_STOPPED ) {
\r
1514 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1515 error( RtAudioError::WARNING );
\r
1519 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// drainCounter == 2 makes the audio callback skip the user callback and
// write zeros to the output until the device is stopped (see callbackEvent
// and the drainCounter checks in stopStream).
// NOTE(review): the trailing call that actually performs the stop (and the
// function's closing brace) falls in lines dropped by the extract —
// confirm against the canonical RtAudio source.
1520 handle->drainCounter = 2;
\r
1525 // This function will be called by a spawned thread when the user
\r
1526 // callback function signals that the stream should be stopped or
\r
1527 // aborted. It is better to handle it this way because the
\r
1528 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1529 // function is called.
\r
1530 static void *coreStopStream( void *ptr )
\r
1532 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1533 RtApiCore *object = (RtApiCore *) info->object;
\r
1535 object->stopStream();
\r
1536 pthread_exit( NULL );
\r
1539 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1540 const AudioBufferList *inBufferList,
\r
1541 const AudioBufferList *outBufferList )
\r
1543 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1544 if ( stream_.state == STREAM_CLOSED ) {
\r
1545 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1546 error( RtAudioError::WARNING );
\r
1550 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1551 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1553 // Check if we were draining the stream and signal is finished.
\r
1554 if ( handle->drainCounter > 3 ) {
\r
1555 ThreadHandle threadId;
\r
1557 stream_.state = STREAM_STOPPING;
\r
1558 if ( handle->internalDrain == true )
\r
1559 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1560 else // external call to stopStream()
\r
1561 pthread_cond_signal( &handle->condition );
\r
1565 AudioDeviceID outputDevice = handle->id[0];
\r
1567 // Invoke user callback to get fresh output data UNLESS we are
\r
1568 // draining stream or duplex mode AND the input/output devices are
\r
1569 // different AND this function is called for the input device.
\r
1570 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1571 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1572 double streamTime = getStreamTime();
\r
1573 RtAudioStreamStatus status = 0;
\r
1574 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1575 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1576 handle->xrun[0] = false;
\r
1578 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1579 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1580 handle->xrun[1] = false;
\r
1583 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1584 stream_.bufferSize, streamTime, status, info->userData );
\r
1585 if ( cbReturnValue == 2 ) {
\r
1586 stream_.state = STREAM_STOPPING;
\r
1587 handle->drainCounter = 2;
\r
1591 else if ( cbReturnValue == 1 ) {
\r
1592 handle->drainCounter = 1;
\r
1593 handle->internalDrain = true;
\r
1597 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1599 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1601 if ( handle->nStreams[0] == 1 ) {
\r
1602 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1604 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1606 else { // fill multiple streams with zeros
\r
1607 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1608 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1610 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1614 else if ( handle->nStreams[0] == 1 ) {
\r
1615 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1616 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1617 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1619 else { // copy from user buffer
\r
1620 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1621 stream_.userBuffer[0],
\r
1622 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1625 else { // fill multiple streams
\r
1626 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1627 if ( stream_.doConvertBuffer[0] ) {
\r
1628 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1629 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1632 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1633 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1634 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1635 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1636 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1639 else { // fill multiple multi-channel streams with interleaved data
\r
1640 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1641 Float32 *out, *in;
\r
1643 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1644 UInt32 inChannels = stream_.nUserChannels[0];
\r
1645 if ( stream_.doConvertBuffer[0] ) {
\r
1646 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1647 inChannels = stream_.nDeviceChannels[0];
\r
1650 if ( inInterleaved ) inOffset = 1;
\r
1651 else inOffset = stream_.bufferSize;
\r
1653 channelsLeft = inChannels;
\r
1654 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1656 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1657 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1660 // Account for possible channel offset in first stream
\r
1661 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1662 streamChannels -= stream_.channelOffset[0];
\r
1663 outJump = stream_.channelOffset[0];
\r
1667 // Account for possible unfilled channels at end of the last stream
\r
1668 if ( streamChannels > channelsLeft ) {
\r
1669 outJump = streamChannels - channelsLeft;
\r
1670 streamChannels = channelsLeft;
\r
1673 // Determine input buffer offsets and skips
\r
1674 if ( inInterleaved ) {
\r
1675 inJump = inChannels;
\r
1676 in += inChannels - channelsLeft;
\r
1680 in += (inChannels - channelsLeft) * inOffset;
\r
1683 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1684 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1685 *out++ = in[j*inOffset];
\r
1690 channelsLeft -= streamChannels;
\r
1696 // Don't bother draining input
\r
1697 if ( handle->drainCounter ) {
\r
1698 handle->drainCounter++;
\r
1702 AudioDeviceID inputDevice;
\r
1703 inputDevice = handle->id[1];
\r
1704 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1706 if ( handle->nStreams[1] == 1 ) {
\r
1707 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1708 convertBuffer( stream_.userBuffer[1],
\r
1709 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1710 stream_.convertInfo[1] );
\r
1712 else { // copy to user buffer
\r
1713 memcpy( stream_.userBuffer[1],
\r
1714 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1715 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1718 else { // read from multiple streams
\r
1719 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1720 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1722 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1723 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1724 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1725 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1726 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1729 else { // read from multiple multi-channel streams
\r
1730 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1731 Float32 *out, *in;
\r
1733 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1734 UInt32 outChannels = stream_.nUserChannels[1];
\r
1735 if ( stream_.doConvertBuffer[1] ) {
\r
1736 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1737 outChannels = stream_.nDeviceChannels[1];
\r
1740 if ( outInterleaved ) outOffset = 1;
\r
1741 else outOffset = stream_.bufferSize;
\r
1743 channelsLeft = outChannels;
\r
1744 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1746 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1747 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1750 // Account for possible channel offset in first stream
\r
1751 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1752 streamChannels -= stream_.channelOffset[1];
\r
1753 inJump = stream_.channelOffset[1];
\r
1757 // Account for possible unread channels at end of the last stream
\r
1758 if ( streamChannels > channelsLeft ) {
\r
1759 inJump = streamChannels - channelsLeft;
\r
1760 streamChannels = channelsLeft;
\r
1763 // Determine output buffer offsets and skips
\r
1764 if ( outInterleaved ) {
\r
1765 outJump = outChannels;
\r
1766 out += outChannels - channelsLeft;
\r
1770 out += (outChannels - channelsLeft) * outOffset;
\r
1773 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1774 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1775 out[j*outOffset] = *in++;
\r
1780 channelsLeft -= streamChannels;
\r
1784 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1785 convertBuffer( stream_.userBuffer[1],
\r
1786 stream_.deviceBuffer,
\r
1787 stream_.convertInfo[1] );
\r
1793 //MUTEX_UNLOCK( &stream_.mutex );
\r
1795 RtApi::tickStreamTime();
\r
1799 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1803 case kAudioHardwareNotRunningError:
\r
1804 return "kAudioHardwareNotRunningError";
\r
1806 case kAudioHardwareUnspecifiedError:
\r
1807 return "kAudioHardwareUnspecifiedError";
\r
1809 case kAudioHardwareUnknownPropertyError:
\r
1810 return "kAudioHardwareUnknownPropertyError";
\r
1812 case kAudioHardwareBadPropertySizeError:
\r
1813 return "kAudioHardwareBadPropertySizeError";
\r
1815 case kAudioHardwareIllegalOperationError:
\r
1816 return "kAudioHardwareIllegalOperationError";
\r
1818 case kAudioHardwareBadObjectError:
\r
1819 return "kAudioHardwareBadObjectError";
\r
1821 case kAudioHardwareBadDeviceError:
\r
1822 return "kAudioHardwareBadDeviceError";
\r
1824 case kAudioHardwareBadStreamError:
\r
1825 return "kAudioHardwareBadStreamError";
\r
1827 case kAudioHardwareUnsupportedOperationError:
\r
1828 return "kAudioHardwareUnsupportedOperationError";
\r
1830 case kAudioDeviceUnsupportedFormatError:
\r
1831 return "kAudioDeviceUnsupportedFormatError";
\r
1833 case kAudioDevicePermissionsError:
\r
1834 return "kAudioDevicePermissionsError";
\r
1837 return "CoreAudio unknown error";
\r
1841 //******************** End of __MACOSX_CORE__ *********************//
\r
1844 #if defined(__UNIX_JACK__)
\r
1846 // JACK is a low-latency audio server, originally written for the
\r
1847 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1848 // connect a number of different applications to an audio device, as
\r
1849 // well as allowing them to share audio between themselves.
\r
1851 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1852 // have ports connected to the server. The JACK server is typically
\r
1853 // started in a terminal as follows:
\r
1855 // jackd -d alsa -d hw:0
\r
1857 // or through an interface program such as qjackctl. Many of the
\r
1858 // parameters normally set for a stream are fixed by the JACK server
\r
1859 // and can be specified when the JACK server is started. In
\r
1862 // jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1864 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1865 // frames, and number of buffers = 4. Once the server is running, it
\r
1866 // is not possible to override these values. If the values are not
\r
1867 // specified in the command-line, the JACK server uses default values.
\r
1869 // The JACK server does not have to be running when an instance of
\r
1870 // RtApiJack is created, though the function getDeviceCount() will
\r
1871 // report 0 devices found until JACK has been started. When no
\r
1872 // devices are available (i.e., the JACK server is not running), a
\r
1873 // stream cannot be opened.
\r
1875 #include <jack/jack.h>
\r
1876 #include <unistd.h>
\r
1879 // A structure to hold various information related to the Jack API
\r
1880 // implementation.
\r
1881 struct JackHandle {
\r
1882 jack_client_t *client;
\r
1883 jack_port_t **ports[2];
\r
1884 std::string deviceName[2];
\r
1886 pthread_cond_t condition;
\r
1887 int drainCounter; // Tracks callback counts when draining
\r
1888 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1891 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler, installed in non-debug builds to suppress
// JACK's internal error reporting.
static void jackSilentError( const char * ) {}
\r
1896 RtApiJack :: RtApiJack()
\r
1898 // Nothing to do here.
\r
1899 #if !defined(__RTAUDIO_DEBUG__)
\r
1900 // Turn off Jack's internal error reporting.
\r
1901 jack_set_error_function( &jackSilentError );
\r
1905 RtApiJack :: ~RtApiJack()
\r
1907 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1910 unsigned int RtApiJack :: getDeviceCount( void )
\r
1912 // See if we can become a jack client.
\r
1913 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1914 jack_status_t *status = NULL;
\r
1915 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1916 if ( client == 0 ) return 0;
\r
1918 const char **ports;
\r
1919 std::string port, previousPort;
\r
1920 unsigned int nChannels = 0, nDevices = 0;
\r
1921 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1923 // Parse the port names up to the first colon (:).
\r
1924 size_t iColon = 0;
\r
1926 port = (char *) ports[ nChannels ];
\r
1927 iColon = port.find(":");
\r
1928 if ( iColon != std::string::npos ) {
\r
1929 port = port.substr( 0, iColon + 1 );
\r
1930 if ( port != previousPort ) {
\r
1932 previousPort = port;
\r
1935 } while ( ports[++nChannels] );
\r
1939 jack_client_close( client );
\r
1943 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1945 RtAudio::DeviceInfo info;
\r
1946 info.probed = false;
\r
1948 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1949 jack_status_t *status = NULL;
\r
1950 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1951 if ( client == 0 ) {
\r
1952 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1953 error( RtAudioError::WARNING );
\r
1957 const char **ports;
\r
1958 std::string port, previousPort;
\r
1959 unsigned int nPorts = 0, nDevices = 0;
\r
1960 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1962 // Parse the port names up to the first colon (:).
\r
1963 size_t iColon = 0;
\r
1965 port = (char *) ports[ nPorts ];
\r
1966 iColon = port.find(":");
\r
1967 if ( iColon != std::string::npos ) {
\r
1968 port = port.substr( 0, iColon );
\r
1969 if ( port != previousPort ) {
\r
1970 if ( nDevices == device ) info.name = port;
\r
1972 previousPort = port;
\r
1975 } while ( ports[++nPorts] );
\r
1979 if ( device >= nDevices ) {
\r
1980 jack_client_close( client );
\r
1981 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1982 error( RtAudioError::INVALID_USE );
\r
1986 // Get the current jack server sample rate.
\r
1987 info.sampleRates.clear();
\r
1988 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1990 // Count the available ports containing the client name as device
\r
1991 // channels. Jack "input ports" equal RtAudio output channels.
\r
1992 unsigned int nChannels = 0;
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.outputChannels = nChannels;
\r
2000 // Jack "output ports" equal RtAudio input channels.
\r
2002 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2004 while ( ports[ nChannels ] ) nChannels++;
\r
2006 info.inputChannels = nChannels;
\r
2009 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2010 jack_client_close(client);
\r
2011 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2012 error( RtAudioError::WARNING );
\r
2016 // If device opens for both playback and capture, we determine the channels.
\r
2017 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2018 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2020 // Jack always uses 32-bit floats.
\r
2021 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2023 // Jack doesn't provide default devices so we'll use the first available one.
\r
2024 if ( device == 0 && info.outputChannels > 0 )
\r
2025 info.isDefaultOutput = true;
\r
2026 if ( device == 0 && info.inputChannels > 0 )
\r
2027 info.isDefaultInput = true;
\r
2029 jack_client_close(client);
\r
2030 info.probed = true;
\r
2034 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2036 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2038 RtApiJack *object = (RtApiJack *) info->object;
\r
2039 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2044 // This function will be called by a spawned thread when the Jack
\r
2045 // server signals that it is shutting down. It is necessary to handle
\r
2046 // it this way because the jackShutdown() function must return before
\r
2047 // the jack_deactivate() function (in closeStream()) will return.
\r
2048 static void *jackCloseStream( void *ptr )
\r
2050 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 object->closeStream();
\r
2055 pthread_exit( NULL );
\r
2057 static void jackShutdown( void *infoPointer )
\r
2059 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2060 RtApiJack *object = (RtApiJack *) info->object;
\r
2062 // Check current stream state. If stopped, then we'll assume this
\r
2063 // was called as a result of a call to RtApiJack::stopStream (the
\r
2064 // deactivation of a client handle causes this function to be called).
\r
2065 // If not, we'll assume the Jack server is shutting down or some
\r
2066 // other problem occurred and we should close the stream.
\r
2067 if ( object->isStreamRunning() == false ) return;
\r
2069 ThreadHandle threadId;
\r
2070 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2071 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2074 static int jackXrun( void *infoPointer )
\r
2076 JackHandle *handle = (JackHandle *) infoPointer;
\r
2078 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2079 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2084 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2085 unsigned int firstChannel, unsigned int sampleRate,
\r
2086 RtAudioFormat format, unsigned int *bufferSize,
\r
2087 RtAudio::StreamOptions *options )
\r
2089 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2091 // Look for jack server and try to become a client (only do once per stream).
\r
2092 jack_client_t *client = 0;
\r
2093 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2094 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2095 jack_status_t *status = NULL;
\r
2096 if ( options && !options->streamName.empty() )
\r
2097 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2099 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2100 if ( client == 0 ) {
\r
2101 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2102 error( RtAudioError::WARNING );
\r
2107 // The handle must have been created on an earlier pass.
\r
2108 client = handle->client;
\r
2111 const char **ports;
\r
2112 std::string port, previousPort, deviceName;
\r
2113 unsigned int nPorts = 0, nDevices = 0;
\r
2114 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2116 // Parse the port names up to the first colon (:).
\r
2117 size_t iColon = 0;
\r
2119 port = (char *) ports[ nPorts ];
\r
2120 iColon = port.find(":");
\r
2121 if ( iColon != std::string::npos ) {
\r
2122 port = port.substr( 0, iColon );
\r
2123 if ( port != previousPort ) {
\r
2124 if ( nDevices == device ) deviceName = port;
\r
2126 previousPort = port;
\r
2129 } while ( ports[++nPorts] );
\r
2133 if ( device >= nDevices ) {
\r
2134 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2138 // Count the available ports containing the client name as device
\r
2139 // channels. Jack "input ports" equal RtAudio output channels.
\r
2140 unsigned int nChannels = 0;
\r
2141 unsigned long flag = JackPortIsInput;
\r
2142 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2143 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2145 while ( ports[ nChannels ] ) nChannels++;
\r
2149 // Compare the jack ports for specified client to the requested number of channels.
\r
2150 if ( nChannels < (channels + firstChannel) ) {
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2156 // Check the jack server sample rate.
\r
2157 unsigned int jackRate = jack_get_sample_rate( client );
\r
2158 if ( sampleRate != jackRate ) {
\r
2159 jack_client_close( client );
\r
2160 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2161 errorText_ = errorStream_.str();
\r
2164 stream_.sampleRate = jackRate;
\r
2166 // Get the latency of the JACK port.
\r
2167 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2168 if ( ports[ firstChannel ] ) {
\r
2169 // Added by Ge Wang
\r
2170 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2171 // the range (usually the min and max are equal)
\r
2172 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2173 // get the latency range
\r
2174 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2175 // be optimistic, use the min!
\r
2176 stream_.latency[mode] = latrange.min;
\r
2177 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2181 // The jack server always uses 32-bit floating-point data.
\r
2182 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2183 stream_.userFormat = format;
\r
2185 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2186 else stream_.userInterleaved = true;
\r
2188 // Jack always uses non-interleaved buffers.
\r
2189 stream_.deviceInterleaved[mode] = false;
\r
2191 // Jack always provides host byte-ordered data.
\r
2192 stream_.doByteSwap[mode] = false;
\r
2194 // Get the buffer size. The buffer size and number of buffers
\r
2195 // (periods) is set when the jack server is started.
\r
2196 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2197 *bufferSize = stream_.bufferSize;
\r
2199 stream_.nDeviceChannels[mode] = channels;
\r
2200 stream_.nUserChannels[mode] = channels;
\r
2202 // Set flags for buffer conversion.
\r
2203 stream_.doConvertBuffer[mode] = false;
\r
2204 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2205 stream_.doConvertBuffer[mode] = true;
\r
2206 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2207 stream_.nUserChannels[mode] > 1 )
\r
2208 stream_.doConvertBuffer[mode] = true;
\r
2210 // Allocate our JackHandle structure for the stream.
\r
2211 if ( handle == 0 ) {
\r
2213 handle = new JackHandle;
\r
2215 catch ( std::bad_alloc& ) {
\r
2216 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2220 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2221 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2224 stream_.apiHandle = (void *) handle;
\r
2225 handle->client = client;
\r
2227 handle->deviceName[mode] = deviceName;
\r
2229 // Allocate necessary internal buffers.
\r
2230 unsigned long bufferBytes;
\r
2231 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2232 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2233 if ( stream_.userBuffer[mode] == NULL ) {
\r
2234 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2238 if ( stream_.doConvertBuffer[mode] ) {
\r
2240 bool makeBuffer = true;
\r
2241 if ( mode == OUTPUT )
\r
2242 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2243 else { // mode == INPUT
\r
2244 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2245 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2246 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2247 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2251 if ( makeBuffer ) {
\r
2252 bufferBytes *= *bufferSize;
\r
2253 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2254 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2255 if ( stream_.deviceBuffer == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2262 // Allocate memory for the Jack ports (channels) identifiers.
\r
2263 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2264 if ( handle->ports[mode] == NULL ) {
\r
2265 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2269 stream_.device[mode] = device;
\r
2270 stream_.channelOffset[mode] = firstChannel;
\r
2271 stream_.state = STREAM_STOPPED;
\r
2272 stream_.callbackInfo.object = (void *) this;
\r
2274 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2275 // We had already set up the stream for output.
\r
2276 stream_.mode = DUPLEX;
\r
2278 stream_.mode = mode;
\r
2279 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2280 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2281 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2284 // Register our ports.
\r
2286 if ( mode == OUTPUT ) {
\r
2287 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2288 snprintf( label, 64, "outport %d", i );
\r
2289 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2290 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2294 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2295 snprintf( label, 64, "inport %d", i );
\r
2296 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2297 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2301 // Setup the buffer conversion information structure. We don't use
\r
2302 // buffers to do channel offsets, so we override that parameter
\r
2304 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2310 pthread_cond_destroy( &handle->condition );
\r
2311 jack_client_close( handle->client );
\r
2313 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2314 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2317 stream_.apiHandle = 0;
\r
2320 for ( int i=0; i<2; i++ ) {
\r
2321 if ( stream_.userBuffer[i] ) {
\r
2322 free( stream_.userBuffer[i] );
\r
2323 stream_.userBuffer[i] = 0;
\r
2327 if ( stream_.deviceBuffer ) {
\r
2328 free( stream_.deviceBuffer );
\r
2329 stream_.deviceBuffer = 0;
\r
2335 void RtApiJack :: closeStream( void )
\r
2337 if ( stream_.state == STREAM_CLOSED ) {
\r
2338 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2339 error( RtAudioError::WARNING );
\r
2343 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2346 if ( stream_.state == STREAM_RUNNING )
\r
2347 jack_deactivate( handle->client );
\r
2349 jack_client_close( handle->client );
\r
2353 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2354 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2355 pthread_cond_destroy( &handle->condition );
\r
2357 stream_.apiHandle = 0;
\r
2360 for ( int i=0; i<2; i++ ) {
\r
2361 if ( stream_.userBuffer[i] ) {
\r
2362 free( stream_.userBuffer[i] );
\r
2363 stream_.userBuffer[i] = 0;
\r
2367 if ( stream_.deviceBuffer ) {
\r
2368 free( stream_.deviceBuffer );
\r
2369 stream_.deviceBuffer = 0;
\r
2372 stream_.mode = UNINITIALIZED;
\r
2373 stream_.state = STREAM_CLOSED;
\r
2376 void RtApiJack :: startStream( void )
\r
2379 if ( stream_.state == STREAM_RUNNING ) {
\r
2380 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2381 error( RtAudioError::WARNING );
\r
2385 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2386 int result = jack_activate( handle->client );
\r
2388 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2392 const char **ports;
\r
2394 // Get the list of available ports.
\r
2395 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2397 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2398 if ( ports == NULL) {
\r
2399 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2403 // Now make the port connections. Since RtAudio wasn't designed to
\r
2404 // allow the user to select particular channels of a device, we'll
\r
2405 // just open the first "nChannels" ports with offset.
\r
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2408 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2409 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2412 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2419 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2421 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2422 if ( ports == NULL) {
\r
2423 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2427 // Now make the port connections. See note above.
\r
2428 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2430 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2431 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2434 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2441 handle->drainCounter = 0;
\r
2442 handle->internalDrain = false;
\r
2443 stream_.state = STREAM_RUNNING;
\r
2446 if ( result == 0 ) return;
\r
2447 error( RtAudioError::SYSTEM_ERROR );
\r
2450 void RtApiJack :: stopStream( void )
\r
2453 if ( stream_.state == STREAM_STOPPED ) {
\r
2454 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2455 error( RtAudioError::WARNING );
\r
2459 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2462 if ( handle->drainCounter == 0 ) {
\r
2463 handle->drainCounter = 2;
\r
2464 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2468 jack_deactivate( handle->client );
\r
2469 stream_.state = STREAM_STOPPED;
\r
2472 void RtApiJack :: abortStream( void )
\r
2475 if ( stream_.state == STREAM_STOPPED ) {
\r
2476 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2477 error( RtAudioError::WARNING );
\r
2481 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2482 handle->drainCounter = 2;
\r
2487 // This function will be called by a spawned thread when the user
\r
2488 // callback function signals that the stream should be stopped or
\r
2489 // aborted. It is necessary to handle it this way because the
\r
2490 // callbackEvent() function must return before the jack_deactivate()
\r
2491 // function will return.
\r
2492 static void *jackStopStream( void *ptr )
\r
2494 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2495 RtApiJack *object = (RtApiJack *) info->object;
\r
2497 object->stopStream();
\r
2498 pthread_exit( NULL );
\r
2501 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2503 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2504 if ( stream_.state == STREAM_CLOSED ) {
\r
2505 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2506 error( RtAudioError::WARNING );
\r
2509 if ( stream_.bufferSize != nframes ) {
\r
2510 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2511 error( RtAudioError::WARNING );
\r
2515 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2516 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2518 // Check if we were draining the stream and signal is finished.
\r
2519 if ( handle->drainCounter > 3 ) {
\r
2520 ThreadHandle threadId;
\r
2522 stream_.state = STREAM_STOPPING;
\r
2523 if ( handle->internalDrain == true )
\r
2524 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2526 pthread_cond_signal( &handle->condition );
\r
2530 // Invoke user callback first, to get fresh output data.
\r
2531 if ( handle->drainCounter == 0 ) {
\r
2532 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2533 double streamTime = getStreamTime();
\r
2534 RtAudioStreamStatus status = 0;
\r
2535 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2536 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2537 handle->xrun[0] = false;
\r
2539 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2540 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2541 handle->xrun[1] = false;
\r
2543 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2544 stream_.bufferSize, streamTime, status, info->userData );
\r
2545 if ( cbReturnValue == 2 ) {
\r
2546 stream_.state = STREAM_STOPPING;
\r
2547 handle->drainCounter = 2;
\r
2549 pthread_create( &id, NULL, jackStopStream, info );
\r
2552 else if ( cbReturnValue == 1 ) {
\r
2553 handle->drainCounter = 1;
\r
2554 handle->internalDrain = true;
\r
2558 jack_default_audio_sample_t *jackbuffer;
\r
2559 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2560 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2562 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memset( jackbuffer, 0, bufferBytes );
\r
2570 else if ( stream_.doConvertBuffer[0] ) {
\r
2572 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2574 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2575 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2576 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2579 else { // no buffer conversion
\r
2580 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2581 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2582 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2587 // Don't bother draining input
\r
2588 if ( handle->drainCounter ) {
\r
2589 handle->drainCounter++;
\r
2593 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2595 if ( stream_.doConvertBuffer[1] ) {
\r
2596 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2597 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2598 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2602 else { // no buffer conversion
\r
2603 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2604 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2605 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2611 RtApi::tickStreamTime();
\r
2614 //******************** End of __UNIX_JACK__ *********************//
\r
2617 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2619 // The ASIO API is designed around a callback scheme, so this
\r
2620 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2621 // Jack. The primary constraint with ASIO is that it only allows
\r
2622 // access to a single driver at a time. Thus, it is not possible to
\r
2623 // have more than one simultaneous RtAudio stream.
\r
2625 // This implementation also requires a number of external ASIO files
\r
2626 // and a few global variables. The ASIO callback scheme does not
\r
2627 // allow for the passing of user data, so we must create a global
\r
2628 // pointer to our callbackInfo structure.
\r
2630 // On unix systems, we make use of a pthread condition variable.
\r
2631 // Since there is no equivalent in Windows, I hacked something based
\r
2632 // on information found in
\r
2633 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2635 #include "asiosys.h"
\r
2637 #include "iasiothiscallresolver.h"
\r
2638 #include "asiodrivers.h"
\r
// File-scope state for the ASIO implementation.  ASIO permits only one
// loaded driver (and thus one RtAudio ASIO stream) at a time, so these
// globals are the mechanism for sharing state with the C-style ASIO
// callbacks, which cannot carry user data.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
// Published pointer to the open stream's CallbackInfo for the callbacks.
static CallbackInfo *asioCallbackInfo;
// Set when the driver reports an over/underrun (see asioMessages).
static bool asioXRun;
\r
2647 struct AsioHandle {
\r
2648 int drainCounter; // Tracks callback counts when draining
\r
2649 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2650 ASIOBufferInfo *bufferInfos;
\r
2654 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
\r
2662 RtApiAsio :: RtApiAsio()
\r
2664 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2665 // CoInitialize beforehand, but it must be for appartment threading
\r
2666 // (in which case, CoInitilialize will return S_FALSE here).
\r
2667 coInitialized_ = false;
\r
2668 HRESULT hr = CoInitialize( NULL );
\r
2669 if ( FAILED(hr) ) {
\r
2670 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2671 error( RtAudioError::WARNING );
\r
2673 coInitialized_ = true;
\r
2675 drivers.removeCurrentDriver();
\r
2676 driverInfo.asioVersion = 2;
\r
2678 // See note in DirectSound implementation about GetDesktopWindow().
\r
2679 driverInfo.sysRef = GetForegroundWindow();
\r
2682 RtApiAsio :: ~RtApiAsio()
\r
2684 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2685 if ( coInitialized_ ) CoUninitialize();
\r
2688 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2690 return (unsigned int) drivers.asioGetNumDev();
\r
2693 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2695 RtAudio::DeviceInfo info;
\r
2696 info.probed = false;
\r
2699 unsigned int nDevices = getDeviceCount();
\r
2700 if ( nDevices == 0 ) {
\r
2701 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2702 error( RtAudioError::INVALID_USE );
\r
2706 if ( device >= nDevices ) {
\r
2707 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2708 error( RtAudioError::INVALID_USE );
\r
2712 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2713 if ( stream_.state != STREAM_CLOSED ) {
\r
2714 if ( device >= devices_.size() ) {
\r
2715 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2716 error( RtAudioError::WARNING );
\r
2719 return devices_[ device ];
\r
2722 char driverName[32];
\r
2723 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2724 if ( result != ASE_OK ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 info.name = driverName;
\r
2733 if ( !drivers.loadDriver( driverName ) ) {
\r
2734 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2735 errorText_ = errorStream_.str();
\r
2736 error( RtAudioError::WARNING );
\r
2740 result = ASIOInit( &driverInfo );
\r
2741 if ( result != ASE_OK ) {
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 // Determine the device channel information.
\r
2749 long inputChannels, outputChannels;
\r
2750 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2751 if ( result != ASE_OK ) {
\r
2752 drivers.removeCurrentDriver();
\r
2753 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2754 errorText_ = errorStream_.str();
\r
2755 error( RtAudioError::WARNING );
\r
2759 info.outputChannels = outputChannels;
\r
2760 info.inputChannels = inputChannels;
\r
2761 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2762 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2764 // Determine the supported sample rates.
\r
2765 info.sampleRates.clear();
\r
2766 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2767 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2768 if ( result == ASE_OK )
\r
2769 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2772 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2773 ASIOChannelInfo channelInfo;
\r
2774 channelInfo.channel = 0;
\r
2775 channelInfo.isInput = true;
\r
2776 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2777 result = ASIOGetChannelInfo( &channelInfo );
\r
2778 if ( result != ASE_OK ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.nativeFormats = 0;
\r
2787 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2788 info.nativeFormats |= RTAUDIO_SINT16;
\r
2789 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2790 info.nativeFormats |= RTAUDIO_SINT32;
\r
2791 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2792 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2793 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2794 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2795 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2796 info.nativeFormats |= RTAUDIO_SINT24;
\r
2798 if ( info.outputChannels > 0 )
\r
2799 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2800 if ( info.inputChannels > 0 )
\r
2801 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2803 info.probed = true;
\r
2804 drivers.removeCurrentDriver();
\r
2808 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2810 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2811 object->callbackEvent( index );
\r
2814 void RtApiAsio :: saveDeviceInfo( void )
\r
2818 unsigned int nDevices = getDeviceCount();
\r
2819 devices_.resize( nDevices );
\r
2820 for ( unsigned int i=0; i<nDevices; i++ )
\r
2821 devices_[i] = getDeviceInfo( i );
\r
2824 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2825 unsigned int firstChannel, unsigned int sampleRate,
\r
2826 RtAudioFormat format, unsigned int *bufferSize,
\r
2827 RtAudio::StreamOptions *options )
\r
2829 // For ASIO, a duplex stream MUST use the same driver.
\r
2830 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2831 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2835 char driverName[32];
\r
2836 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2837 if ( result != ASE_OK ) {
\r
2838 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2839 errorText_ = errorStream_.str();
\r
2843 // Only load the driver once for duplex stream.
\r
2844 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2845 // The getDeviceInfo() function will not work when a stream is open
\r
2846 // because ASIO does not allow multiple devices to run at the same
\r
2847 // time. Thus, we'll probe the system before opening a stream and
\r
2848 // save the results for use by getDeviceInfo().
\r
2849 this->saveDeviceInfo();
\r
2851 if ( !drivers.loadDriver( driverName ) ) {
\r
2852 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2853 errorText_ = errorStream_.str();
\r
2857 result = ASIOInit( &driverInfo );
\r
2858 if ( result != ASE_OK ) {
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2865 // Check the device channel count.
\r
2866 long inputChannels, outputChannels;
\r
2867 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2871 errorText_ = errorStream_.str();
\r
2875 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2876 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2877 drivers.removeCurrentDriver();
\r
2878 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2879 errorText_ = errorStream_.str();
\r
2882 stream_.nDeviceChannels[mode] = channels;
\r
2883 stream_.nUserChannels[mode] = channels;
\r
2884 stream_.channelOffset[mode] = firstChannel;
\r
2886 // Verify the sample rate is supported.
\r
2887 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2888 if ( result != ASE_OK ) {
\r
2889 drivers.removeCurrentDriver();
\r
2890 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2891 errorText_ = errorStream_.str();
\r
2895 // Get the current sample rate
\r
2896 ASIOSampleRate currentRate;
\r
2897 result = ASIOGetSampleRate( ¤tRate );
\r
2898 if ( result != ASE_OK ) {
\r
2899 drivers.removeCurrentDriver();
\r
2900 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2901 errorText_ = errorStream_.str();
\r
2905 // Set the sample rate only if necessary
\r
2906 if ( currentRate != sampleRate ) {
\r
2907 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2908 if ( result != ASE_OK ) {
\r
2909 drivers.removeCurrentDriver();
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2916 // Determine the driver data type.
\r
2917 ASIOChannelInfo channelInfo;
\r
2918 channelInfo.channel = 0;
\r
2919 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2920 else channelInfo.isInput = true;
\r
2921 result = ASIOGetChannelInfo( &channelInfo );
\r
2922 if ( result != ASE_OK ) {
\r
2923 drivers.removeCurrentDriver();
\r
2924 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2925 errorText_ = errorStream_.str();
\r
2929 // Assuming WINDOWS host is always little-endian.
\r
2930 stream_.doByteSwap[mode] = false;
\r
2931 stream_.userFormat = format;
\r
2932 stream_.deviceFormat[mode] = 0;
\r
2933 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2935 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2937 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2938 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2939 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2941 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2942 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2943 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2945 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2946 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2947 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2949 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2950 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2951 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2954 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2955 drivers.removeCurrentDriver();
\r
2956 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2957 errorText_ = errorStream_.str();
\r
2961 // Set the buffer size. For a duplex stream, this will end up
\r
2962 // setting the buffer size based on the input constraints, which
\r
2964 long minSize, maxSize, preferSize, granularity;
\r
2965 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2966 if ( result != ASE_OK ) {
\r
2967 drivers.removeCurrentDriver();
\r
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2969 errorText_ = errorStream_.str();
\r
2973 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2974 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2975 else if ( granularity == -1 ) {
\r
2976 // Make sure bufferSize is a power of two.
\r
2977 int log2_of_min_size = 0;
\r
2978 int log2_of_max_size = 0;
\r
2980 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2981 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2982 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2985 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2986 int min_delta_num = log2_of_min_size;
\r
2988 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2989 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2990 if (current_delta < min_delta) {
\r
2991 min_delta = current_delta;
\r
2992 min_delta_num = i;
\r
2996 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2997 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2998 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3000 else if ( granularity != 0 ) {
\r
3001 // Set to an even multiple of granularity, rounding up.
\r
3002 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3005 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
3006 drivers.removeCurrentDriver();
\r
3007 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3011 stream_.bufferSize = *bufferSize;
\r
3012 stream_.nBuffers = 2;
\r
3014 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3015 else stream_.userInterleaved = true;
\r
3017 // ASIO always uses non-interleaved buffers.
\r
3018 stream_.deviceInterleaved[mode] = false;
\r
3020 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3021 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3022 if ( handle == 0 ) {
\r
3024 handle = new AsioHandle;
\r
3026 catch ( std::bad_alloc& ) {
\r
3027 //if ( handle == NULL ) {
\r
3028 drivers.removeCurrentDriver();
\r
3029 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3032 handle->bufferInfos = 0;
\r
3034 // Create a manual-reset event.
\r
3035 handle->condition = CreateEvent( NULL, // no security
\r
3036 TRUE, // manual-reset
\r
3037 FALSE, // non-signaled initially
\r
3038 NULL ); // unnamed
\r
3039 stream_.apiHandle = (void *) handle;
\r
3042 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3043 // and output separately, we'll have to dispose of previously
\r
3044 // created output buffers for a duplex stream.
\r
3045 long inputLatency, outputLatency;
\r
3046 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3047 ASIODisposeBuffers();
\r
3048 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3051 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3052 bool buffersAllocated = false;
\r
3053 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3054 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3055 if ( handle->bufferInfos == NULL ) {
\r
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3057 errorText_ = errorStream_.str();
\r
3061 ASIOBufferInfo *infos;
\r
3062 infos = handle->bufferInfos;
\r
3063 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3064 infos->isInput = ASIOFalse;
\r
3065 infos->channelNum = i + stream_.channelOffset[0];
\r
3066 infos->buffers[0] = infos->buffers[1] = 0;
\r
3068 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3069 infos->isInput = ASIOTrue;
\r
3070 infos->channelNum = i + stream_.channelOffset[1];
\r
3071 infos->buffers[0] = infos->buffers[1] = 0;
\r
3074 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3075 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3076 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3077 asioCallbacks.asioMessage = &asioMessages;
\r
3078 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3079 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3080 if ( result != ASE_OK ) {
\r
3081 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3082 errorText_ = errorStream_.str();
\r
3085 buffersAllocated = true;
\r
3087 // Set flags for buffer conversion.
\r
3088 stream_.doConvertBuffer[mode] = false;
\r
3089 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3090 stream_.doConvertBuffer[mode] = true;
\r
3091 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3092 stream_.nUserChannels[mode] > 1 )
\r
3093 stream_.doConvertBuffer[mode] = true;
\r
3095 // Allocate necessary internal buffers
\r
3096 unsigned long bufferBytes;
\r
3097 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3098 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3099 if ( stream_.userBuffer[mode] == NULL ) {
\r
3100 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3104 if ( stream_.doConvertBuffer[mode] ) {
\r
3106 bool makeBuffer = true;
\r
3107 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3108 if ( mode == INPUT ) {
\r
3109 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3110 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3111 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3115 if ( makeBuffer ) {
\r
3116 bufferBytes *= *bufferSize;
\r
3117 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3118 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3119 if ( stream_.deviceBuffer == NULL ) {
\r
3120 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3126 stream_.sampleRate = sampleRate;
\r
3127 stream_.device[mode] = device;
\r
3128 stream_.state = STREAM_STOPPED;
\r
3129 asioCallbackInfo = &stream_.callbackInfo;
\r
3130 stream_.callbackInfo.object = (void *) this;
\r
3131 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3132 // We had already set up an output stream.
\r
3133 stream_.mode = DUPLEX;
\r
3135 stream_.mode = mode;
\r
3137 // Determine device latencies
\r
3138 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3139 if ( result != ASE_OK ) {
\r
3140 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3141 errorText_ = errorStream_.str();
\r
3142 error( RtAudioError::WARNING); // warn but don't fail
\r
3145 stream_.latency[0] = outputLatency;
\r
3146 stream_.latency[1] = inputLatency;
\r
3149 // Setup the buffer conversion information structure. We don't use
\r
3150 // buffers to do channel offsets, so we override that parameter
\r
3152 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3157 if ( buffersAllocated )
\r
3158 ASIODisposeBuffers();
\r
3159 drivers.removeCurrentDriver();
\r
3162 CloseHandle( handle->condition );
\r
3163 if ( handle->bufferInfos )
\r
3164 free( handle->bufferInfos );
\r
3166 stream_.apiHandle = 0;
\r
3169 for ( int i=0; i<2; i++ ) {
\r
3170 if ( stream_.userBuffer[i] ) {
\r
3171 free( stream_.userBuffer[i] );
\r
3172 stream_.userBuffer[i] = 0;
\r
3176 if ( stream_.deviceBuffer ) {
\r
3177 free( stream_.deviceBuffer );
\r
3178 stream_.deviceBuffer = 0;
\r
3184 void RtApiAsio :: closeStream()
\r
3186 if ( stream_.state == STREAM_CLOSED ) {
\r
3187 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3188 error( RtAudioError::WARNING );
\r
3192 if ( stream_.state == STREAM_RUNNING ) {
\r
3193 stream_.state = STREAM_STOPPED;
\r
3196 ASIODisposeBuffers();
\r
3197 drivers.removeCurrentDriver();
\r
3199 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3201 CloseHandle( handle->condition );
\r
3202 if ( handle->bufferInfos )
\r
3203 free( handle->bufferInfos );
\r
3205 stream_.apiHandle = 0;
\r
3208 for ( int i=0; i<2; i++ ) {
\r
3209 if ( stream_.userBuffer[i] ) {
\r
3210 free( stream_.userBuffer[i] );
\r
3211 stream_.userBuffer[i] = 0;
\r
3215 if ( stream_.deviceBuffer ) {
\r
3216 free( stream_.deviceBuffer );
\r
3217 stream_.deviceBuffer = 0;
\r
3220 stream_.mode = UNINITIALIZED;
\r
3221 stream_.state = STREAM_CLOSED;
\r
bool stopThreadCalled = false; // Guards against re-entering stopStream() from the spawned stop thread.
\r
3226 void RtApiAsio :: startStream()
\r
3229 if ( stream_.state == STREAM_RUNNING ) {
\r
3230 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3231 error( RtAudioError::WARNING );
\r
3235 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3236 ASIOError result = ASIOStart();
\r
3237 if ( result != ASE_OK ) {
\r
3238 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3239 errorText_ = errorStream_.str();
\r
3243 handle->drainCounter = 0;
\r
3244 handle->internalDrain = false;
\r
3245 ResetEvent( handle->condition );
\r
3246 stream_.state = STREAM_RUNNING;
\r
3250 stopThreadCalled = false;
\r
3252 if ( result == ASE_OK ) return;
\r
3253 error( RtAudioError::SYSTEM_ERROR );
\r
3256 void RtApiAsio :: stopStream()
\r
3259 if ( stream_.state == STREAM_STOPPED ) {
\r
3260 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3261 error( RtAudioError::WARNING );
\r
3265 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3266 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3267 if ( handle->drainCounter == 0 ) {
\r
3268 handle->drainCounter = 2;
\r
3269 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3273 stream_.state = STREAM_STOPPED;
\r
3275 ASIOError result = ASIOStop();
\r
3276 if ( result != ASE_OK ) {
\r
3277 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3278 errorText_ = errorStream_.str();
\r
3281 if ( result == ASE_OK ) return;
\r
3282 error( RtAudioError::SYSTEM_ERROR );
\r
3285 void RtApiAsio :: abortStream()
\r
3288 if ( stream_.state == STREAM_STOPPED ) {
\r
3289 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3290 error( RtAudioError::WARNING );
\r
3294 // The following lines were commented-out because some behavior was
\r
3295 // noted where the device buffers need to be zeroed to avoid
\r
3296 // continuing sound, even when the device buffers are completely
\r
3297 // disposed. So now, calling abort is the same as calling stop.
\r
3298 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3299 // handle->drainCounter = 2;
\r
3303 // This function will be called by a spawned thread when the user
\r
3304 // callback function signals that the stream should be stopped or
\r
3305 // aborted. It is necessary to handle it this way because the
\r
3306 // callbackEvent() function must return before the ASIOStop()
\r
3307 // function will return.
\r
3308 static unsigned __stdcall asioStopStream( void *ptr )
\r
3310 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3311 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3313 object->stopStream();
\r
3314 _endthreadex( 0 );
\r
3318 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3320 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3321 if ( stream_.state == STREAM_CLOSED ) {
\r
3322 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3323 error( RtAudioError::WARNING );
\r
3327 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3328 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3330 // Check if we were draining the stream and signal if finished.
\r
3331 if ( handle->drainCounter > 3 ) {
\r
3333 stream_.state = STREAM_STOPPING;
\r
3334 if ( handle->internalDrain == false )
\r
3335 SetEvent( handle->condition );
\r
3336 else { // spawn a thread to stop the stream
\r
3337 unsigned threadId;
\r
3338 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3339 &stream_.callbackInfo, 0, &threadId );
\r
3344 // Invoke user callback to get fresh output data UNLESS we are
\r
3345 // draining stream.
\r
3346 if ( handle->drainCounter == 0 ) {
\r
3347 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3348 double streamTime = getStreamTime();
\r
3349 RtAudioStreamStatus status = 0;
\r
3350 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3351 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3354 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3355 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3358 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3359 stream_.bufferSize, streamTime, status, info->userData );
\r
3360 if ( cbReturnValue == 2 ) {
\r
3361 stream_.state = STREAM_STOPPING;
\r
3362 handle->drainCounter = 2;
\r
3363 unsigned threadId;
\r
3364 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3365 &stream_.callbackInfo, 0, &threadId );
\r
3368 else if ( cbReturnValue == 1 ) {
\r
3369 handle->drainCounter = 1;
\r
3370 handle->internalDrain = true;
\r
3374 unsigned int nChannels, bufferBytes, i, j;
\r
3375 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3378 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3380 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3382 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3383 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3384 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3388 else if ( stream_.doConvertBuffer[0] ) {
\r
3390 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3391 if ( stream_.doByteSwap[0] )
\r
3392 byteSwapBuffer( stream_.deviceBuffer,
\r
3393 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3394 stream_.deviceFormat[0] );
\r
3396 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3397 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3398 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3399 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3405 if ( stream_.doByteSwap[0] )
\r
3406 byteSwapBuffer( stream_.userBuffer[0],
\r
3407 stream_.bufferSize * stream_.nUserChannels[0],
\r
3408 stream_.userFormat );
\r
3410 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3411 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3412 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3413 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3419 // Don't bother draining input
\r
3420 if ( handle->drainCounter ) {
\r
3421 handle->drainCounter++;
\r
3425 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3427 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3429 if (stream_.doConvertBuffer[1]) {
\r
3431 // Always interleave ASIO input data.
\r
3432 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3433 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3434 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3435 handle->bufferInfos[i].buffers[bufferIndex],
\r
3439 if ( stream_.doByteSwap[1] )
\r
3440 byteSwapBuffer( stream_.deviceBuffer,
\r
3441 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3442 stream_.deviceFormat[1] );
\r
3443 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3447 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3448 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3449 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3450 handle->bufferInfos[i].buffers[bufferIndex],
\r
3455 if ( stream_.doByteSwap[1] )
\r
3456 byteSwapBuffer( stream_.userBuffer[1],
\r
3457 stream_.bufferSize * stream_.nUserChannels[1],
\r
3458 stream_.userFormat );
\r
3463 // The following call was suggested by Malte Clasen. While the API
\r
3464 // documentation indicates it should not be required, some device
\r
3465 // drivers apparently do not function correctly without it.
\r
3466 ASIOOutputReady();
\r
3468 RtApi::tickStreamTime();
\r
3472 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3474 // The ASIO documentation says that this usually only happens during
\r
3475 // external sync. Audio processing is not stopped by the driver,
\r
3476 // actual sample rate might not have even changed, maybe only the
\r
3477 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3480 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3482 object->stopStream();
\r
3484 catch ( RtAudioError &exception ) {
\r
3485 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3489 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3492 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3496 switch( selector ) {
\r
3497 case kAsioSelectorSupported:
\r
3498 if ( value == kAsioResetRequest
\r
3499 || value == kAsioEngineVersion
\r
3500 || value == kAsioResyncRequest
\r
3501 || value == kAsioLatenciesChanged
\r
3502 // The following three were added for ASIO 2.0, you don't
\r
3503 // necessarily have to support them.
\r
3504 || value == kAsioSupportsTimeInfo
\r
3505 || value == kAsioSupportsTimeCode
\r
3506 || value == kAsioSupportsInputMonitor)
\r
3509 case kAsioResetRequest:
\r
3510 // Defer the task and perform the reset of the driver during the
\r
3511 // next "safe" situation. You cannot reset the driver right now,
\r
3512 // as this code is called from the driver. Reset the driver is
\r
3513 // done by completely destruct is. I.e. ASIOStop(),
\r
3514 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3516 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3519 case kAsioResyncRequest:
\r
3520 // This informs the application that the driver encountered some
\r
3521 // non-fatal data loss. It is used for synchronization purposes
\r
3522 // of different media. Added mainly to work around the Win16Mutex
\r
3523 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3524 // which could lose data because the Mutex was held too long by
\r
3525 // another thread. However a driver can issue it in other
\r
3526 // situations, too.
\r
3527 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3531 case kAsioLatenciesChanged:
\r
3532 // This will inform the host application that the drivers were
\r
3533 // latencies changed. Beware, it this does not mean that the
\r
3534 // buffer sizes have changed! You might need to update internal
\r
3536 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3539 case kAsioEngineVersion:
\r
3540 // Return the supported ASIO version of the host application. If
\r
3541 // a host application does not implement this selector, ASIO 1.0
\r
3542 // is assumed by the driver.
\r
3545 case kAsioSupportsTimeInfo:
\r
3546 // Informs the driver whether the
\r
3547 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3548 // For compatibility with ASIO 1.0 drivers the host application
\r
3549 // should always support the "old" bufferSwitch method, too.
\r
3552 case kAsioSupportsTimeCode:
\r
3553 // Informs the driver whether application is interested in time
\r
3554 // code info. If an application does not need to know about time
\r
3555 // code, the driver has less work to do.
\r
3562 static const char* getAsioErrorString( ASIOError result )
\r
3567 const char*message;
\r
3570 static const Messages m[] =
\r
3572 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3573 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3574 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3575 { ASE_InvalidMode, "Invalid mode." },
\r
3576 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3577 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3578 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3581 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3582 if ( m[i].value == result ) return m[i].message;
\r
3584 return "Unknown error.";
\r
3587 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3591 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3593 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3594 // - Introduces support for the Windows WASAPI API
\r
3595 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3596 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3597 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3602 #include <audioclient.h>
\r
3604 #include <mmdeviceapi.h>
\r
3605 #include <functiondiscoverykeys_devpkey.h>
\r
3607 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL.
// Safe to invoke on a NULL pointer (the guard skips the Release() call).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3616 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3618 //-----------------------------------------------------------------------------
\r
3620 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3621 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3622 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3623 // provide intermediate storage for read / write synchronization.
\r
3624 class WasapiBuffer
\r
3628 : buffer_( NULL ),
\r
3637 // sets the length of the internal ring buffer
\r
3638 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3641 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3643 bufferSize_ = bufferSize;
\r
3648 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3649 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3651 if ( !buffer || // incoming buffer is NULL
\r
3652 bufferSize == 0 || // incoming buffer has no data
\r
3653 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3658 unsigned int relOutIndex = outIndex_;
\r
3659 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3660 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3661 relOutIndex += bufferSize_;
\r
3664 // "in" index can end on the "out" index but cannot begin at it
\r
3665 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3666 return false; // not enough space between "in" index and "out" index
\r
3669 // copy buffer from external to internal
\r
3670 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3671 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3672 int fromInSize = bufferSize - fromZeroSize;
\r
3676 case RTAUDIO_SINT8:
\r
3677 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3678 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3680 case RTAUDIO_SINT16:
\r
3681 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3682 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3684 case RTAUDIO_SINT24:
\r
3685 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3686 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3688 case RTAUDIO_SINT32:
\r
3689 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3690 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3692 case RTAUDIO_FLOAT32:
\r
3693 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3694 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3696 case RTAUDIO_FLOAT64:
\r
3697 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3698 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3702 // update "in" index
\r
3703 inIndex_ += bufferSize;
\r
3704 inIndex_ %= bufferSize_;
\r
3709 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3710 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3712 if ( !buffer || // incoming buffer is NULL
\r
3713 bufferSize == 0 || // incoming buffer has no data
\r
3714 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3719 unsigned int relInIndex = inIndex_;
\r
3720 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3721 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3722 relInIndex += bufferSize_;
\r
3725 // "out" index can begin at and end on the "in" index
\r
3726 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3727 return false; // not enough space between "out" index and "in" index
\r
3730 // copy buffer from internal to external
\r
3731 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3732 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3733 int fromOutSize = bufferSize - fromZeroSize;
\r
3737 case RTAUDIO_SINT8:
\r
3738 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3739 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3741 case RTAUDIO_SINT16:
\r
3742 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3743 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3745 case RTAUDIO_SINT24:
\r
3746 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3747 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3749 case RTAUDIO_SINT32:
\r
3750 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3751 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3753 case RTAUDIO_FLOAT32:
\r
3754 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3755 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3757 case RTAUDIO_FLOAT64:
\r
3758 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3759 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3763 // update "out" index
\r
3764 outIndex_ += bufferSize;
\r
3765 outIndex_ %= bufferSize_;
\r
3772 unsigned int bufferSize_;
\r
3773 unsigned int inIndex_;
\r
3774 unsigned int outIndex_;
\r
3777 //-----------------------------------------------------------------------------
\r
3779 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3780 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3781 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3782 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3783 // one rate and its multiple.
\r
3784 void convertBufferWasapi( char* outBuffer,
\r
3785 const char* inBuffer,
\r
3786 const unsigned int& channelCount,
\r
3787 const unsigned int& inSampleRate,
\r
3788 const unsigned int& outSampleRate,
\r
3789 const unsigned int& inSampleCount,
\r
3790 unsigned int& outSampleCount,
\r
3791 const RtAudioFormat& format )
\r
3793 // calculate the new outSampleCount and relative sampleStep
\r
3794 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3795 float sampleStep = 1.0f / sampleRatio;
\r
3796 float inSampleFraction = 0.0f;
\r
3798 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3800 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3801 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3803 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3807 case RTAUDIO_SINT8:
\r
3808 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3810 case RTAUDIO_SINT16:
\r
3811 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3813 case RTAUDIO_SINT24:
\r
3814 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3816 case RTAUDIO_SINT32:
\r
3817 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3819 case RTAUDIO_FLOAT32:
\r
3820 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3822 case RTAUDIO_FLOAT64:
\r
3823 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3827 // jump to next in sample
\r
3828 inSampleFraction += sampleStep;
\r
3832 //-----------------------------------------------------------------------------
\r
3834 // A structure to hold various information related to the WASAPI implementation.
\r
3835 struct WasapiHandle
\r
3837 IAudioClient* captureAudioClient;
\r
3838 IAudioClient* renderAudioClient;
\r
3839 IAudioCaptureClient* captureClient;
\r
3840 IAudioRenderClient* renderClient;
\r
3841 HANDLE captureEvent;
\r
3842 HANDLE renderEvent;
\r
3845 : captureAudioClient( NULL ),
\r
3846 renderAudioClient( NULL ),
\r
3847 captureClient( NULL ),
\r
3848 renderClient( NULL ),
\r
3849 captureEvent( NULL ),
\r
3850 renderEvent( NULL ) {}
\r
3853 //=============================================================================
\r
3855 RtApiWasapi::RtApiWasapi()
\r
3856 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3858 // WASAPI can run either apartment or multi-threaded
\r
3859 HRESULT hr = CoInitialize( NULL );
\r
3860 if ( !FAILED( hr ) )
\r
3861 coInitialized_ = true;
\r
3863 // Instantiate device enumerator
\r
3864 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3865 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3866 ( void** ) &deviceEnumerator_ );
\r
3868 if ( FAILED( hr ) ) {
\r
3869 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3870 error( RtAudioError::DRIVER_ERROR );
\r
3874 //-----------------------------------------------------------------------------
\r
3876 RtApiWasapi::~RtApiWasapi()
\r
3878 if ( stream_.state != STREAM_CLOSED )
\r
3881 SAFE_RELEASE( deviceEnumerator_ );
\r
3883 // If this object previously called CoInitialize()
\r
3884 if ( coInitialized_ )
\r
3888 //=============================================================================
\r
3890 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3892 unsigned int captureDeviceCount = 0;
\r
3893 unsigned int renderDeviceCount = 0;
\r
3895 IMMDeviceCollection* captureDevices = NULL;
\r
3896 IMMDeviceCollection* renderDevices = NULL;
\r
3898 // Count capture devices
\r
3899 errorText_.clear();
\r
3900 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3901 if ( FAILED( hr ) ) {
\r
3902 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3906 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3907 if ( FAILED( hr ) ) {
\r
3908 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3912 // Count render devices
\r
3913 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3914 if ( FAILED( hr ) ) {
\r
3915 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3919 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3920 if ( FAILED( hr ) ) {
\r
3921 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3926 // release all references
\r
3927 SAFE_RELEASE( captureDevices );
\r
3928 SAFE_RELEASE( renderDevices );
\r
3930 if ( errorText_.empty() )
\r
3931 return captureDeviceCount + renderDeviceCount;
\r
3933 error( RtAudioError::DRIVER_ERROR );
\r
3937 //-----------------------------------------------------------------------------
\r
3939 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3941 RtAudio::DeviceInfo info;
\r
3942 unsigned int captureDeviceCount = 0;
\r
3943 unsigned int renderDeviceCount = 0;
\r
3944 std::wstring deviceName;
\r
3945 std::string defaultDeviceName;
\r
3946 bool isCaptureDevice = false;
\r
3948 PROPVARIANT deviceNameProp;
\r
3949 PROPVARIANT defaultDeviceNameProp;
\r
3951 IMMDeviceCollection* captureDevices = NULL;
\r
3952 IMMDeviceCollection* renderDevices = NULL;
\r
3953 IMMDevice* devicePtr = NULL;
\r
3954 IMMDevice* defaultDevicePtr = NULL;
\r
3955 IAudioClient* audioClient = NULL;
\r
3956 IPropertyStore* devicePropStore = NULL;
\r
3957 IPropertyStore* defaultDevicePropStore = NULL;
\r
3959 WAVEFORMATEX* deviceFormat = NULL;
\r
3960 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3963 info.probed = false;
\r
3965 // Count capture devices
\r
3966 errorText_.clear();
\r
3967 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3968 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3974 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3975 if ( FAILED( hr ) ) {
\r
3976 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3980 // Count render devices
\r
3981 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3987 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3988 if ( FAILED( hr ) ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3993 // validate device index
\r
3994 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3995 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3996 errorType = RtAudioError::INVALID_USE;
\r
4000 // determine whether index falls within capture or render devices
\r
4001 if ( device >= renderDeviceCount ) {
\r
4002 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4003 if ( FAILED( hr ) ) {
\r
4004 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4007 isCaptureDevice = true;
\r
4010 hr = renderDevices->Item( device, &devicePtr );
\r
4011 if ( FAILED( hr ) ) {
\r
4012 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4015 isCaptureDevice = false;
\r
4018 // get default device name
\r
4019 if ( isCaptureDevice ) {
\r
4020 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4021 if ( FAILED( hr ) ) {
\r
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4027 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4028 if ( FAILED( hr ) ) {
\r
4029 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4034 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4039 PropVariantInit( &defaultDeviceNameProp );
\r
4041 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4047 deviceName = defaultDeviceNameProp.pwszVal;
\r
4048 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4051 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4052 if ( FAILED( hr ) ) {
\r
4053 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4057 PropVariantInit( &deviceNameProp );
\r
4059 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4060 if ( FAILED( hr ) ) {
\r
4061 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4065 deviceName = deviceNameProp.pwszVal;
\r
4066 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4069 if ( isCaptureDevice ) {
\r
4070 info.isDefaultInput = info.name == defaultDeviceName;
\r
4071 info.isDefaultOutput = false;
\r
4074 info.isDefaultInput = false;
\r
4075 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4079 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4080 if ( FAILED( hr ) ) {
\r
4081 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4085 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4086 if ( FAILED( hr ) ) {
\r
4087 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4091 if ( isCaptureDevice ) {
\r
4092 info.inputChannels = deviceFormat->nChannels;
\r
4093 info.outputChannels = 0;
\r
4094 info.duplexChannels = 0;
\r
4097 info.inputChannels = 0;
\r
4098 info.outputChannels = deviceFormat->nChannels;
\r
4099 info.duplexChannels = 0;
\r
4103 info.sampleRates.clear();
\r
4105 // allow support for all sample rates as we have a built-in sample rate converter
\r
4106 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4107 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4111 info.nativeFormats = 0;
\r
4113 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4114 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4115 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4117 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4118 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4120 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4121 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4124 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4125 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4126 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4128 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT8;
\r
4131 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4132 info.nativeFormats |= RTAUDIO_SINT16;
\r
4134 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4135 info.nativeFormats |= RTAUDIO_SINT24;
\r
4137 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4138 info.nativeFormats |= RTAUDIO_SINT32;
\r
4143 info.probed = true;
\r
4146 // release all references
\r
4147 PropVariantClear( &deviceNameProp );
\r
4148 PropVariantClear( &defaultDeviceNameProp );
\r
4150 SAFE_RELEASE( captureDevices );
\r
4151 SAFE_RELEASE( renderDevices );
\r
4152 SAFE_RELEASE( devicePtr );
\r
4153 SAFE_RELEASE( defaultDevicePtr );
\r
4154 SAFE_RELEASE( audioClient );
\r
4155 SAFE_RELEASE( devicePropStore );
\r
4156 SAFE_RELEASE( defaultDevicePropStore );
\r
4158 CoTaskMemFree( deviceFormat );
\r
4159 CoTaskMemFree( closestMatchFormat );
\r
4161 if ( !errorText_.empty() )
\r
4162 error( errorType );
\r
4166 //-----------------------------------------------------------------------------
\r
4168 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4170 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4171 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4179 //-----------------------------------------------------------------------------
\r
4181 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4183 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4184 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4192 //-----------------------------------------------------------------------------
\r
4194 void RtApiWasapi::closeStream( void )
\r
4196 if ( stream_.state == STREAM_CLOSED ) {
\r
4197 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4198 error( RtAudioError::WARNING );
\r
4202 if ( stream_.state != STREAM_STOPPED )
\r
4205 // clean up stream memory
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4207 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4209 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4210 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4212 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4213 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4215 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4216 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4218 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4219 stream_.apiHandle = NULL;
\r
4221 for ( int i = 0; i < 2; i++ ) {
\r
4222 if ( stream_.userBuffer[i] ) {
\r
4223 free( stream_.userBuffer[i] );
\r
4224 stream_.userBuffer[i] = 0;
\r
4228 if ( stream_.deviceBuffer ) {
\r
4229 free( stream_.deviceBuffer );
\r
4230 stream_.deviceBuffer = 0;
\r
4233 // update stream state
\r
4234 stream_.state = STREAM_CLOSED;
\r
4237 //-----------------------------------------------------------------------------
\r
4239 void RtApiWasapi::startStream( void )
\r
4243 if ( stream_.state == STREAM_RUNNING ) {
\r
4244 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4245 error( RtAudioError::WARNING );
\r
4249 // update stream state
\r
4250 stream_.state = STREAM_RUNNING;
\r
4252 // create WASAPI stream thread
\r
4253 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4255 if ( !stream_.callbackInfo.thread ) {
\r
4256 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4257 error( RtAudioError::THREAD_ERROR );
\r
4260 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4261 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4265 //-----------------------------------------------------------------------------
\r
4267 void RtApiWasapi::stopStream( void )
\r
4271 if ( stream_.state == STREAM_STOPPED ) {
\r
4272 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4273 error( RtAudioError::WARNING );
\r
4277 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4278 stream_.state = STREAM_STOPPING;
\r
4280 // wait until stream thread is stopped
\r
4281 while( stream_.state != STREAM_STOPPED ) {
\r
4285 // Wait for the last buffer to play before stopping.
\r
4286 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4288 // stop capture client if applicable
\r
4289 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4290 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4291 if ( FAILED( hr ) ) {
\r
4292 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4293 error( RtAudioError::DRIVER_ERROR );
\r
4298 // stop render client if applicable
\r
4299 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4300 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4301 if ( FAILED( hr ) ) {
\r
4302 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4303 error( RtAudioError::DRIVER_ERROR );
\r
4308 // close thread handle
\r
4309 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4310 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4311 error( RtAudioError::THREAD_ERROR );
\r
4315 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4318 //-----------------------------------------------------------------------------
\r
4320 void RtApiWasapi::abortStream( void )
\r
4324 if ( stream_.state == STREAM_STOPPED ) {
\r
4325 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4326 error( RtAudioError::WARNING );
\r
4330 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4331 stream_.state = STREAM_STOPPING;
\r
4333 // wait until stream thread is stopped
\r
4334 while ( stream_.state != STREAM_STOPPED ) {
\r
4338 // stop capture client if applicable
\r
4339 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4340 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4341 if ( FAILED( hr ) ) {
\r
4342 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4343 error( RtAudioError::DRIVER_ERROR );
\r
4348 // stop render client if applicable
\r
4349 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4350 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4351 if ( FAILED( hr ) ) {
\r
4352 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4353 error( RtAudioError::DRIVER_ERROR );
\r
4358 // close thread handle
\r
4359 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4360 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4361 error( RtAudioError::THREAD_ERROR );
\r
4365 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4368 //-----------------------------------------------------------------------------
\r
4370 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4371 unsigned int firstChannel, unsigned int sampleRate,
\r
4372 RtAudioFormat format, unsigned int* bufferSize,
\r
4373 RtAudio::StreamOptions* options )
\r
4375 bool methodResult = FAILURE;
\r
4376 unsigned int captureDeviceCount = 0;
\r
4377 unsigned int renderDeviceCount = 0;
\r
4379 IMMDeviceCollection* captureDevices = NULL;
\r
4380 IMMDeviceCollection* renderDevices = NULL;
\r
4381 IMMDevice* devicePtr = NULL;
\r
4382 WAVEFORMATEX* deviceFormat = NULL;
\r
4383 unsigned int bufferBytes;
\r
4384 stream_.state = STREAM_STOPPED;
\r
4386 // create API Handle if not already created
\r
4387 if ( !stream_.apiHandle )
\r
4388 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4390 // Count capture devices
\r
4391 errorText_.clear();
\r
4392 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4393 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4399 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4400 if ( FAILED( hr ) ) {
\r
4401 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4405 // Count render devices
\r
4406 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4407 if ( FAILED( hr ) ) {
\r
4408 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4412 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4413 if ( FAILED( hr ) ) {
\r
4414 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4418 // validate device index
\r
4419 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4420 errorType = RtAudioError::INVALID_USE;
\r
4421 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4425 // determine whether index falls within capture or render devices
\r
4426 if ( device >= renderDeviceCount ) {
\r
4427 if ( mode != INPUT ) {
\r
4428 errorType = RtAudioError::INVALID_USE;
\r
4429 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4433 // retrieve captureAudioClient from devicePtr
\r
4434 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4436 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4437 if ( FAILED( hr ) ) {
\r
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4442 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4443 NULL, ( void** ) &captureAudioClient );
\r
4444 if ( FAILED( hr ) ) {
\r
4445 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4449 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4450 if ( FAILED( hr ) ) {
\r
4451 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4455 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4456 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4459 if ( mode != OUTPUT ) {
\r
4460 errorType = RtAudioError::INVALID_USE;
\r
4461 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4465 // retrieve renderAudioClient from devicePtr
\r
4466 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4468 hr = renderDevices->Item( device, &devicePtr );
\r
4469 if ( FAILED( hr ) ) {
\r
4470 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4474 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4475 NULL, ( void** ) &renderAudioClient );
\r
4476 if ( FAILED( hr ) ) {
\r
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4481 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4482 if ( FAILED( hr ) ) {
\r
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4487 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4488 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4491 // fill stream data
\r
4492 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4493 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4494 stream_.mode = DUPLEX;
\r
4497 stream_.mode = mode;
\r
4500 stream_.device[mode] = device;
\r
4501 stream_.doByteSwap[mode] = false;
\r
4502 stream_.sampleRate = sampleRate;
\r
4503 stream_.bufferSize = *bufferSize;
\r
4504 stream_.nBuffers = 1;
\r
4505 stream_.nUserChannels[mode] = channels;
\r
4506 stream_.channelOffset[mode] = firstChannel;
\r
4507 stream_.userFormat = format;
\r
4508 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4510 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4511 stream_.userInterleaved = false;
\r
4513 stream_.userInterleaved = true;
\r
4514 stream_.deviceInterleaved[mode] = true;
\r
4516 // Set flags for buffer conversion.
\r
4517 stream_.doConvertBuffer[mode] = false;
\r
4518 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4519 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4520 stream_.doConvertBuffer[mode] = true;
\r
4521 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4522 stream_.nUserChannels[mode] > 1 )
\r
4523 stream_.doConvertBuffer[mode] = true;
\r
4525 if ( stream_.doConvertBuffer[mode] )
\r
4526 setConvertInfo( mode, 0 );
\r
4528 // Allocate necessary internal buffers
\r
4529 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4531 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4532 if ( !stream_.userBuffer[mode] ) {
\r
4533 errorType = RtAudioError::MEMORY_ERROR;
\r
4534 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4538 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4539 stream_.callbackInfo.priority = 15;
\r
4541 stream_.callbackInfo.priority = 0;
\r
4543 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4544 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4546 methodResult = SUCCESS;
\r
4550 SAFE_RELEASE( captureDevices );
\r
4551 SAFE_RELEASE( renderDevices );
\r
4552 SAFE_RELEASE( devicePtr );
\r
4553 CoTaskMemFree( deviceFormat );
\r
4555 // if method failed, close the stream
\r
4556 if ( methodResult == FAILURE )
\r
4559 if ( !errorText_.empty() )
\r
4560 error( errorType );
\r
4561 return methodResult;
\r
4564 //=============================================================================
\r
4566 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4569 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4574 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4577 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4582 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4585 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4590 //-----------------------------------------------------------------------------
\r
4592 void RtApiWasapi::wasapiThread()
\r
4594 // as this is a new thread, we must CoInitialize it
\r
4595 CoInitialize( NULL );
\r
4599 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4600 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4601 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4602 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4603 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4604 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4606 WAVEFORMATEX* captureFormat = NULL;
\r
4607 WAVEFORMATEX* renderFormat = NULL;
\r
4608 float captureSrRatio = 0.0f;
\r
4609 float renderSrRatio = 0.0f;
\r
4610 WasapiBuffer captureBuffer;
\r
4611 WasapiBuffer renderBuffer;
\r
4613 // declare local stream variables
\r
4614 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4615 BYTE* streamBuffer = NULL;
\r
4616 unsigned long captureFlags = 0;
\r
4617 unsigned int bufferFrameCount = 0;
\r
4618 unsigned int numFramesPadding = 0;
\r
4619 unsigned int convBufferSize = 0;
\r
4620 bool callbackPushed = false;
\r
4621 bool callbackPulled = false;
\r
4622 bool callbackStopped = false;
\r
4623 int callbackResult = 0;
\r
4625 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4626 char* convBuffer = NULL;
\r
4627 unsigned int convBuffSize = 0;
\r
4628 unsigned int deviceBuffSize = 0;
\r
4630 errorText_.clear();
\r
4631 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4633 // Attempt to assign "Pro Audio" characteristic to thread
\r
4634 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4636 DWORD taskIndex = 0;
\r
4637 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4638 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4639 FreeLibrary( AvrtDll );
\r
4642 // start capture stream if applicable
\r
4643 if ( captureAudioClient ) {
\r
4644 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4645 if ( FAILED( hr ) ) {
\r
4646 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4650 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4652 // initialize capture stream according to desire buffer size
\r
4653 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4654 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4656 if ( !captureClient ) {
\r
4657 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4658 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4659 desiredBufferPeriod,
\r
4660 desiredBufferPeriod,
\r
4663 if ( FAILED( hr ) ) {
\r
4664 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4668 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4669 ( void** ) &captureClient );
\r
4670 if ( FAILED( hr ) ) {
\r
4671 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4675 // configure captureEvent to trigger on every available capture buffer
\r
4676 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4677 if ( !captureEvent ) {
\r
4678 errorType = RtAudioError::SYSTEM_ERROR;
\r
4679 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4683 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4684 if ( FAILED( hr ) ) {
\r
4685 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4689 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4690 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4693 unsigned int inBufferSize = 0;
\r
4694 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4695 if ( FAILED( hr ) ) {
\r
4696 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4700 // scale outBufferSize according to stream->user sample rate ratio
\r
4701 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4702 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4704 // set captureBuffer size
\r
4705 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4707 // reset the capture stream
\r
4708 hr = captureAudioClient->Reset();
\r
4709 if ( FAILED( hr ) ) {
\r
4710 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4714 // start the capture stream
\r
4715 hr = captureAudioClient->Start();
\r
4716 if ( FAILED( hr ) ) {
\r
4717 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4722 // start render stream if applicable
\r
4723 if ( renderAudioClient ) {
\r
4724 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4725 if ( FAILED( hr ) ) {
\r
4726 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4730 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4732 // initialize render stream according to desire buffer size
\r
4733 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4734 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4736 if ( !renderClient ) {
\r
4737 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4738 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4739 desiredBufferPeriod,
\r
4740 desiredBufferPeriod,
\r
4743 if ( FAILED( hr ) ) {
\r
4744 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4748 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4749 ( void** ) &renderClient );
\r
4750 if ( FAILED( hr ) ) {
\r
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4755 // configure renderEvent to trigger on every available render buffer
\r
4756 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4757 if ( !renderEvent ) {
\r
4758 errorType = RtAudioError::SYSTEM_ERROR;
\r
4759 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4763 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4764 if ( FAILED( hr ) ) {
\r
4765 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4769 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4770 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4773 unsigned int outBufferSize = 0;
\r
4774 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4775 if ( FAILED( hr ) ) {
\r
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4780 // scale inBufferSize according to user->stream sample rate ratio
\r
4781 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4782 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4784 // set renderBuffer size
\r
4785 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4787 // reset the render stream
\r
4788 hr = renderAudioClient->Reset();
\r
4789 if ( FAILED( hr ) ) {
\r
4790 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4794 // start the render stream
\r
4795 hr = renderAudioClient->Start();
\r
4796 if ( FAILED( hr ) ) {
\r
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4802 if ( stream_.mode == INPUT ) {
\r
4803 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4804 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4806 else if ( stream_.mode == OUTPUT ) {
\r
4807 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4808 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4810 else if ( stream_.mode == DUPLEX ) {
\r
4811 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4812 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4813 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4814 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4817 convBuffer = ( char* ) malloc( convBuffSize );
\r
4818 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4819 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4820 errorType = RtAudioError::MEMORY_ERROR;
\r
4821 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4825 // stream process loop
\r
4826 while ( stream_.state != STREAM_STOPPING ) {
\r
4827 if ( !callbackPulled ) {
\r
4830 // 1. Pull callback buffer from inputBuffer
\r
4831 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4832 // Convert callback buffer to user format
\r
4834 if ( captureAudioClient ) {
\r
4835 // Pull callback buffer from inputBuffer
\r
4836 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4837 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4838 stream_.deviceFormat[INPUT] );
\r
4840 if ( callbackPulled ) {
\r
4841 // Convert callback buffer to user sample rate
\r
4842 convertBufferWasapi( stream_.deviceBuffer,
\r
4844 stream_.nDeviceChannels[INPUT],
\r
4845 captureFormat->nSamplesPerSec,
\r
4846 stream_.sampleRate,
\r
4847 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4849 stream_.deviceFormat[INPUT] );
\r
4851 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4852 // Convert callback buffer to user format
\r
4853 convertBuffer( stream_.userBuffer[INPUT],
\r
4854 stream_.deviceBuffer,
\r
4855 stream_.convertInfo[INPUT] );
\r
4858 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4859 memcpy( stream_.userBuffer[INPUT],
\r
4860 stream_.deviceBuffer,
\r
4861 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4866 // if there is no capture stream, set callbackPulled flag
\r
4867 callbackPulled = true;
\r
4870 // Execute Callback
\r
4871 // ================
\r
4872 // 1. Execute user callback method
\r
4873 // 2. Handle return value from callback
\r
4875 // if callback has not requested the stream to stop
\r
4876 if ( callbackPulled && !callbackStopped ) {
\r
4877 // Execute user callback method
\r
4878 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4879 stream_.userBuffer[INPUT],
\r
4880 stream_.bufferSize,
\r
4882 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4883 stream_.callbackInfo.userData );
\r
4885 // Handle return value from callback
\r
4886 if ( callbackResult == 1 ) {
\r
4887 // instantiate a thread to stop this thread
\r
4888 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4889 if ( !threadHandle ) {
\r
4890 errorType = RtAudioError::THREAD_ERROR;
\r
4891 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4894 else if ( !CloseHandle( threadHandle ) ) {
\r
4895 errorType = RtAudioError::THREAD_ERROR;
\r
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4900 callbackStopped = true;
\r
4902 else if ( callbackResult == 2 ) {
\r
4903 // instantiate a thread to stop this thread
\r
4904 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4905 if ( !threadHandle ) {
\r
4906 errorType = RtAudioError::THREAD_ERROR;
\r
4907 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4910 else if ( !CloseHandle( threadHandle ) ) {
\r
4911 errorType = RtAudioError::THREAD_ERROR;
\r
4912 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4916 callbackStopped = true;
\r
4921 // Callback Output
\r
4922 // ===============
\r
4923 // 1. Convert callback buffer to stream format
\r
4924 // 2. Convert callback buffer to stream sample rate and channel count
\r
4925 // 3. Push callback buffer into outputBuffer
\r
4927 if ( renderAudioClient && callbackPulled ) {
\r
4928 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4929 // Convert callback buffer to stream format
\r
4930 convertBuffer( stream_.deviceBuffer,
\r
4931 stream_.userBuffer[OUTPUT],
\r
4932 stream_.convertInfo[OUTPUT] );
\r
4936 // Convert callback buffer to stream sample rate
\r
4937 convertBufferWasapi( convBuffer,
\r
4938 stream_.deviceBuffer,
\r
4939 stream_.nDeviceChannels[OUTPUT],
\r
4940 stream_.sampleRate,
\r
4941 renderFormat->nSamplesPerSec,
\r
4942 stream_.bufferSize,
\r
4944 stream_.deviceFormat[OUTPUT] );
\r
4946 // Push callback buffer into outputBuffer
\r
4947 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4948 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4949 stream_.deviceFormat[OUTPUT] );
\r
4952 // if there is no render stream, set callbackPushed flag
\r
4953 callbackPushed = true;
\r
4958 // 1. Get capture buffer from stream
\r
4959 // 2. Push capture buffer into inputBuffer
\r
4960 // 3. If 2. was successful: Release capture buffer
\r
4962 if ( captureAudioClient ) {
\r
4963 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4964 if ( !callbackPulled ) {
\r
4965 WaitForSingleObject( captureEvent, INFINITE );
\r
4968 // Get capture buffer from stream
\r
4969 hr = captureClient->GetBuffer( &streamBuffer,
\r
4970 &bufferFrameCount,
\r
4971 &captureFlags, NULL, NULL );
\r
4972 if ( FAILED( hr ) ) {
\r
4973 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4977 if ( bufferFrameCount != 0 ) {
\r
4978 // Push capture buffer into inputBuffer
\r
4979 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4980 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4981 stream_.deviceFormat[INPUT] ) )
\r
4983 // Release capture buffer
\r
4984 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4985 if ( FAILED( hr ) ) {
\r
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4992 // Inform WASAPI that capture was unsuccessful
\r
4993 hr = captureClient->ReleaseBuffer( 0 );
\r
4994 if ( FAILED( hr ) ) {
\r
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5002 // Inform WASAPI that capture was unsuccessful
\r
5003 hr = captureClient->ReleaseBuffer( 0 );
\r
5004 if ( FAILED( hr ) ) {
\r
5005 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5013 // 1. Get render buffer from stream
\r
5014 // 2. Pull next buffer from outputBuffer
\r
5015 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5016 // Release render buffer
\r
5018 if ( renderAudioClient ) {
\r
5019 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5020 if ( callbackPulled && !callbackPushed ) {
\r
5021 WaitForSingleObject( renderEvent, INFINITE );
\r
5024 // Get render buffer from stream
\r
5025 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5026 if ( FAILED( hr ) ) {
\r
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5031 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5032 if ( FAILED( hr ) ) {
\r
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5037 bufferFrameCount -= numFramesPadding;
\r
5039 if ( bufferFrameCount != 0 ) {
\r
5040 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5041 if ( FAILED( hr ) ) {
\r
5042 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5046 // Pull next buffer from outputBuffer
\r
5047 // Fill render buffer with next buffer
\r
5048 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5049 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5050 stream_.deviceFormat[OUTPUT] ) )
\r
5052 // Release render buffer
\r
5053 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5054 if ( FAILED( hr ) ) {
\r
5055 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5061 // Inform WASAPI that render was unsuccessful
\r
5062 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5063 if ( FAILED( hr ) ) {
\r
5064 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5071 // Inform WASAPI that render was unsuccessful
\r
5072 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5073 if ( FAILED( hr ) ) {
\r
5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5080 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5081 if ( callbackPushed ) {
\r
5082 callbackPulled = false;
\r
5085 // tick stream time
\r
5086 RtApi::tickStreamTime();
\r
5091 CoTaskMemFree( captureFormat );
\r
5092 CoTaskMemFree( renderFormat );
\r
5094 free ( convBuffer );
\r
5098 // update stream state
\r
5099 stream_.state = STREAM_STOPPED;
\r
5101 if ( errorText_.empty() )
\r
5104 error( errorType );
\r
5107 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5111 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5113 // Modified by Robin Davies, October 2005
\r
5114 // - Improvements to DirectX pointer chasing.
\r
5115 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5116 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5117 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5118 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5120 #include <dsound.h>
\r
5121 #include <assert.h>
\r
5122 #include <algorithm>
\r
5124 #if defined(__MINGW32__)
\r
5125 // missing from latest mingw winapi
\r
5126 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5127 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5128 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5129 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5132 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5134 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5135 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5138 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5140 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5141 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5142 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5143 return pointer >= earlierPointer && pointer < laterPointer;
\r
5146 // A structure to hold various information related to the DirectSound
\r
5147 // API implementation.
\r
5149 unsigned int drainCounter; // Tracks callback counts when draining
\r
5150 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5154 UINT bufferPointer[2];
\r
5155 DWORD dsBufferSize[2];
\r
5156 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5160 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5163 // Declarations for utility functions, callbacks, and structures
\r
5164 // specific to the DirectSound implementation.
\r
5165 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5166 LPCTSTR description,
\r
5168 LPVOID lpContext );
\r
5170 static const char* getErrorString( int code );
\r
5172 static unsigned __stdcall callbackHandler( void *ptr );
\r
5181 : found(false) { validId[0] = false; validId[1] = false; }
\r
5184 struct DsProbeData {
\r
5186 std::vector<struct DsDevice>* dsDevices;
\r
5189 RtApiDs :: RtApiDs()
\r
5191 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5192 // accept whatever the mainline chose for a threading model.
\r
5193 coInitialized_ = false;
\r
5194 HRESULT hr = CoInitialize( NULL );
\r
5195 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5198 RtApiDs :: ~RtApiDs()
\r
5200 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5201 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5204 // The DirectSound default output is always the first device.
\r
5205 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5210 // The DirectSound default input is always the first input device,
\r
5211 // which is the first capture device enumerated.
\r
5212 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5217 unsigned int RtApiDs :: getDeviceCount( void )
\r
5219 // Set query flag for previously found devices to false, so that we
\r
5220 // can check for any devices that have disappeared.
\r
5221 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5222 dsDevices[i].found = false;
\r
5224 // Query DirectSound devices.
\r
5225 struct DsProbeData probeInfo;
\r
5226 probeInfo.isInput = false;
\r
5227 probeInfo.dsDevices = &dsDevices;
\r
5228 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5229 if ( FAILED( result ) ) {
\r
5230 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5231 errorText_ = errorStream_.str();
\r
5232 error( RtAudioError::WARNING );
\r
5235 // Query DirectSoundCapture devices.
\r
5236 probeInfo.isInput = true;
\r
5237 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5238 if ( FAILED( result ) ) {
\r
5239 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5240 errorText_ = errorStream_.str();
\r
5241 error( RtAudioError::WARNING );
\r
5244 // Clean out any devices that may have disappeared.
\r
5245 std::vector< int > indices;
\r
5246 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5247 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5248 //unsigned int nErased = 0;
\r
5249 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5250 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5251 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5253 return static_cast<unsigned int>(dsDevices.size());
\r
5256 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5258 RtAudio::DeviceInfo info;
\r
5259 info.probed = false;
\r
5261 if ( dsDevices.size() == 0 ) {
\r
5262 // Force a query of all devices
\r
5264 if ( dsDevices.size() == 0 ) {
\r
5265 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5266 error( RtAudioError::INVALID_USE );
\r
5271 if ( device >= dsDevices.size() ) {
\r
5272 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5273 error( RtAudioError::INVALID_USE );
\r
5278 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5280 LPDIRECTSOUND output;
\r
5282 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5283 if ( FAILED( result ) ) {
\r
5284 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5285 errorText_ = errorStream_.str();
\r
5286 error( RtAudioError::WARNING );
\r
5290 outCaps.dwSize = sizeof( outCaps );
\r
5291 result = output->GetCaps( &outCaps );
\r
5292 if ( FAILED( result ) ) {
\r
5293 output->Release();
\r
5294 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5295 errorText_ = errorStream_.str();
\r
5296 error( RtAudioError::WARNING );
\r
5300 // Get output channel information.
\r
5301 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5303 // Get sample rate information.
\r
5304 info.sampleRates.clear();
\r
5305 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5306 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5307 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5308 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5311 // Get format information.
\r
5312 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5313 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5315 output->Release();
\r
5317 if ( getDefaultOutputDevice() == device )
\r
5318 info.isDefaultOutput = true;
\r
5320 if ( dsDevices[ device ].validId[1] == false ) {
\r
5321 info.name = dsDevices[ device ].name;
\r
5322 info.probed = true;
\r
5328 LPDIRECTSOUNDCAPTURE input;
\r
5329 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5330 if ( FAILED( result ) ) {
\r
5331 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5332 errorText_ = errorStream_.str();
\r
5333 error( RtAudioError::WARNING );
\r
5338 inCaps.dwSize = sizeof( inCaps );
\r
5339 result = input->GetCaps( &inCaps );
\r
5340 if ( FAILED( result ) ) {
\r
5342 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5343 errorText_ = errorStream_.str();
\r
5344 error( RtAudioError::WARNING );
\r
5348 // Get input channel information.
\r
5349 info.inputChannels = inCaps.dwChannels;
\r
5351 // Get sample rate and format information.
\r
5352 std::vector<unsigned int> rates;
\r
5353 if ( inCaps.dwChannels >= 2 ) {
\r
5354 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5355 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5356 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5359 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5360 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5363 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5364 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5365 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5369 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5371 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5376 else if ( inCaps.dwChannels == 1 ) {
\r
5377 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5386 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5388 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5392 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5394 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5399 else info.inputChannels = 0; // technically, this would be an error
\r
5403 if ( info.inputChannels == 0 ) return info;
\r
5405 // Copy the supported rates to the info structure but avoid duplication.
\r
5407 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5409 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5410 if ( rates[i] == info.sampleRates[j] ) {
\r
5415 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5417 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5419 // If device opens for both playback and capture, we determine the channels.
\r
5420 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5421 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5423 if ( device == 0 ) info.isDefaultInput = true;
\r
5425 // Copy name and return.
\r
5426 info.name = dsDevices[ device ].name;
\r
5427 info.probed = true;
\r
5431 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5432 unsigned int firstChannel, unsigned int sampleRate,
\r
5433 RtAudioFormat format, unsigned int *bufferSize,
\r
5434 RtAudio::StreamOptions *options )
\r
5436 if ( channels + firstChannel > 2 ) {
\r
5437 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5441 size_t nDevices = dsDevices.size();
\r
5442 if ( nDevices == 0 ) {
\r
5443 // This should not happen because a check is made before this function is called.
\r
5444 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5448 if ( device >= nDevices ) {
\r
5449 // This should not happen because a check is made before this function is called.
\r
5450 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5454 if ( mode == OUTPUT ) {
\r
5455 if ( dsDevices[ device ].validId[0] == false ) {
\r
5456 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5457 errorText_ = errorStream_.str();
\r
5461 else { // mode == INPUT
\r
5462 if ( dsDevices[ device ].validId[1] == false ) {
\r
5463 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5464 errorText_ = errorStream_.str();
\r
5469 // According to a note in PortAudio, using GetDesktopWindow()
\r
5470 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5471 // that occur when the application's window is not the foreground
\r
5472 // window. Also, if the application window closes before the
\r
5473 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5474 // problems when using GetDesktopWindow() but it seems fine now
\r
5475 // (January 2010). I'll leave it commented here.
\r
5476 // HWND hWnd = GetForegroundWindow();
\r
5477 HWND hWnd = GetDesktopWindow();
\r
5479 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5480 // two. This is a judgement call and a value of two is probably too
\r
5481 // low for capture, but it should work for playback.
\r
5483 if ( options ) nBuffers = options->numberOfBuffers;
\r
5484 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5485 if ( nBuffers < 2 ) nBuffers = 3;
\r
5487 // Check the lower range of the user-specified buffer size and set
\r
5488 // (arbitrarily) to a lower bound of 32.
\r
5489 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5491 // Create the wave format structure. The data format setting will
\r
5492 // be determined later.
\r
5493 WAVEFORMATEX waveFormat;
\r
5494 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5495 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5496 waveFormat.nChannels = channels + firstChannel;
\r
5497 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5499 // Determine the device buffer size. By default, we'll use the value
\r
5500 // defined above (32K), but we will grow it to make allowances for
\r
5501 // very large software buffer sizes.
\r
5502 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5503 DWORD dsPointerLeadTime = 0;
\r
5505 void *ohandle = 0, *bhandle = 0;
\r
5507 if ( mode == OUTPUT ) {
\r
5509 LPDIRECTSOUND output;
\r
5510 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5511 if ( FAILED( result ) ) {
\r
5512 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5513 errorText_ = errorStream_.str();
\r
5518 outCaps.dwSize = sizeof( outCaps );
\r
5519 result = output->GetCaps( &outCaps );
\r
5520 if ( FAILED( result ) ) {
\r
5521 output->Release();
\r
5522 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5523 errorText_ = errorStream_.str();
\r
5527 // Check channel information.
\r
5528 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5529 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5530 errorText_ = errorStream_.str();
\r
5534 // Check format information. Use 16-bit format unless not
\r
5535 // supported or user requests 8-bit.
\r
5536 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5537 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5538 waveFormat.wBitsPerSample = 16;
\r
5539 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5542 waveFormat.wBitsPerSample = 8;
\r
5543 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5545 stream_.userFormat = format;
\r
5547 // Update wave format structure and buffer information.
\r
5548 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5549 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5550 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5552 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5553 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5554 dsBufferSize *= 2;
\r
5556 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5557 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5558 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5559 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5560 if ( FAILED( result ) ) {
\r
5561 output->Release();
\r
5562 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5563 errorText_ = errorStream_.str();
\r
5567 // Even though we will write to the secondary buffer, we need to
\r
5568 // access the primary buffer to set the correct output format
\r
5569 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5570 // buffer description.
\r
5571 DSBUFFERDESC bufferDescription;
\r
5572 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5573 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5574 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5576 // Obtain the primary buffer
\r
5577 LPDIRECTSOUNDBUFFER buffer;
\r
5578 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5579 if ( FAILED( result ) ) {
\r
5580 output->Release();
\r
5581 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5582 errorText_ = errorStream_.str();
\r
5586 // Set the primary DS buffer sound format.
\r
5587 result = buffer->SetFormat( &waveFormat );
\r
5588 if ( FAILED( result ) ) {
\r
5589 output->Release();
\r
5590 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5591 errorText_ = errorStream_.str();
\r
5595 // Setup the secondary DS buffer description.
\r
5596 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5597 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5598 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5599 DSBCAPS_GLOBALFOCUS |
\r
5600 DSBCAPS_GETCURRENTPOSITION2 |
\r
5601 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5602 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5603 bufferDescription.lpwfxFormat = &waveFormat;
\r
5605 // Try to create the secondary DS buffer. If that doesn't work,
\r
5606 // try to use software mixing. Otherwise, there's a problem.
\r
5607 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5608 if ( FAILED( result ) ) {
\r
5609 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5610 DSBCAPS_GLOBALFOCUS |
\r
5611 DSBCAPS_GETCURRENTPOSITION2 |
\r
5612 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5613 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5614 if ( FAILED( result ) ) {
\r
5615 output->Release();
\r
5616 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5617 errorText_ = errorStream_.str();
\r
5622 // Get the buffer size ... might be different from what we specified.
\r
5624 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5625 result = buffer->GetCaps( &dsbcaps );
\r
5626 if ( FAILED( result ) ) {
\r
5627 output->Release();
\r
5628 buffer->Release();
\r
5629 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5630 errorText_ = errorStream_.str();
\r
5634 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5636 // Lock the DS buffer
\r
5639 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5640 if ( FAILED( result ) ) {
\r
5641 output->Release();
\r
5642 buffer->Release();
\r
5643 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5644 errorText_ = errorStream_.str();
\r
5648 // Zero the DS buffer
\r
5649 ZeroMemory( audioPtr, dataLen );
\r
5651 // Unlock the DS buffer
\r
5652 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5653 if ( FAILED( result ) ) {
\r
5654 output->Release();
\r
5655 buffer->Release();
\r
5656 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5657 errorText_ = errorStream_.str();
\r
5661 ohandle = (void *) output;
\r
5662 bhandle = (void *) buffer;
\r
5665 if ( mode == INPUT ) {
\r
5667 LPDIRECTSOUNDCAPTURE input;
\r
5668 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5669 if ( FAILED( result ) ) {
\r
5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5671 errorText_ = errorStream_.str();
\r
5676 inCaps.dwSize = sizeof( inCaps );
\r
5677 result = input->GetCaps( &inCaps );
\r
5678 if ( FAILED( result ) ) {
\r
5680 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5681 errorText_ = errorStream_.str();
\r
5685 // Check channel information.
\r
5686 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5687 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5691 // Check format information. Use 16-bit format unless user
\r
5692 // requests 8-bit.
\r
5693 DWORD deviceFormats;
\r
5694 if ( channels + firstChannel == 2 ) {
\r
5695 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5696 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5697 waveFormat.wBitsPerSample = 8;
\r
5698 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5700 else { // assume 16-bit is supported
\r
5701 waveFormat.wBitsPerSample = 16;
\r
5702 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5705 else { // channel == 1
\r
5706 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5707 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5708 waveFormat.wBitsPerSample = 8;
\r
5709 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5711 else { // assume 16-bit is supported
\r
5712 waveFormat.wBitsPerSample = 16;
\r
5713 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5716 stream_.userFormat = format;
\r
5718 // Update wave format structure and buffer information.
\r
5719 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5720 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5721 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5723 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5724 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5725 dsBufferSize *= 2;
\r
5727 // Setup the secondary DS buffer description.
\r
5728 DSCBUFFERDESC bufferDescription;
\r
5729 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5730 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5731 bufferDescription.dwFlags = 0;
\r
5732 bufferDescription.dwReserved = 0;
\r
5733 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5734 bufferDescription.lpwfxFormat = &waveFormat;
\r
5736 // Create the capture buffer.
\r
5737 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5738 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5739 if ( FAILED( result ) ) {
\r
5741 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5742 errorText_ = errorStream_.str();
\r
5746 // Get the buffer size ... might be different from what we specified.
\r
5747 DSCBCAPS dscbcaps;
\r
5748 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5749 result = buffer->GetCaps( &dscbcaps );
\r
5750 if ( FAILED( result ) ) {
\r
5752 buffer->Release();
\r
5753 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5754 errorText_ = errorStream_.str();
\r
5758 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5760 // NOTE: We could have a problem here if this is a duplex stream
\r
5761 // and the play and capture hardware buffer sizes are different
\r
5762 // (I'm actually not sure if that is a problem or not).
\r
5763 // Currently, we are not verifying that.
\r
5765 // Lock the capture buffer
\r
5768 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5769 if ( FAILED( result ) ) {
\r
5771 buffer->Release();
\r
5772 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5773 errorText_ = errorStream_.str();
\r
5777 // Zero the buffer
\r
5778 ZeroMemory( audioPtr, dataLen );
\r
5780 // Unlock the buffer
\r
5781 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5782 if ( FAILED( result ) ) {
\r
5784 buffer->Release();
\r
5785 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5786 errorText_ = errorStream_.str();
\r
5790 ohandle = (void *) input;
\r
5791 bhandle = (void *) buffer;
\r
5794 // Set various stream parameters
\r
5795 DsHandle *handle = 0;
\r
5796 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5797 stream_.nUserChannels[mode] = channels;
\r
5798 stream_.bufferSize = *bufferSize;
\r
5799 stream_.channelOffset[mode] = firstChannel;
\r
5800 stream_.deviceInterleaved[mode] = true;
\r
5801 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5802 else stream_.userInterleaved = true;
\r
5804 // Set flag for buffer conversion
\r
5805 stream_.doConvertBuffer[mode] = false;
\r
5806 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5807 stream_.doConvertBuffer[mode] = true;
\r
5808 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5809 stream_.doConvertBuffer[mode] = true;
\r
5810 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5811 stream_.nUserChannels[mode] > 1 )
\r
5812 stream_.doConvertBuffer[mode] = true;
\r
5814 // Allocate necessary internal buffers
\r
5815 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5816 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5817 if ( stream_.userBuffer[mode] == NULL ) {
\r
5818 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5822 if ( stream_.doConvertBuffer[mode] ) {
\r
5824 bool makeBuffer = true;
\r
5825 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5826 if ( mode == INPUT ) {
\r
5827 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5828 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5829 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5833 if ( makeBuffer ) {
\r
5834 bufferBytes *= *bufferSize;
\r
5835 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5836 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5837 if ( stream_.deviceBuffer == NULL ) {
\r
5838 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5844 // Allocate our DsHandle structures for the stream.
\r
5845 if ( stream_.apiHandle == 0 ) {
\r
5847 handle = new DsHandle;
\r
5849 catch ( std::bad_alloc& ) {
\r
5850 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5854 // Create a manual-reset event.
\r
5855 handle->condition = CreateEvent( NULL, // no security
\r
5856 TRUE, // manual-reset
\r
5857 FALSE, // non-signaled initially
\r
5858 NULL ); // unnamed
\r
5859 stream_.apiHandle = (void *) handle;
\r
5862 handle = (DsHandle *) stream_.apiHandle;
\r
5863 handle->id[mode] = ohandle;
\r
5864 handle->buffer[mode] = bhandle;
\r
5865 handle->dsBufferSize[mode] = dsBufferSize;
\r
5866 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5868 stream_.device[mode] = device;
\r
5869 stream_.state = STREAM_STOPPED;
\r
5870 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5871 // We had already set up an output stream.
\r
5872 stream_.mode = DUPLEX;
\r
5874 stream_.mode = mode;
\r
5875 stream_.nBuffers = nBuffers;
\r
5876 stream_.sampleRate = sampleRate;
\r
5878 // Setup the buffer conversion information structure.
\r
5879 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5881 // Setup the callback thread.
\r
5882 if ( stream_.callbackInfo.isRunning == false ) {
\r
5883 unsigned threadId;
\r
5884 stream_.callbackInfo.isRunning = true;
\r
5885 stream_.callbackInfo.object = (void *) this;
\r
5886 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5887 &stream_.callbackInfo, 0, &threadId );
\r
5888 if ( stream_.callbackInfo.thread == 0 ) {
\r
5889 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5893 // Boost DS thread priority
\r
5894 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5900 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5901 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5902 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5903 if ( buffer ) buffer->Release();
\r
5904 object->Release();
\r
5906 if ( handle->buffer[1] ) {
\r
5907 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5908 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5909 if ( buffer ) buffer->Release();
\r
5910 object->Release();
\r
5912 CloseHandle( handle->condition );
\r
5914 stream_.apiHandle = 0;
\r
5917 for ( int i=0; i<2; i++ ) {
\r
5918 if ( stream_.userBuffer[i] ) {
\r
5919 free( stream_.userBuffer[i] );
\r
5920 stream_.userBuffer[i] = 0;
\r
5924 if ( stream_.deviceBuffer ) {
\r
5925 free( stream_.deviceBuffer );
\r
5926 stream_.deviceBuffer = 0;
\r
5929 stream_.state = STREAM_CLOSED;
\r
// Close an open DirectSound stream: stop and join the callback thread,
// release the DirectSound playback/capture objects and buffers, destroy the
// condition event, and free all internal buffers. Warns (no-op) if no
// stream is open.
5933 void RtApiDs :: closeStream()

5935 if ( stream_.state == STREAM_CLOSED ) {

5936 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5937 error( RtAudioError::WARNING );

5941 // Stop the callback thread.

// Clearing isRunning tells the thread loop to exit; then block until it does.
5942 stream_.callbackInfo.isRunning = false;

5943 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5944 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5946 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release output-side COM objects: secondary buffer first, then the device.
5948 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5949 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5950 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5953 buffer->Release();

5955 object->Release();

// Release input-side (capture) COM objects the same way.
5957 if ( handle->buffer[1] ) {

5958 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5959 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5962 buffer->Release();

5964 object->Release();

// Destroy the manual-reset event created in probeDeviceOpen.
5966 CloseHandle( handle->condition );

5968 stream_.apiHandle = 0;

// Free user buffers for both directions (index 0 = output, 1 = input).
5971 for ( int i=0; i<2; i++ ) {

5972 if ( stream_.userBuffer[i] ) {

5973 free( stream_.userBuffer[i] );

5974 stream_.userBuffer[i] = 0;

5978 if ( stream_.deviceBuffer ) {

5979 free( stream_.deviceBuffer );

5980 stream_.deviceBuffer = 0;

// Reset stream bookkeeping so a new stream can be opened.
5983 stream_.mode = UNINITIALIZED;

5984 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: raise timer resolution, reset the duplex
// synchronization state, start the DS playback buffer (looping) and/or the
// capture buffer, and mark the stream running. Warns (no-op) if already
// running; reports SYSTEM_ERROR if a DirectSound call fails.
5987 void RtApiDs :: startStream()

5990 if ( stream_.state == STREAM_RUNNING ) {

5991 errorText_ = "RtApiDs::startStream(): the stream is already running!";

5992 error( RtAudioError::WARNING );

5996 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5998 // Increase scheduler frequency on lesser windows (a side-effect of

5999 // increasing timer accuracy). On greater windows (Win2K or later),

6000 // this is already in effect.

// Matched by timeEndPeriod( 1 ) in stopStream.
6001 timeBeginPeriod( 1 );

// Force callbackEvent to re-synchronize the buffer pointers on next wakeup.
6003 buffersRolling = false;

6004 duplexPrerollBytes = 0;

6006 if ( stream_.mode == DUPLEX ) {

6007 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6008 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6011 HRESULT result = 0;

// Start playback side (buffer[0]) in looping mode.
6012 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6014 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6015 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6016 if ( FAILED( result ) ) {

6017 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6018 errorText_ = errorStream_.str();

// Start capture side (buffer[1]) in looping mode.
6023 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6025 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6026 result = buffer->Start( DSCBSTART_LOOPING );

6027 if ( FAILED( result ) ) {

6028 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6029 errorText_ = errorStream_.str();

// Reset drain/stop signaling state before declaring the stream running.
6034 handle->drainCounter = 0;

6035 handle->internalDrain = false;

6036 ResetEvent( handle->condition );

6037 stream_.state = STREAM_RUNNING;

6040 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream. For output, waits for any queued audio to drain
// (via handle->condition), then stops the DS buffer and zeroes it so a
// later restart does not replay stale data; the capture side is stopped and
// cleared the same way. Restores normal timer resolution and unlocks the
// stream mutex before reporting any accumulated DirectSound failure.
6043 void RtApiDs :: stopStream()

6046 if ( stream_.state == STREAM_STOPPED ) {

6047 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6048 error( RtAudioError::WARNING );

6052 HRESULT result = 0;

6055 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6056 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress: request one (2) and wait
// for the callback thread to signal that the output has played out.
6057 if ( handle->drainCounter == 0 ) {

6058 handle->drainCounter = 2;

6059 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6062 stream_.state = STREAM_STOPPED;

// Serialize against the callback thread before touching the DS buffer.
6064 MUTEX_LOCK( &stream_.mutex );

6066 // Stop the buffer and clear memory

6067 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6068 result = buffer->Stop();

6069 if ( FAILED( result ) ) {

6070 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6071 errorText_ = errorStream_.str();

6075 // Lock the buffer and clear it so that if we start to play again,

6076 // we won't have old data playing.

6077 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6078 if ( FAILED( result ) ) {

6079 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6080 errorText_ = errorStream_.str();

6084 // Zero the DS buffer

6085 ZeroMemory( audioPtr, dataLen );

6087 // Unlock the DS buffer

6088 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6089 if ( FAILED( result ) ) {

6090 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6091 errorText_ = errorStream_.str();

6095 // If we start playing again, we must begin at beginning of buffer.

6096 handle->bufferPointer[0] = 0;

// Capture side: stop and clear the input buffer analogously.
6099 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6100 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6104 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6106 if ( stream_.mode != DUPLEX )

6107 MUTEX_LOCK( &stream_.mutex );

6109 result = buffer->Stop();

6110 if ( FAILED( result ) ) {

6111 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6112 errorText_ = errorStream_.str();

6116 // Lock the buffer and clear it so that if we start to play again,

6117 // we won't have old data playing.

6118 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6119 if ( FAILED( result ) ) {

6120 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6121 errorText_ = errorStream_.str();

6125 // Zero the DS buffer

6126 ZeroMemory( audioPtr, dataLen );

6128 // Unlock the DS buffer

6129 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6130 if ( FAILED( result ) ) {

6131 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6132 errorText_ = errorStream_.str();

6136 // If we start recording again, we must begin at beginning of buffer.

6137 handle->bufferPointer[1] = 0;

6141 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6142 MUTEX_UNLOCK( &stream_.mutex );

// result holds the last DirectSound failure (if any) from above.
6144 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream immediately: setting drainCounter to 2 while the
// stream stops suppresses the normal wait-for-drain in stopStream, so the
// stream halts without playing out remaining buffered audio. Warns (no-op)
// if the stream is already stopped.
6147 void RtApiDs :: abortStream()

6150 if ( stream_.state == STREAM_STOPPED ) {

6151 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6152 error( RtAudioError::WARNING );

6156 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// drainCounter > 0 makes stopStream skip its WaitForSingleObject drain wait.
6157 handle->drainCounter = 2;
\r
6162 void RtApiDs :: callbackEvent()
\r
6164 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6165 Sleep( 50 ); // sleep 50 milliseconds
\r
6169 if ( stream_.state == STREAM_CLOSED ) {
\r
6170 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6171 error( RtAudioError::WARNING );
\r
6175 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6176 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6178 // Check if we were draining the stream and signal is finished.
\r
6179 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6181 stream_.state = STREAM_STOPPING;
\r
6182 if ( handle->internalDrain == false )
\r
6183 SetEvent( handle->condition );
\r
6189 // Invoke user callback to get fresh output data UNLESS we are
\r
6190 // draining stream.
\r
6191 if ( handle->drainCounter == 0 ) {
\r
6192 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6193 double streamTime = getStreamTime();
\r
6194 RtAudioStreamStatus status = 0;
\r
6195 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6196 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6197 handle->xrun[0] = false;
\r
6199 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6200 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6201 handle->xrun[1] = false;
\r
6203 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6204 stream_.bufferSize, streamTime, status, info->userData );
\r
6205 if ( cbReturnValue == 2 ) {
\r
6206 stream_.state = STREAM_STOPPING;
\r
6207 handle->drainCounter = 2;
\r
6211 else if ( cbReturnValue == 1 ) {
\r
6212 handle->drainCounter = 1;
\r
6213 handle->internalDrain = true;
\r
6218 DWORD currentWritePointer, safeWritePointer;
\r
6219 DWORD currentReadPointer, safeReadPointer;
\r
6220 UINT nextWritePointer;
\r
6222 LPVOID buffer1 = NULL;
\r
6223 LPVOID buffer2 = NULL;
\r
6224 DWORD bufferSize1 = 0;
\r
6225 DWORD bufferSize2 = 0;
\r
6230 MUTEX_LOCK( &stream_.mutex );
\r
6231 if ( stream_.state == STREAM_STOPPED ) {
\r
6232 MUTEX_UNLOCK( &stream_.mutex );
\r
6236 if ( buffersRolling == false ) {
\r
6237 if ( stream_.mode == DUPLEX ) {
\r
6238 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6240 // It takes a while for the devices to get rolling. As a result,
\r
6241 // there's no guarantee that the capture and write device pointers
\r
6242 // will move in lockstep. Wait here for both devices to start
\r
6243 // rolling, and then set our buffer pointers accordingly.
\r
6244 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6245 // bytes later than the write buffer.
\r
6247 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6248 // take place between the two GetCurrentPosition calls... but I'm
\r
6249 // really not sure how to solve the problem. Temporarily boost to
\r
6250 // Realtime priority, maybe; but I'm not sure what priority the
\r
6251 // DirectSound service threads run at. We *should* be roughly
\r
6252 // within a ms or so of correct.
\r
6254 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6255 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6257 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6259 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6260 if ( FAILED( result ) ) {
\r
6261 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6262 errorText_ = errorStream_.str();
\r
6263 error( RtAudioError::SYSTEM_ERROR );
\r
6266 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6267 if ( FAILED( result ) ) {
\r
6268 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6269 errorText_ = errorStream_.str();
\r
6270 error( RtAudioError::SYSTEM_ERROR );
\r
6274 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6275 if ( FAILED( result ) ) {
\r
6276 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6277 errorText_ = errorStream_.str();
\r
6278 error( RtAudioError::SYSTEM_ERROR );
\r
6281 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6282 if ( FAILED( result ) ) {
\r
6283 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6284 errorText_ = errorStream_.str();
\r
6285 error( RtAudioError::SYSTEM_ERROR );
\r
6288 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6292 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6294 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6295 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6296 handle->bufferPointer[1] = safeReadPointer;
\r
6298 else if ( stream_.mode == OUTPUT ) {
\r
6300 // Set the proper nextWritePosition after initial startup.
\r
6301 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6302 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6303 if ( FAILED( result ) ) {
\r
6304 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6305 errorText_ = errorStream_.str();
\r
6306 error( RtAudioError::SYSTEM_ERROR );
\r
6309 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6310 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6313 buffersRolling = true;
\r
6316 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6318 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6320 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6321 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6322 bufferBytes *= formatBytes( stream_.userFormat );
\r
6323 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6326 // Setup parameters and do buffer conversion if necessary.
\r
6327 if ( stream_.doConvertBuffer[0] ) {
\r
6328 buffer = stream_.deviceBuffer;
\r
6329 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6330 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6331 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6334 buffer = stream_.userBuffer[0];
\r
6335 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6336 bufferBytes *= formatBytes( stream_.userFormat );
\r
6339 // No byte swapping necessary in DirectSound implementation.
\r
6341 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6342 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6344 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6345 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6347 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6348 nextWritePointer = handle->bufferPointer[0];
\r
6350 DWORD endWrite, leadPointer;
\r
6352 // Find out where the read and "safe write" pointers are.
\r
6353     result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6354 if ( FAILED( result ) ) {
\r
6355 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6356 errorText_ = errorStream_.str();
\r
6357 error( RtAudioError::SYSTEM_ERROR );
\r
6361 // We will copy our output buffer into the region between
\r
6362 // safeWritePointer and leadPointer. If leadPointer is not
\r
6363 // beyond the next endWrite position, wait until it is.
\r
6364 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6365 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6366 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6367 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6368 endWrite = nextWritePointer + bufferBytes;
\r
6370 // Check whether the entire write region is behind the play pointer.
\r
6371 if ( leadPointer >= endWrite ) break;
\r
6373 // If we are here, then we must wait until the leadPointer advances
\r
6374 // beyond the end of our next write region. We use the
\r
6375 // Sleep() function to suspend operation until that happens.
\r
6376 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6377 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6378 if ( millis < 1.0 ) millis = 1.0;
\r
6379 Sleep( (DWORD) millis );
\r
6382 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6383 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6384 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6385 handle->xrun[0] = true;
\r
6386 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6387 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6388 handle->bufferPointer[0] = nextWritePointer;
\r
6389 endWrite = nextWritePointer + bufferBytes;
\r
6392 // Lock free space in the buffer
\r
6393 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6394 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6395 if ( FAILED( result ) ) {
\r
6396 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6397 errorText_ = errorStream_.str();
\r
6398 error( RtAudioError::SYSTEM_ERROR );
\r
6402 // Copy our buffer into the DS buffer
\r
6403 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6404 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6406 // Update our buffer offset and unlock sound buffer
\r
6407 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6408 if ( FAILED( result ) ) {
\r
6409 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6410 errorText_ = errorStream_.str();
\r
6411 error( RtAudioError::SYSTEM_ERROR );
\r
6414 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6415 handle->bufferPointer[0] = nextWritePointer;
\r
6418 // Don't bother draining input
\r
6419 if ( handle->drainCounter ) {
\r
6420 handle->drainCounter++;
\r
6424 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6426 // Setup parameters.
\r
6427 if ( stream_.doConvertBuffer[1] ) {
\r
6428 buffer = stream_.deviceBuffer;
\r
6429 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6430 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6433 buffer = stream_.userBuffer[1];
\r
6434 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6435 bufferBytes *= formatBytes( stream_.userFormat );
\r
6438 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6439 long nextReadPointer = handle->bufferPointer[1];
\r
6440 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6442 // Find out where the write and "safe read" pointers are.
\r
6443     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6444 if ( FAILED( result ) ) {
\r
6445 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6446 errorText_ = errorStream_.str();
\r
6447 error( RtAudioError::SYSTEM_ERROR );
\r
6451 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6452 DWORD endRead = nextReadPointer + bufferBytes;
\r
6454 // Handling depends on whether we are INPUT or DUPLEX.
\r
6455 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6456 // then a wait here will drag the write pointers into the forbidden zone.
\r
6458 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6459 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6460 // practical way to sync up the read and write pointers reliably, given the
\r
6461 // the very complex relationship between phase and increment of the read and write
\r
6464 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6465 // provide a pre-roll period of 0.5 seconds in which we return
\r
6466 // zeros from the read buffer while the pointers sync up.
\r
6468 if ( stream_.mode == DUPLEX ) {
\r
6469 if ( safeReadPointer < endRead ) {
\r
6470 if ( duplexPrerollBytes <= 0 ) {
\r
6471 // Pre-roll time over. Be more agressive.
\r
6472 int adjustment = endRead-safeReadPointer;
\r
6474 handle->xrun[1] = true;
\r
6476 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6477 // and perform fine adjustments later.
\r
6478 // - small adjustments: back off by twice as much.
\r
6479 if ( adjustment >= 2*bufferBytes )
\r
6480 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6482 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6484 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6488 // In pre=roll time. Just do it.
\r
6489 nextReadPointer = safeReadPointer - bufferBytes;
\r
6490 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6492 endRead = nextReadPointer + bufferBytes;
\r
6495 else { // mode == INPUT
\r
6496 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6497 // See comments for playback.
\r
6498 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6499 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6500 if ( millis < 1.0 ) millis = 1.0;
\r
6501 Sleep( (DWORD) millis );
\r
6503 // Wake up and find out where we are now.
\r
6504       result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6505 if ( FAILED( result ) ) {
\r
6506 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6507 errorText_ = errorStream_.str();
\r
6508 error( RtAudioError::SYSTEM_ERROR );
\r
6512 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6516 // Lock free space in the buffer
\r
6517 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6518 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6519 if ( FAILED( result ) ) {
\r
6520 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6521 errorText_ = errorStream_.str();
\r
6522 error( RtAudioError::SYSTEM_ERROR );
\r
6526 if ( duplexPrerollBytes <= 0 ) {
\r
6527 // Copy our buffer into the DS buffer
\r
6528 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6529 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6532 memset( buffer, 0, bufferSize1 );
\r
6533 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6534 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6537 // Update our buffer offset and unlock sound buffer
\r
6538 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6539 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6540 if ( FAILED( result ) ) {
\r
6541 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6542 errorText_ = errorStream_.str();
\r
6543 error( RtAudioError::SYSTEM_ERROR );
\r
6546 handle->bufferPointer[1] = nextReadPointer;
\r
6548 // No byte swapping necessary in DirectSound implementation.
\r
6550 // If necessary, convert 8-bit data from unsigned to signed.
\r
6551 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6552 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6554 // Do buffer conversion if necessary.
\r
6555 if ( stream_.doConvertBuffer[1] )
\r
6556 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6560 MUTEX_UNLOCK( &stream_.mutex );
\r
6561 RtApi::tickStreamTime();
\r
6564 // Definitions for utility functions and callbacks
\r
6565 // specific to the DirectSound implementation.
\r
6567 static unsigned __stdcall callbackHandler( void *ptr )
\r
6569 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6570 RtApiDs *object = (RtApiDs *) info->object;
\r
6571 bool* isRunning = &info->isRunning;
\r
6573 while ( *isRunning == true ) {
\r
6574 object->callbackEvent();
\r
6577 _endthreadex( 0 );
\r
6581 #include "tchar.h"
\r
6583 static std::string convertTChar( LPCTSTR name )
\r
6585 #if defined( UNICODE ) || defined( _UNICODE )
\r
6586 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6587 std::string s( length-1, '\0' );
\r
6588 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6590 std::string s( name );
\r
6596 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6597 LPCTSTR description,
\r
6598 LPCTSTR /*module*/,
\r
6599 LPVOID lpContext )
\r
6601 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6602 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6605 bool validDevice = false;
\r
6606 if ( probeInfo.isInput == true ) {
\r
6608 LPDIRECTSOUNDCAPTURE object;
\r
6610 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6611 if ( hr != DS_OK ) return TRUE;
\r
6613 caps.dwSize = sizeof(caps);
\r
6614 hr = object->GetCaps( &caps );
\r
6615 if ( hr == DS_OK ) {
\r
6616 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6617 validDevice = true;
\r
6619 object->Release();
\r
6623 LPDIRECTSOUND object;
\r
6624 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6625 if ( hr != DS_OK ) return TRUE;
\r
6627 caps.dwSize = sizeof(caps);
\r
6628 hr = object->GetCaps( &caps );
\r
6629 if ( hr == DS_OK ) {
\r
6630 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6631 validDevice = true;
\r
6633 object->Release();
\r
6636 // If good device, then save its name and guid.
\r
6637 std::string name = convertTChar( description );
\r
6638 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6639 if ( lpguid == NULL )
\r
6640 name = "Default Device";
\r
6641 if ( validDevice ) {
\r
6642 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6643 if ( dsDevices[i].name == name ) {
\r
6644 dsDevices[i].found = true;
\r
6645 if ( probeInfo.isInput ) {
\r
6646 dsDevices[i].id[1] = lpguid;
\r
6647 dsDevices[i].validId[1] = true;
\r
6650 dsDevices[i].id[0] = lpguid;
\r
6651 dsDevices[i].validId[0] = true;
\r
6658 device.name = name;
\r
6659 device.found = true;
\r
6660 if ( probeInfo.isInput ) {
\r
6661 device.id[1] = lpguid;
\r
6662 device.validId[1] = true;
\r
6665 device.id[0] = lpguid;
\r
6666 device.validId[0] = true;
\r
6668 dsDevices.push_back( device );
\r
6674 static const char* getErrorString( int code )
\r
6678 case DSERR_ALLOCATED:
\r
6679 return "Already allocated";
\r
6681 case DSERR_CONTROLUNAVAIL:
\r
6682 return "Control unavailable";
\r
6684 case DSERR_INVALIDPARAM:
\r
6685 return "Invalid parameter";
\r
6687 case DSERR_INVALIDCALL:
\r
6688 return "Invalid call";
\r
6690 case DSERR_GENERIC:
\r
6691 return "Generic error";
\r
6693 case DSERR_PRIOLEVELNEEDED:
\r
6694 return "Priority level needed";
\r
6696 case DSERR_OUTOFMEMORY:
\r
6697 return "Out of memory";
\r
6699 case DSERR_BADFORMAT:
\r
6700 return "The sample rate or the channel format is not supported";
\r
6702 case DSERR_UNSUPPORTED:
\r
6703 return "Not supported";
\r
6705 case DSERR_NODRIVER:
\r
6706 return "No driver";
\r
6708 case DSERR_ALREADYINITIALIZED:
\r
6709 return "Already initialized";
\r
6711 case DSERR_NOAGGREGATION:
\r
6712 return "No aggregation";
\r
6714 case DSERR_BUFFERLOST:
\r
6715 return "Buffer lost";
\r
6717 case DSERR_OTHERAPPHASPRIO:
\r
6718 return "Another application already has priority";
\r
6720 case DSERR_UNINITIALIZED:
\r
6721 return "Uninitialized";
\r
6724 return "DirectSound unknown error";
\r
6727 //******************** End of __WINDOWS_DS__ *********************//
\r
6731 #if defined(__LINUX_ALSA__)
\r
6733 #include <alsa/asoundlib.h>
\r
6734 #include <unistd.h>
\r
6736 // A structure to hold various information related to the ALSA API
\r
6737 // implementation.
\r
6738 struct AlsaHandle {
\r
6739 snd_pcm_t *handles[2];
\r
6740 bool synchronized;
\r
6742 pthread_cond_t runnable_cv;
\r
6746 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6749 static void *alsaCallbackHandler( void * ptr );
\r
6751 RtApiAlsa :: RtApiAlsa()
\r
6753 // Nothing to do here.
\r
6756 RtApiAlsa :: ~RtApiAlsa()
\r
6758 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6761 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6763 unsigned nDevices = 0;
\r
6764 int result, subdevice, card;
\r
6766 snd_ctl_t *handle;
\r
6768 // Count cards and devices
\r
6770 snd_card_next( &card );
\r
6771 while ( card >= 0 ) {
\r
6772 sprintf( name, "hw:%d", card );
\r
6773 result = snd_ctl_open( &handle, name, 0 );
\r
6774 if ( result < 0 ) {
\r
6775 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6776 errorText_ = errorStream_.str();
\r
6777 error( RtAudioError::WARNING );
\r
6782 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6783 if ( result < 0 ) {
\r
6784 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6785 errorText_ = errorStream_.str();
\r
6786 error( RtAudioError::WARNING );
\r
6789 if ( subdevice < 0 )
\r
6794 snd_ctl_close( handle );
\r
6795 snd_card_next( &card );
\r
6798 result = snd_ctl_open( &handle, "default", 0 );
\r
6799 if (result == 0) {
\r
6801 snd_ctl_close( handle );
\r
6807 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6809 RtAudio::DeviceInfo info;
\r
6810 info.probed = false;
\r
6812 unsigned nDevices = 0;
\r
6813 int result, subdevice, card;
\r
6815 snd_ctl_t *chandle;
\r
6817 // Count cards and devices
\r
6819 snd_card_next( &card );
\r
6820 while ( card >= 0 ) {
\r
6821 sprintf( name, "hw:%d", card );
\r
6822 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6823 if ( result < 0 ) {
\r
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6825 errorText_ = errorStream_.str();
\r
6826 error( RtAudioError::WARNING );
\r
6831 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6832 if ( result < 0 ) {
\r
6833 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6834 errorText_ = errorStream_.str();
\r
6835 error( RtAudioError::WARNING );
\r
6838 if ( subdevice < 0 ) break;
\r
6839 if ( nDevices == device ) {
\r
6840 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6846 snd_ctl_close( chandle );
\r
6847 snd_card_next( &card );
\r
6850 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6851 if ( result == 0 ) {
\r
6852 if ( nDevices == device ) {
\r
6853 strcpy( name, "default" );
\r
6859 if ( nDevices == 0 ) {
\r
6860 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6861 error( RtAudioError::INVALID_USE );
\r
6865 if ( device >= nDevices ) {
\r
6866 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6867 error( RtAudioError::INVALID_USE );
\r
6873 // If a stream is already open, we cannot probe the stream devices.
\r
6874 // Thus, use the saved results.
\r
6875 if ( stream_.state != STREAM_CLOSED &&
\r
6876 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6877 snd_ctl_close( chandle );
\r
6878 if ( device >= devices_.size() ) {
\r
6879 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6880 error( RtAudioError::WARNING );
\r
6883 return devices_[ device ];
\r
6886 int openMode = SND_PCM_ASYNC;
\r
6887 snd_pcm_stream_t stream;
\r
6888 snd_pcm_info_t *pcminfo;
\r
6889 snd_pcm_info_alloca( &pcminfo );
\r
6890 snd_pcm_t *phandle;
\r
6891 snd_pcm_hw_params_t *params;
\r
6892 snd_pcm_hw_params_alloca( ¶ms );
\r
6894 // First try for playback unless default device (which has subdev -1)
\r
6895 stream = SND_PCM_STREAM_PLAYBACK;
\r
6896 snd_pcm_info_set_stream( pcminfo, stream );
\r
6897 if ( subdevice != -1 ) {
\r
6898 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6899 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6901 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6902 if ( result < 0 ) {
\r
6903 // Device probably doesn't support playback.
\r
6904 goto captureProbe;
\r
6908 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6909 if ( result < 0 ) {
\r
6910 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6911 errorText_ = errorStream_.str();
\r
6912 error( RtAudioError::WARNING );
\r
6913 goto captureProbe;
\r
6916 // The device is open ... fill the parameter structure.
\r
6917 result = snd_pcm_hw_params_any( phandle, params );
\r
6918 if ( result < 0 ) {
\r
6919 snd_pcm_close( phandle );
\r
6920 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6921 errorText_ = errorStream_.str();
\r
6922 error( RtAudioError::WARNING );
\r
6923 goto captureProbe;
\r
6926 // Get output channel information.
\r
6927 unsigned int value;
\r
6928 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6929 if ( result < 0 ) {
\r
6930 snd_pcm_close( phandle );
\r
6931 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6932 errorText_ = errorStream_.str();
\r
6933 error( RtAudioError::WARNING );
\r
6934 goto captureProbe;
\r
6936 info.outputChannels = value;
\r
6937 snd_pcm_close( phandle );
\r
6940 stream = SND_PCM_STREAM_CAPTURE;
\r
6941 snd_pcm_info_set_stream( pcminfo, stream );
\r
6943 // Now try for capture unless default device (with subdev = -1)
\r
6944 if ( subdevice != -1 ) {
\r
6945 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6946 snd_ctl_close( chandle );
\r
6947 if ( result < 0 ) {
\r
6948 // Device probably doesn't support capture.
\r
6949 if ( info.outputChannels == 0 ) return info;
\r
6950 goto probeParameters;
\r
6954 snd_ctl_close( chandle );
\r
6956 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6957 if ( result < 0 ) {
\r
6958 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6959 errorText_ = errorStream_.str();
\r
6960 error( RtAudioError::WARNING );
\r
6961 if ( info.outputChannels == 0 ) return info;
\r
6962 goto probeParameters;
\r
6965 // The device is open ... fill the parameter structure.
\r
6966 result = snd_pcm_hw_params_any( phandle, params );
\r
6967 if ( result < 0 ) {
\r
6968 snd_pcm_close( phandle );
\r
6969 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6970 errorText_ = errorStream_.str();
\r
6971 error( RtAudioError::WARNING );
\r
6972 if ( info.outputChannels == 0 ) return info;
\r
6973 goto probeParameters;
\r
6976 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6977 if ( result < 0 ) {
\r
6978 snd_pcm_close( phandle );
\r
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6980 errorText_ = errorStream_.str();
\r
6981 error( RtAudioError::WARNING );
\r
6982 if ( info.outputChannels == 0 ) return info;
\r
6983 goto probeParameters;
\r
6985 info.inputChannels = value;
\r
6986 snd_pcm_close( phandle );
\r
6988 // If device opens for both playback and capture, we determine the channels.
\r
6989 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6990 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6992 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6993 if ( device == 0 && info.outputChannels > 0 )
\r
6994 info.isDefaultOutput = true;
\r
6995 if ( device == 0 && info.inputChannels > 0 )
\r
6996 info.isDefaultInput = true;
\r
6999 // At this point, we just need to figure out the supported data
\r
7000 // formats and sample rates. We'll proceed by opening the device in
\r
7001 // the direction with the maximum number of channels, or playback if
\r
7002 // they are equal. This might limit our sample rate options, but so
\r
7005 if ( info.outputChannels >= info.inputChannels )
\r
7006 stream = SND_PCM_STREAM_PLAYBACK;
\r
7008 stream = SND_PCM_STREAM_CAPTURE;
\r
7009 snd_pcm_info_set_stream( pcminfo, stream );
\r
7011 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7012 if ( result < 0 ) {
\r
7013 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7014 errorText_ = errorStream_.str();
\r
7015 error( RtAudioError::WARNING );
\r
7019 // The device is open ... fill the parameter structure.
\r
7020 result = snd_pcm_hw_params_any( phandle, params );
\r
7021 if ( result < 0 ) {
\r
7022 snd_pcm_close( phandle );
\r
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7024 errorText_ = errorStream_.str();
\r
7025 error( RtAudioError::WARNING );
\r
7029 // Test our discrete set of sample rate values.
\r
7030 info.sampleRates.clear();
\r
7031 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7032 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7033 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7035 if ( info.sampleRates.size() == 0 ) {
\r
7036 snd_pcm_close( phandle );
\r
7037 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7038 errorText_ = errorStream_.str();
\r
7039 error( RtAudioError::WARNING );
\r
7043 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7044 snd_pcm_format_t format;
\r
7045 info.nativeFormats = 0;
\r
7046 format = SND_PCM_FORMAT_S8;
\r
7047 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7048 info.nativeFormats |= RTAUDIO_SINT8;
\r
7049 format = SND_PCM_FORMAT_S16;
\r
7050 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7051 info.nativeFormats |= RTAUDIO_SINT16;
\r
7052 format = SND_PCM_FORMAT_S24;
\r
7053 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7054 info.nativeFormats |= RTAUDIO_SINT24;
\r
7055 format = SND_PCM_FORMAT_S32;
\r
7056 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7057 info.nativeFormats |= RTAUDIO_SINT32;
\r
7058 format = SND_PCM_FORMAT_FLOAT;
\r
7059 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7060 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7061 format = SND_PCM_FORMAT_FLOAT64;
\r
7062 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7063 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7065 // Check that we have at least one supported format
\r
7066 if ( info.nativeFormats == 0 ) {
\r
7067 snd_pcm_close( phandle );
\r
7068 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7069 errorText_ = errorStream_.str();
\r
7070 error( RtAudioError::WARNING );
\r
7074 // Get the device name
\r
7076 result = snd_card_get_name( card, &cardname );
\r
7077 if ( result >= 0 ) {
\r
7078 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7083 // That's all ... close the device and return
\r
7084 snd_pcm_close( phandle );
\r
7085 info.probed = true;
\r
7089 void RtApiAlsa :: saveDeviceInfo( void )
\r
7093 unsigned int nDevices = getDeviceCount();
\r
7094 devices_.resize( nDevices );
\r
7095 for ( unsigned int i=0; i<nDevices; i++ )
\r
7096 devices_[i] = getDeviceInfo( i );
\r
7099 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7100 unsigned int firstChannel, unsigned int sampleRate,
\r
7101 RtAudioFormat format, unsigned int *bufferSize,
\r
7102 RtAudio::StreamOptions *options )
\r
7105 #if defined(__RTAUDIO_DEBUG__)
\r
7106 snd_output_t *out;
\r
7107 snd_output_stdio_attach(&out, stderr, 0);
\r
7110 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7112 unsigned nDevices = 0;
\r
7113 int result, subdevice, card;
\r
7115 snd_ctl_t *chandle;
\r
7117 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7118 snprintf(name, sizeof(name), "%s", "default");
\r
7120 // Count cards and devices
\r
7122 snd_card_next( &card );
\r
7123 while ( card >= 0 ) {
\r
7124 sprintf( name, "hw:%d", card );
\r
7125 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7126 if ( result < 0 ) {
\r
7127 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7128 errorText_ = errorStream_.str();
\r
7133 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7134 if ( result < 0 ) break;
\r
7135 if ( subdevice < 0 ) break;
\r
7136 if ( nDevices == device ) {
\r
7137 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7138 snd_ctl_close( chandle );
\r
7143 snd_ctl_close( chandle );
\r
7144 snd_card_next( &card );
\r
7147 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7148 if ( result == 0 ) {
\r
7149 if ( nDevices == device ) {
\r
7150 strcpy( name, "default" );
\r
7156 if ( nDevices == 0 ) {
\r
7157 // This should not happen because a check is made before this function is called.
\r
7158 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7162 if ( device >= nDevices ) {
\r
7163 // This should not happen because a check is made before this function is called.
\r
7164 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7171 // The getDeviceInfo() function will not work for a device that is
\r
7172 // already open. Thus, we'll probe the system before opening a
\r
7173 // stream and save the results for use by getDeviceInfo().
\r
7174 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7175 this->saveDeviceInfo();
\r
7177 snd_pcm_stream_t stream;
\r
7178 if ( mode == OUTPUT )
\r
7179 stream = SND_PCM_STREAM_PLAYBACK;
\r
7181 stream = SND_PCM_STREAM_CAPTURE;
\r
7183 snd_pcm_t *phandle;
\r
7184 int openMode = SND_PCM_ASYNC;
\r
7185 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7186 if ( result < 0 ) {
\r
7187 if ( mode == OUTPUT )
\r
7188 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7190 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7191 errorText_ = errorStream_.str();
\r
7195 // Fill the parameter structure.
\r
7196 snd_pcm_hw_params_t *hw_params;
\r
7197 snd_pcm_hw_params_alloca( &hw_params );
\r
7198 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7199 if ( result < 0 ) {
\r
7200 snd_pcm_close( phandle );
\r
7201 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7202 errorText_ = errorStream_.str();
\r
7206 #if defined(__RTAUDIO_DEBUG__)
\r
7207 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7208 snd_pcm_hw_params_dump( hw_params, out );
\r
7211 // Set access ... check user preference.
\r
7212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7213 stream_.userInterleaved = false;
\r
7214 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7215 if ( result < 0 ) {
\r
7216 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7217 stream_.deviceInterleaved[mode] = true;
\r
7220 stream_.deviceInterleaved[mode] = false;
\r
7223 stream_.userInterleaved = true;
\r
7224 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7225 if ( result < 0 ) {
\r
7226 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7227 stream_.deviceInterleaved[mode] = false;
\r
7230 stream_.deviceInterleaved[mode] = true;
\r
7233 if ( result < 0 ) {
\r
7234 snd_pcm_close( phandle );
\r
7235 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7236 errorText_ = errorStream_.str();
\r
7240 // Determine how to set the device format.
\r
7241 stream_.userFormat = format;
\r
7242 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7244 if ( format == RTAUDIO_SINT8 )
\r
7245 deviceFormat = SND_PCM_FORMAT_S8;
\r
7246 else if ( format == RTAUDIO_SINT16 )
\r
7247 deviceFormat = SND_PCM_FORMAT_S16;
\r
7248 else if ( format == RTAUDIO_SINT24 )
\r
7249 deviceFormat = SND_PCM_FORMAT_S24;
\r
7250 else if ( format == RTAUDIO_SINT32 )
\r
7251 deviceFormat = SND_PCM_FORMAT_S32;
\r
7252 else if ( format == RTAUDIO_FLOAT32 )
\r
7253 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7254 else if ( format == RTAUDIO_FLOAT64 )
\r
7255 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7257 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7258 stream_.deviceFormat[mode] = format;
\r
7262 // The user requested format is not natively supported by the device.
\r
7263 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7264 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7265 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7269 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7270 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7271 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7275 deviceFormat = SND_PCM_FORMAT_S32;
\r
7276 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7277 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7281 deviceFormat = SND_PCM_FORMAT_S24;
\r
7282 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7283 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7287 deviceFormat = SND_PCM_FORMAT_S16;
\r
7288 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7289 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7293 deviceFormat = SND_PCM_FORMAT_S8;
\r
7294 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7295 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7299 // If we get here, no supported format was found.
\r
7300 snd_pcm_close( phandle );
\r
7301 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7302 errorText_ = errorStream_.str();
\r
7306 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7307 if ( result < 0 ) {
\r
7308 snd_pcm_close( phandle );
\r
7309 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7310 errorText_ = errorStream_.str();
\r
7314 // Determine whether byte-swaping is necessary.
\r
7315 stream_.doByteSwap[mode] = false;
\r
7316 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7317 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7318 if ( result == 0 )
\r
7319 stream_.doByteSwap[mode] = true;
\r
7320 else if (result < 0) {
\r
7321 snd_pcm_close( phandle );
\r
7322 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7323 errorText_ = errorStream_.str();
\r
7328 // Set the sample rate.
\r
7329 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7330 if ( result < 0 ) {
\r
7331 snd_pcm_close( phandle );
\r
7332 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7333 errorText_ = errorStream_.str();
\r
7337 // Determine the number of channels for this device. We support a possible
\r
7338 // minimum device channel number > than the value requested by the user.
\r
7339 stream_.nUserChannels[mode] = channels;
\r
7340 unsigned int value;
\r
7341 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7342 unsigned int deviceChannels = value;
\r
7343 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7344 snd_pcm_close( phandle );
\r
7345 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7346 errorText_ = errorStream_.str();
\r
7350 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7351 if ( result < 0 ) {
\r
7352 snd_pcm_close( phandle );
\r
7353 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7354 errorText_ = errorStream_.str();
\r
7357 deviceChannels = value;
\r
7358 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7359 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7361 // Set the device channels.
\r
7362 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7363 if ( result < 0 ) {
\r
7364 snd_pcm_close( phandle );
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7366 errorText_ = errorStream_.str();
\r
7370 // Set the buffer (or period) size.
\r
7372 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7373 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7374 if ( result < 0 ) {
\r
7375 snd_pcm_close( phandle );
\r
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7377 errorText_ = errorStream_.str();
\r
7380 *bufferSize = periodSize;
\r
7382 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7383 unsigned int periods = 0;
\r
7384 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7385 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7386 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7387 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7388 if ( result < 0 ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7391 errorText_ = errorStream_.str();
\r
7395 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7396 // MUST be the same in both directions!
\r
7397 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7398 snd_pcm_close( phandle );
\r
7399 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7400 errorText_ = errorStream_.str();
\r
7404 stream_.bufferSize = *bufferSize;
\r
7406 // Install the hardware configuration
\r
7407 result = snd_pcm_hw_params( phandle, hw_params );
\r
7408 if ( result < 0 ) {
\r
7409 snd_pcm_close( phandle );
\r
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7411 errorText_ = errorStream_.str();
\r
7415 #if defined(__RTAUDIO_DEBUG__)
\r
7416 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7417 snd_pcm_hw_params_dump( hw_params, out );
\r
7420 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7421 snd_pcm_sw_params_t *sw_params = NULL;
\r
7422 snd_pcm_sw_params_alloca( &sw_params );
\r
7423 snd_pcm_sw_params_current( phandle, sw_params );
\r
7424 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7425 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7426 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7428 // The following two settings were suggested by Theo Veenker
\r
7429 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7430 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7432 // here are two options for a fix
\r
7433 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7434 snd_pcm_uframes_t val;
\r
7435 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7436 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7438 result = snd_pcm_sw_params( phandle, sw_params );
\r
7439 if ( result < 0 ) {
\r
7440 snd_pcm_close( phandle );
\r
7441 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7442 errorText_ = errorStream_.str();
\r
7446 #if defined(__RTAUDIO_DEBUG__)
\r
7447 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7448 snd_pcm_sw_params_dump( sw_params, out );
\r
7451 // Set flags for buffer conversion
\r
7452 stream_.doConvertBuffer[mode] = false;
\r
7453 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7454 stream_.doConvertBuffer[mode] = true;
\r
7455 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7456 stream_.doConvertBuffer[mode] = true;
\r
7457 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7458 stream_.nUserChannels[mode] > 1 )
\r
7459 stream_.doConvertBuffer[mode] = true;
\r
7461 // Allocate the ApiHandle if necessary and then save.
\r
7462 AlsaHandle *apiInfo = 0;
\r
7463 if ( stream_.apiHandle == 0 ) {
\r
7465 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7467 catch ( std::bad_alloc& ) {
\r
7468 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7472 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7473 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7477 stream_.apiHandle = (void *) apiInfo;
\r
7478 apiInfo->handles[0] = 0;
\r
7479 apiInfo->handles[1] = 0;
\r
7482 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7484 apiInfo->handles[mode] = phandle;
\r
7487 // Allocate necessary internal buffers.
\r
7488 unsigned long bufferBytes;
\r
7489 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7490 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7491 if ( stream_.userBuffer[mode] == NULL ) {
\r
7492 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7496 if ( stream_.doConvertBuffer[mode] ) {
\r
7498 bool makeBuffer = true;
\r
7499 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7500 if ( mode == INPUT ) {
\r
7501 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7502 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7503 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7507 if ( makeBuffer ) {
\r
7508 bufferBytes *= *bufferSize;
\r
7509 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7510 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7511 if ( stream_.deviceBuffer == NULL ) {
\r
7512 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7518 stream_.sampleRate = sampleRate;
\r
7519 stream_.nBuffers = periods;
\r
7520 stream_.device[mode] = device;
\r
7521 stream_.state = STREAM_STOPPED;
\r
7523 // Setup the buffer conversion information structure.
\r
7524 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7526 // Setup thread if necessary.
\r
7527 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7528 // We had already set up an output stream.
\r
7529 stream_.mode = DUPLEX;
\r
7530 // Link the streams if possible.
\r
7531 apiInfo->synchronized = false;
\r
7532 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7533 apiInfo->synchronized = true;
\r
7535 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7536 error( RtAudioError::WARNING );
\r
7540 stream_.mode = mode;
\r
7542 // Setup callback thread.
\r
7543 stream_.callbackInfo.object = (void *) this;
\r
7545 // Set the thread attributes for joinable and realtime scheduling
\r
7546 // priority (optional). The higher priority will only take affect
\r
7547 // if the program is run as root or suid. Note, under Linux
\r
7548 // processes with CAP_SYS_NICE privilege, a user can change
\r
7549 // scheduling policy and priority (thus need not be root). See
\r
7550 // POSIX "capabilities".
\r
7551 pthread_attr_t attr;
\r
7552 pthread_attr_init( &attr );
\r
7553 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7555 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7556 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7557 // We previously attempted to increase the audio callback priority
\r
7558 // to SCHED_RR here via the attributes. However, while no errors
\r
7559 // were reported in doing so, it did not work. So, now this is
\r
7560 // done in the alsaCallbackHandler function.
\r
7561 stream_.callbackInfo.doRealtime = true;
\r
7562 int priority = options->priority;
\r
7563 int min = sched_get_priority_min( SCHED_RR );
\r
7564 int max = sched_get_priority_max( SCHED_RR );
\r
7565 if ( priority < min ) priority = min;
\r
7566 else if ( priority > max ) priority = max;
\r
7567 stream_.callbackInfo.priority = priority;
\r
7571 stream_.callbackInfo.isRunning = true;
\r
7572 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7573 pthread_attr_destroy( &attr );
\r
7575 stream_.callbackInfo.isRunning = false;
\r
7576 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7585 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7586 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7587 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7589 stream_.apiHandle = 0;
\r
7592 if ( phandle) snd_pcm_close( phandle );
\r
7594 for ( int i=0; i<2; i++ ) {
\r
7595 if ( stream_.userBuffer[i] ) {
\r
7596 free( stream_.userBuffer[i] );
\r
7597 stream_.userBuffer[i] = 0;
\r
7601 if ( stream_.deviceBuffer ) {
\r
7602 free( stream_.deviceBuffer );
\r
7603 stream_.deviceBuffer = 0;
\r
7606 stream_.state = STREAM_CLOSED;
\r
7610 void RtApiAlsa :: closeStream()
\r
7612 if ( stream_.state == STREAM_CLOSED ) {
\r
7613 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7614 error( RtAudioError::WARNING );
\r
7618 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7619 stream_.callbackInfo.isRunning = false;
\r
7620 MUTEX_LOCK( &stream_.mutex );
\r
7621 if ( stream_.state == STREAM_STOPPED ) {
\r
7622 apiInfo->runnable = true;
\r
7623 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7625 MUTEX_UNLOCK( &stream_.mutex );
\r
7626 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7628 if ( stream_.state == STREAM_RUNNING ) {
\r
7629 stream_.state = STREAM_STOPPED;
\r
7630 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7631 snd_pcm_drop( apiInfo->handles[0] );
\r
7632 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7633 snd_pcm_drop( apiInfo->handles[1] );
\r
7637 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7638 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7639 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7641 stream_.apiHandle = 0;
\r
7644 for ( int i=0; i<2; i++ ) {
\r
7645 if ( stream_.userBuffer[i] ) {
\r
7646 free( stream_.userBuffer[i] );
\r
7647 stream_.userBuffer[i] = 0;
\r
7651 if ( stream_.deviceBuffer ) {
\r
7652 free( stream_.deviceBuffer );
\r
7653 stream_.deviceBuffer = 0;
\r
7656 stream_.mode = UNINITIALIZED;
\r
7657 stream_.state = STREAM_CLOSED;
\r
7660 void RtApiAlsa :: startStream()
\r
7662 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7665 if ( stream_.state == STREAM_RUNNING ) {
\r
7666 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7667 error( RtAudioError::WARNING );
\r
7671 MUTEX_LOCK( &stream_.mutex );
\r
7674 snd_pcm_state_t state;
\r
7675 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7676 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7677 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7678 state = snd_pcm_state( handle[0] );
\r
7679 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7680 result = snd_pcm_prepare( handle[0] );
\r
7681 if ( result < 0 ) {
\r
7682 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7683 errorText_ = errorStream_.str();
\r
7689 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7690 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7691 state = snd_pcm_state( handle[1] );
\r
7692 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7693 result = snd_pcm_prepare( handle[1] );
\r
7694 if ( result < 0 ) {
\r
7695 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7696 errorText_ = errorStream_.str();
\r
7702 stream_.state = STREAM_RUNNING;
\r
7705 apiInfo->runnable = true;
\r
7706 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7707 MUTEX_UNLOCK( &stream_.mutex );
\r
7709 if ( result >= 0 ) return;
\r
7710 error( RtAudioError::SYSTEM_ERROR );
\r
7713 void RtApiAlsa :: stopStream()
\r
7716 if ( stream_.state == STREAM_STOPPED ) {
\r
7717 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7718 error( RtAudioError::WARNING );
\r
7722 stream_.state = STREAM_STOPPED;
\r
7723 MUTEX_LOCK( &stream_.mutex );
\r
7726 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7727 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7728 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7729 if ( apiInfo->synchronized )
\r
7730 result = snd_pcm_drop( handle[0] );
\r
7732 result = snd_pcm_drain( handle[0] );
\r
7733 if ( result < 0 ) {
\r
7734 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7735 errorText_ = errorStream_.str();
\r
7740 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7741 result = snd_pcm_drop( handle[1] );
\r
7742 if ( result < 0 ) {
\r
7743 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7744 errorText_ = errorStream_.str();
\r
7750 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7751 MUTEX_UNLOCK( &stream_.mutex );
\r
7753 if ( result >= 0 ) return;
\r
7754 error( RtAudioError::SYSTEM_ERROR );
\r
7757 void RtApiAlsa :: abortStream()
\r
7760 if ( stream_.state == STREAM_STOPPED ) {
\r
7761 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7762 error( RtAudioError::WARNING );
\r
7766 stream_.state = STREAM_STOPPED;
\r
7767 MUTEX_LOCK( &stream_.mutex );
\r
7770 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7771 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7772 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7773 result = snd_pcm_drop( handle[0] );
\r
7774 if ( result < 0 ) {
\r
7775 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7776 errorText_ = errorStream_.str();
\r
7781 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7782 result = snd_pcm_drop( handle[1] );
\r
7783 if ( result < 0 ) {
\r
7784 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7785 errorText_ = errorStream_.str();
\r
7791 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7792 MUTEX_UNLOCK( &stream_.mutex );
\r
7794 if ( result >= 0 ) return;
\r
7795 error( RtAudioError::SYSTEM_ERROR );
\r
7798 void RtApiAlsa :: callbackEvent()
\r
7800 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7801 if ( stream_.state == STREAM_STOPPED ) {
\r
7802 MUTEX_LOCK( &stream_.mutex );
\r
7803 while ( !apiInfo->runnable )
\r
7804 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7806 if ( stream_.state != STREAM_RUNNING ) {
\r
7807 MUTEX_UNLOCK( &stream_.mutex );
\r
7810 MUTEX_UNLOCK( &stream_.mutex );
\r
7813 if ( stream_.state == STREAM_CLOSED ) {
\r
7814 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7815 error( RtAudioError::WARNING );
\r
7819 int doStopStream = 0;
\r
7820 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7821 double streamTime = getStreamTime();
\r
7822 RtAudioStreamStatus status = 0;
\r
7823 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7824 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7825 apiInfo->xrun[0] = false;
\r
7827 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7828 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7829 apiInfo->xrun[1] = false;
\r
7831 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7832 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7834 if ( doStopStream == 2 ) {
\r
7839 MUTEX_LOCK( &stream_.mutex );
\r
7841 // The state might change while waiting on a mutex.
\r
7842 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7847 snd_pcm_t **handle;
\r
7848 snd_pcm_sframes_t frames;
\r
7849 RtAudioFormat format;
\r
7850 handle = (snd_pcm_t **) apiInfo->handles;
\r
7852 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7854 // Setup parameters.
\r
7855 if ( stream_.doConvertBuffer[1] ) {
\r
7856 buffer = stream_.deviceBuffer;
\r
7857 channels = stream_.nDeviceChannels[1];
\r
7858 format = stream_.deviceFormat[1];
\r
7861 buffer = stream_.userBuffer[1];
\r
7862 channels = stream_.nUserChannels[1];
\r
7863 format = stream_.userFormat;
\r
7866 // Read samples from device in interleaved/non-interleaved format.
\r
7867 if ( stream_.deviceInterleaved[1] )
\r
7868 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7870 void *bufs[channels];
\r
7871 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7872 for ( int i=0; i<channels; i++ )
\r
7873 bufs[i] = (void *) (buffer + (i * offset));
\r
7874 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7877 if ( result < (int) stream_.bufferSize ) {
\r
7878 // Either an error or overrun occured.
\r
7879 if ( result == -EPIPE ) {
\r
7880 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7881 if ( state == SND_PCM_STATE_XRUN ) {
\r
7882 apiInfo->xrun[1] = true;
\r
7883 result = snd_pcm_prepare( handle[1] );
\r
7884 if ( result < 0 ) {
\r
7885 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7886 errorText_ = errorStream_.str();
\r
7890 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7891 errorText_ = errorStream_.str();
\r
7895 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7896 errorText_ = errorStream_.str();
\r
7898 error( RtAudioError::WARNING );
\r
7902 // Do byte swapping if necessary.
\r
7903 if ( stream_.doByteSwap[1] )
\r
7904 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7906 // Do buffer conversion if necessary.
\r
7907 if ( stream_.doConvertBuffer[1] )
\r
7908 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7910 // Check stream latency
\r
7911 result = snd_pcm_delay( handle[1], &frames );
\r
7912 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7917 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7919 // Setup parameters and do buffer conversion if necessary.
\r
7920 if ( stream_.doConvertBuffer[0] ) {
\r
7921 buffer = stream_.deviceBuffer;
\r
7922 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7923 channels = stream_.nDeviceChannels[0];
\r
7924 format = stream_.deviceFormat[0];
\r
7927 buffer = stream_.userBuffer[0];
\r
7928 channels = stream_.nUserChannels[0];
\r
7929 format = stream_.userFormat;
\r
7932 // Do byte swapping if necessary.
\r
7933 if ( stream_.doByteSwap[0] )
\r
7934 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7936 // Write samples to device in interleaved/non-interleaved format.
\r
7937 if ( stream_.deviceInterleaved[0] )
\r
7938 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7940 void *bufs[channels];
\r
7941 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7942 for ( int i=0; i<channels; i++ )
\r
7943 bufs[i] = (void *) (buffer + (i * offset));
\r
7944 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7947 if ( result < (int) stream_.bufferSize ) {
\r
7948 // Either an error or underrun occured.
\r
7949 if ( result == -EPIPE ) {
\r
7950 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7951 if ( state == SND_PCM_STATE_XRUN ) {
\r
7952 apiInfo->xrun[0] = true;
\r
7953 result = snd_pcm_prepare( handle[0] );
\r
7954 if ( result < 0 ) {
\r
7955 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7956 errorText_ = errorStream_.str();
\r
7960 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7961 errorText_ = errorStream_.str();
\r
7965 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7966 errorText_ = errorStream_.str();
\r
7968 error( RtAudioError::WARNING );
\r
7972 // Check stream latency
\r
7973 result = snd_pcm_delay( handle[0], &frames );
\r
7974 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7978 MUTEX_UNLOCK( &stream_.mutex );
\r
7980 RtApi::tickStreamTime();
\r
7981 if ( doStopStream == 1 ) this->stopStream();
\r
7984 static void *alsaCallbackHandler( void *ptr )
\r
7986 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7987 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7988 bool *isRunning = &info->isRunning;
\r
7990 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7991 if ( &info->doRealtime ) {
\r
7992 pthread_t tID = pthread_self(); // ID of this thread
\r
7993 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7994 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7998 while ( *isRunning == true ) {
\r
7999 pthread_testcancel();
\r
8000 object->callbackEvent();
\r
8003 pthread_exit( NULL );
\r
8006 //******************** End of __LINUX_ALSA__ *********************//
\r
8009 #if defined(__LINUX_PULSE__)
\r
8011 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8012 // and Tristan Matthews.
\r
8014 #include <pulse/error.h>
\r
8015 #include <pulse/simple.h>
\r
// Sample rates offered by the PulseAudio backend; the list is
// zero-terminated so callers can iterate without knowing its length.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000,
  16000,
  22050,
  32000,
  44100,
  48000,
  96000,
  0  // sentinel: end of list
};
\r
8021 struct rtaudio_pa_format_mapping_t {
\r
8022 RtAudioFormat rtaudio_format;
\r
8023 pa_sample_format_t pa_format;
\r
8026 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8027 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8028 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8029 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8030 {0, PA_SAMPLE_INVALID}};
\r
8032 struct PulseAudioHandle {
\r
8033 pa_simple *s_play;
\r
8036 pthread_cond_t runnable_cv;
\r
8038 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8041 RtApiPulse::~RtApiPulse()
\r
8043 if ( stream_.state != STREAM_CLOSED )
\r
8047 unsigned int RtApiPulse::getDeviceCount( void )
\r
8052 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8054 RtAudio::DeviceInfo info;
\r
8055 info.probed = true;
\r
8056 info.name = "PulseAudio";
\r
8057 info.outputChannels = 2;
\r
8058 info.inputChannels = 2;
\r
8059 info.duplexChannels = 2;
\r
8060 info.isDefaultOutput = true;
\r
8061 info.isDefaultInput = true;
\r
8063 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8064 info.sampleRates.push_back( *sr );
\r
8066 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8071 static void *pulseaudio_callback( void * user )
\r
8073 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8074 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8075 volatile bool *isRunning = &cbi->isRunning;
\r
8077 while ( *isRunning ) {
\r
8078 pthread_testcancel();
\r
8079 context->callbackEvent();
\r
8082 pthread_exit( NULL );
\r
8085 void RtApiPulse::closeStream( void )
\r
8087 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8089 stream_.callbackInfo.isRunning = false;
\r
8091 MUTEX_LOCK( &stream_.mutex );
\r
8092 if ( stream_.state == STREAM_STOPPED ) {
\r
8093 pah->runnable = true;
\r
8094 pthread_cond_signal( &pah->runnable_cv );
\r
8096 MUTEX_UNLOCK( &stream_.mutex );
\r
8098 pthread_join( pah->thread, 0 );
\r
8099 if ( pah->s_play ) {
\r
8100 pa_simple_flush( pah->s_play, NULL );
\r
8101 pa_simple_free( pah->s_play );
\r
8104 pa_simple_free( pah->s_rec );
\r
8106 pthread_cond_destroy( &pah->runnable_cv );
\r
8108 stream_.apiHandle = 0;
\r
8111 if ( stream_.userBuffer[0] ) {
\r
8112 free( stream_.userBuffer[0] );
\r
8113 stream_.userBuffer[0] = 0;
\r
8115 if ( stream_.userBuffer[1] ) {
\r
8116 free( stream_.userBuffer[1] );
\r
8117 stream_.userBuffer[1] = 0;
\r
8120 stream_.state = STREAM_CLOSED;
\r
8121 stream_.mode = UNINITIALIZED;
\r
8124 void RtApiPulse::callbackEvent( void )
\r
8126 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8128 if ( stream_.state == STREAM_STOPPED ) {
\r
8129 MUTEX_LOCK( &stream_.mutex );
\r
8130 while ( !pah->runnable )
\r
8131 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8133 if ( stream_.state != STREAM_RUNNING ) {
\r
8134 MUTEX_UNLOCK( &stream_.mutex );
\r
8137 MUTEX_UNLOCK( &stream_.mutex );
\r
8140 if ( stream_.state == STREAM_CLOSED ) {
\r
8141 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8142 "this shouldn't happen!";
\r
8143 error( RtAudioError::WARNING );
\r
8147 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8148 double streamTime = getStreamTime();
\r
8149 RtAudioStreamStatus status = 0;
\r
8150 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8151 stream_.bufferSize, streamTime, status,
\r
8152 stream_.callbackInfo.userData );
\r
8154 if ( doStopStream == 2 ) {
\r
8159 MUTEX_LOCK( &stream_.mutex );
\r
8160 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8161 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8163 if ( stream_.state != STREAM_RUNNING )
\r
8168 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8169 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8170 convertBuffer( stream_.deviceBuffer,
\r
8171 stream_.userBuffer[OUTPUT],
\r
8172 stream_.convertInfo[OUTPUT] );
\r
8173 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8174 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8176 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8177 formatBytes( stream_.userFormat );
\r
8179 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8180 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8181 pa_strerror( pa_error ) << ".";
\r
8182 errorText_ = errorStream_.str();
\r
8183 error( RtAudioError::WARNING );
\r
8187 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8188 if ( stream_.doConvertBuffer[INPUT] )
\r
8189 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8190 formatBytes( stream_.deviceFormat[INPUT] );
\r
8192 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8193 formatBytes( stream_.userFormat );
\r
8195 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8196 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8197 pa_strerror( pa_error ) << ".";
\r
8198 errorText_ = errorStream_.str();
\r
8199 error( RtAudioError::WARNING );
\r
8201 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8202 convertBuffer( stream_.userBuffer[INPUT],
\r
8203 stream_.deviceBuffer,
\r
8204 stream_.convertInfo[INPUT] );
\r
8209 MUTEX_UNLOCK( &stream_.mutex );
\r
8210 RtApi::tickStreamTime();
\r
8212 if ( doStopStream == 1 )
\r
8216 void RtApiPulse::startStream( void )
\r
8218 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8220 if ( stream_.state == STREAM_CLOSED ) {
\r
8221 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8222 error( RtAudioError::INVALID_USE );
\r
8225 if ( stream_.state == STREAM_RUNNING ) {
\r
8226 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8227 error( RtAudioError::WARNING );
\r
8231 MUTEX_LOCK( &stream_.mutex );
\r
8233 stream_.state = STREAM_RUNNING;
\r
8235 pah->runnable = true;
\r
8236 pthread_cond_signal( &pah->runnable_cv );
\r
8237 MUTEX_UNLOCK( &stream_.mutex );
\r
8240 void RtApiPulse::stopStream( void )
\r
8242 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8244 if ( stream_.state == STREAM_CLOSED ) {
\r
8245 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8246 error( RtAudioError::INVALID_USE );
\r
8249 if ( stream_.state == STREAM_STOPPED ) {
\r
8250 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8251 error( RtAudioError::WARNING );
\r
8255 stream_.state = STREAM_STOPPED;
\r
8256 MUTEX_LOCK( &stream_.mutex );
\r
8258 if ( pah && pah->s_play ) {
\r
8260 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8261 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8262 pa_strerror( pa_error ) << ".";
\r
8263 errorText_ = errorStream_.str();
\r
8264 MUTEX_UNLOCK( &stream_.mutex );
\r
8265 error( RtAudioError::SYSTEM_ERROR );
\r
8270 stream_.state = STREAM_STOPPED;
\r
8271 MUTEX_UNLOCK( &stream_.mutex );
\r
8274 void RtApiPulse::abortStream( void )
\r
8276 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8278 if ( stream_.state == STREAM_CLOSED ) {
\r
8279 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8280 error( RtAudioError::INVALID_USE );
\r
8283 if ( stream_.state == STREAM_STOPPED ) {
\r
8284 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8285 error( RtAudioError::WARNING );
\r
8289 stream_.state = STREAM_STOPPED;
\r
8290 MUTEX_LOCK( &stream_.mutex );
\r
8292 if ( pah && pah->s_play ) {
\r
8294 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8295 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8296 pa_strerror( pa_error ) << ".";
\r
8297 errorText_ = errorStream_.str();
\r
8298 MUTEX_UNLOCK( &stream_.mutex );
\r
8299 error( RtAudioError::SYSTEM_ERROR );
\r
8304 stream_.state = STREAM_STOPPED;
\r
8305 MUTEX_UNLOCK( &stream_.mutex );
\r
8308 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8309 unsigned int channels, unsigned int firstChannel,
\r
8310 unsigned int sampleRate, RtAudioFormat format,
\r
8311 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8313 PulseAudioHandle *pah = 0;
\r
8314 unsigned long bufferBytes = 0;
\r
8315 pa_sample_spec ss;
\r
8317 if ( device != 0 ) return false;
\r
8318 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8319 if ( channels != 1 && channels != 2 ) {
\r
8320 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8323 ss.channels = channels;
\r
8325 if ( firstChannel != 0 ) return false;
\r
8327 bool sr_found = false;
\r
8328 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8329 if ( sampleRate == *sr ) {
\r
8331 stream_.sampleRate = sampleRate;
\r
8332 ss.rate = sampleRate;
\r
8336 if ( !sr_found ) {
\r
8337 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8341 bool sf_found = 0;
\r
8342 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8343 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8344 if ( format == sf->rtaudio_format ) {
\r
8346 stream_.userFormat = sf->rtaudio_format;
\r
8347 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8348 ss.format = sf->pa_format;
\r
8352 if ( !sf_found ) { // Use internal data format conversion.
\r
8353 stream_.userFormat = format;
\r
8354 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8355 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8358 // Set other stream parameters.
\r
8359 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8360 else stream_.userInterleaved = true;
\r
8361 stream_.deviceInterleaved[mode] = true;
\r
8362 stream_.nBuffers = 1;
\r
8363 stream_.doByteSwap[mode] = false;
\r
8364 stream_.nUserChannels[mode] = channels;
\r
8365 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8366 stream_.channelOffset[mode] = 0;
\r
8367 std::string streamName = "RtAudio";
\r
8369 // Set flags for buffer conversion.
\r
8370 stream_.doConvertBuffer[mode] = false;
\r
8371 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8372 stream_.doConvertBuffer[mode] = true;
\r
8373 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8374 stream_.doConvertBuffer[mode] = true;
\r
8376 // Allocate necessary internal buffers.
\r
8377 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8378 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8379 if ( stream_.userBuffer[mode] == NULL ) {
\r
8380 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8383 stream_.bufferSize = *bufferSize;
\r
8385 if ( stream_.doConvertBuffer[mode] ) {
\r
8387 bool makeBuffer = true;
\r
8388 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8389 if ( mode == INPUT ) {
\r
8390 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8391 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8392 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8396 if ( makeBuffer ) {
\r
8397 bufferBytes *= *bufferSize;
\r
8398 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8399 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8400 if ( stream_.deviceBuffer == NULL ) {
\r
8401 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8407 stream_.device[mode] = device;
\r
8409 // Setup the buffer conversion information structure.
\r
8410 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8412 if ( !stream_.apiHandle ) {
\r
8413 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8415 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8419 stream_.apiHandle = pah;
\r
8420 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8421 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8425 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8428 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8431 pa_buffer_attr buffer_attr;
\r
8432 buffer_attr.fragsize = bufferBytes;
\r
8433 buffer_attr.maxlength = -1;
\r
8435 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8436 if ( !pah->s_rec ) {
\r
8437 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8442 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8443 if ( !pah->s_play ) {
\r
8444 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8452 if ( stream_.mode == UNINITIALIZED )
\r
8453 stream_.mode = mode;
\r
8454 else if ( stream_.mode == mode )
\r
8457 stream_.mode = DUPLEX;
\r
8459 if ( !stream_.callbackInfo.isRunning ) {
\r
8460 stream_.callbackInfo.object = this;
\r
8461 stream_.callbackInfo.isRunning = true;
\r
8462 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8463 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8468 stream_.state = STREAM_STOPPED;
\r
8472 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8473 pthread_cond_destroy( &pah->runnable_cv );
\r
8475 stream_.apiHandle = 0;
\r
8478 for ( int i=0; i<2; i++ ) {
\r
8479 if ( stream_.userBuffer[i] ) {
\r
8480 free( stream_.userBuffer[i] );
\r
8481 stream_.userBuffer[i] = 0;
\r
8485 if ( stream_.deviceBuffer ) {
\r
8486 free( stream_.deviceBuffer );
\r
8487 stream_.deviceBuffer = 0;
\r
8493 //******************** End of __LINUX_PULSE__ *********************//
\r
8496 #if defined(__LINUX_OSS__)
\r
8498 #include <unistd.h>
\r
8499 #include <sys/ioctl.h>
\r
8500 #include <unistd.h>
\r
8501 #include <fcntl.h>
\r
8502 #include <sys/soundcard.h>
\r
8503 #include <errno.h>
\r
8506 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];                // playback [0] / capture [1] device file descriptors
  bool xrun[2];             // under/overflow flags, one per direction
  bool triggered;           // true once output has been triggered
  pthread_cond_t runnable;  // signaled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8520 RtApiOss :: RtApiOss()
\r
8522 // Nothing to do here.
\r
8525 RtApiOss :: ~RtApiOss()
\r
8527 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8530 unsigned int RtApiOss :: getDeviceCount( void )
\r
8532 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8533 if ( mixerfd == -1 ) {
\r
8534 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8535 error( RtAudioError::WARNING );
\r
8539 oss_sysinfo sysinfo;
\r
8540 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8542 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8543 error( RtAudioError::WARNING );
\r
8548 return sysinfo.numaudios;
\r
8551 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8553 RtAudio::DeviceInfo info;
\r
8554 info.probed = false;
\r
8556 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8557 if ( mixerfd == -1 ) {
\r
8558 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8559 error( RtAudioError::WARNING );
\r
8563 oss_sysinfo sysinfo;
\r
8564 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8565 if ( result == -1 ) {
\r
8567 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8568 error( RtAudioError::WARNING );
\r
8572 unsigned nDevices = sysinfo.numaudios;
\r
8573 if ( nDevices == 0 ) {
\r
8575 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8576 error( RtAudioError::INVALID_USE );
\r
8580 if ( device >= nDevices ) {
\r
8582 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8583 error( RtAudioError::INVALID_USE );
\r
8587 oss_audioinfo ainfo;
\r
8588 ainfo.dev = device;
\r
8589 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8591 if ( result == -1 ) {
\r
8592 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8593 errorText_ = errorStream_.str();
\r
8594 error( RtAudioError::WARNING );
\r
8599 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8600 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8601 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8602 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8603 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8606 // Probe data formats ... do for input
\r
8607 unsigned long mask = ainfo.iformats;
\r
8608 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8609 info.nativeFormats |= RTAUDIO_SINT16;
\r
8610 if ( mask & AFMT_S8 )
\r
8611 info.nativeFormats |= RTAUDIO_SINT8;
\r
8612 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8613 info.nativeFormats |= RTAUDIO_SINT32;
\r
8614 if ( mask & AFMT_FLOAT )
\r
8615 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8616 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8617 info.nativeFormats |= RTAUDIO_SINT24;
\r
8619 // Check that we have at least one supported format
\r
8620 if ( info.nativeFormats == 0 ) {
\r
8621 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8622 errorText_ = errorStream_.str();
\r
8623 error( RtAudioError::WARNING );
\r
8627 // Probe the supported sample rates.
\r
8628 info.sampleRates.clear();
\r
8629 if ( ainfo.nrates ) {
\r
8630 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8631 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8632 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8633 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8640 // Check min and max rate values;
\r
8641 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8642 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8643 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8647 if ( info.sampleRates.size() == 0 ) {
\r
8648 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8649 errorText_ = errorStream_.str();
\r
8650 error( RtAudioError::WARNING );
\r
8653 info.probed = true;
\r
8654 info.name = ainfo.name;
\r
8661 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8662 unsigned int firstChannel, unsigned int sampleRate,
\r
8663 RtAudioFormat format, unsigned int *bufferSize,
\r
8664 RtAudio::StreamOptions *options )
\r
8666 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8667 if ( mixerfd == -1 ) {
\r
8668 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8672 oss_sysinfo sysinfo;
\r
8673 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8674 if ( result == -1 ) {
\r
8676 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8680 unsigned nDevices = sysinfo.numaudios;
\r
8681 if ( nDevices == 0 ) {
\r
8682 // This should not happen because a check is made before this function is called.
\r
8684 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8688 if ( device >= nDevices ) {
\r
8689 // This should not happen because a check is made before this function is called.
\r
8691 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8695 oss_audioinfo ainfo;
\r
8696 ainfo.dev = device;
\r
8697 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8699 if ( result == -1 ) {
\r
8700 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8701 errorText_ = errorStream_.str();
\r
8705 // Check if device supports input or output
\r
8706 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8707 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8708 if ( mode == OUTPUT )
\r
8709 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8711 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8712 errorText_ = errorStream_.str();
\r
8717 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8718 if ( mode == OUTPUT )
\r
8719 flags |= O_WRONLY;
\r
8720 else { // mode == INPUT
\r
8721 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8722 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8723 close( handle->id[0] );
\r
8724 handle->id[0] = 0;
\r
8725 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8726 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8727 errorText_ = errorStream_.str();
\r
8730 // Check that the number previously set channels is the same.
\r
8731 if ( stream_.nUserChannels[0] != channels ) {
\r
8732 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8733 errorText_ = errorStream_.str();
\r
8739 flags |= O_RDONLY;
\r
8742 // Set exclusive access if specified.
\r
8743 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8745 // Try to open the device.
\r
8747 fd = open( ainfo.devnode, flags, 0 );
\r
8749 if ( errno == EBUSY )
\r
8750 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8752 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8753 errorText_ = errorStream_.str();
\r
8757 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8759 if ( flags | O_RDWR ) {
\r
8760 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8761 if ( result == -1) {
\r
8762 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8763 errorText_ = errorStream_.str();
\r
8769 // Check the device channel support.
\r
8770 stream_.nUserChannels[mode] = channels;
\r
8771 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8773 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8774 errorText_ = errorStream_.str();
\r
8778 // Set the number of channels.
\r
8779 int deviceChannels = channels + firstChannel;
\r
8780 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8781 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8783 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8784 errorText_ = errorStream_.str();
\r
8787 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8789 // Get the data format mask
\r
8791 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8792 if ( result == -1 ) {
\r
8794 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8795 errorText_ = errorStream_.str();
\r
8799 // Determine how to set the device format.
\r
8800 stream_.userFormat = format;
\r
8801 int deviceFormat = -1;
\r
8802 stream_.doByteSwap[mode] = false;
\r
8803 if ( format == RTAUDIO_SINT8 ) {
\r
8804 if ( mask & AFMT_S8 ) {
\r
8805 deviceFormat = AFMT_S8;
\r
8806 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8809 else if ( format == RTAUDIO_SINT16 ) {
\r
8810 if ( mask & AFMT_S16_NE ) {
\r
8811 deviceFormat = AFMT_S16_NE;
\r
8812 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8814 else if ( mask & AFMT_S16_OE ) {
\r
8815 deviceFormat = AFMT_S16_OE;
\r
8816 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8817 stream_.doByteSwap[mode] = true;
\r
8820 else if ( format == RTAUDIO_SINT24 ) {
\r
8821 if ( mask & AFMT_S24_NE ) {
\r
8822 deviceFormat = AFMT_S24_NE;
\r
8823 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8825 else if ( mask & AFMT_S24_OE ) {
\r
8826 deviceFormat = AFMT_S24_OE;
\r
8827 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8828 stream_.doByteSwap[mode] = true;
\r
8831 else if ( format == RTAUDIO_SINT32 ) {
\r
8832 if ( mask & AFMT_S32_NE ) {
\r
8833 deviceFormat = AFMT_S32_NE;
\r
8834 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8836 else if ( mask & AFMT_S32_OE ) {
\r
8837 deviceFormat = AFMT_S32_OE;
\r
8838 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8839 stream_.doByteSwap[mode] = true;
\r
8843 if ( deviceFormat == -1 ) {
\r
8844 // The user requested format is not natively supported by the device.
\r
8845 if ( mask & AFMT_S16_NE ) {
\r
8846 deviceFormat = AFMT_S16_NE;
\r
8847 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8849 else if ( mask & AFMT_S32_NE ) {
\r
8850 deviceFormat = AFMT_S32_NE;
\r
8851 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8853 else if ( mask & AFMT_S24_NE ) {
\r
8854 deviceFormat = AFMT_S24_NE;
\r
8855 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8857 else if ( mask & AFMT_S16_OE ) {
\r
8858 deviceFormat = AFMT_S16_OE;
\r
8859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8860 stream_.doByteSwap[mode] = true;
\r
8862 else if ( mask & AFMT_S32_OE ) {
\r
8863 deviceFormat = AFMT_S32_OE;
\r
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8865 stream_.doByteSwap[mode] = true;
\r
8867 else if ( mask & AFMT_S24_OE ) {
\r
8868 deviceFormat = AFMT_S24_OE;
\r
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8870 stream_.doByteSwap[mode] = true;
\r
8872 else if ( mask & AFMT_S8) {
\r
8873 deviceFormat = AFMT_S8;
\r
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8878 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8879 // This really shouldn't happen ...
\r
8881 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8882 errorText_ = errorStream_.str();
\r
8886 // Set the data format.
\r
8887 int temp = deviceFormat;
\r
8888 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8889 if ( result == -1 || deviceFormat != temp ) {
\r
8891 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8892 errorText_ = errorStream_.str();
\r
8896 // Attempt to set the buffer size. According to OSS, the minimum
\r
8897 // number of buffers is two. The supposed minimum buffer size is 16
\r
8898 // bytes, so that will be our lower bound. The argument to this
\r
8899 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8900 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8901 // We'll check the actual value used near the end of the setup
\r
8903 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8904 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8906 if ( options ) buffers = options->numberOfBuffers;
\r
8907 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8908 if ( buffers < 2 ) buffers = 3;
\r
8909 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8910 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8911 if ( result == -1 ) {
\r
8913 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8914 errorText_ = errorStream_.str();
\r
8917 stream_.nBuffers = buffers;
\r
8919 // Save buffer size (in sample frames).
\r
8920 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8921 stream_.bufferSize = *bufferSize;
\r
8923 // Set the sample rate.
\r
8924 int srate = sampleRate;
\r
8925 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8926 if ( result == -1 ) {
\r
8928 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8929 errorText_ = errorStream_.str();
\r
8933 // Verify the sample rate setup worked.
\r
8934 if ( abs( srate - sampleRate ) > 100 ) {
\r
8936 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8937 errorText_ = errorStream_.str();
\r
8940 stream_.sampleRate = sampleRate;
\r
8942 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8943 // We're doing duplex setup here.
\r
8944 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8945 stream_.nDeviceChannels[0] = deviceChannels;
\r
8948 // Set interleaving parameters.
\r
8949 stream_.userInterleaved = true;
\r
8950 stream_.deviceInterleaved[mode] = true;
\r
8951 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8952 stream_.userInterleaved = false;
\r
8954 // Set flags for buffer conversion
\r
8955 stream_.doConvertBuffer[mode] = false;
\r
8956 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8957 stream_.doConvertBuffer[mode] = true;
\r
8958 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8959 stream_.doConvertBuffer[mode] = true;
\r
8960 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8961 stream_.nUserChannels[mode] > 1 )
\r
8962 stream_.doConvertBuffer[mode] = true;
\r
8964 // Allocate the stream handles if necessary and then save.
\r
8965 if ( stream_.apiHandle == 0 ) {
\r
8967 handle = new OssHandle;
\r
8969 catch ( std::bad_alloc& ) {
\r
8970 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8974 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8975 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8979 stream_.apiHandle = (void *) handle;
\r
8982 handle = (OssHandle *) stream_.apiHandle;
\r
8984 handle->id[mode] = fd;
\r
8986 // Allocate necessary internal buffers.
\r
8987 unsigned long bufferBytes;
\r
8988 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8989 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8990 if ( stream_.userBuffer[mode] == NULL ) {
\r
8991 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8995 if ( stream_.doConvertBuffer[mode] ) {
\r
8997 bool makeBuffer = true;
\r
8998 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8999 if ( mode == INPUT ) {
\r
9000 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9001 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9002 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9006 if ( makeBuffer ) {
\r
9007 bufferBytes *= *bufferSize;
\r
9008 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9009 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9010 if ( stream_.deviceBuffer == NULL ) {
\r
9011 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9017 stream_.device[mode] = device;
\r
9018 stream_.state = STREAM_STOPPED;
\r
9020 // Setup the buffer conversion information structure.
\r
9021 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9023 // Setup thread if necessary.
\r
9024 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9025 // We had already set up an output stream.
\r
9026 stream_.mode = DUPLEX;
\r
9027 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9030 stream_.mode = mode;
\r
9032 // Setup callback thread.
\r
9033 stream_.callbackInfo.object = (void *) this;
\r
9035 // Set the thread attributes for joinable and realtime scheduling
\r
9036 // priority. The higher priority will only take affect if the
\r
9037 // program is run as root or suid.
\r
9038 pthread_attr_t attr;
\r
9039 pthread_attr_init( &attr );
\r
9040 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9041 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9042 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9043 struct sched_param param;
\r
9044 int priority = options->priority;
\r
9045 int min = sched_get_priority_min( SCHED_RR );
\r
9046 int max = sched_get_priority_max( SCHED_RR );
\r
9047 if ( priority < min ) priority = min;
\r
9048 else if ( priority > max ) priority = max;
\r
9049 param.sched_priority = priority;
\r
9050 pthread_attr_setschedparam( &attr, ¶m );
\r
9051 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9054 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9056 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9059 stream_.callbackInfo.isRunning = true;
\r
9060 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9061 pthread_attr_destroy( &attr );
\r
9063 stream_.callbackInfo.isRunning = false;
\r
9064 errorText_ = "RtApiOss::error creating callback thread!";
\r
9073 pthread_cond_destroy( &handle->runnable );
\r
9074 if ( handle->id[0] ) close( handle->id[0] );
\r
9075 if ( handle->id[1] ) close( handle->id[1] );
\r
9077 stream_.apiHandle = 0;
\r
9080 for ( int i=0; i<2; i++ ) {
\r
9081 if ( stream_.userBuffer[i] ) {
\r
9082 free( stream_.userBuffer[i] );
\r
9083 stream_.userBuffer[i] = 0;
\r
9087 if ( stream_.deviceBuffer ) {
\r
9088 free( stream_.deviceBuffer );
\r
9089 stream_.deviceBuffer = 0;
\r
9095 void RtApiOss :: closeStream()
\r
9097 if ( stream_.state == STREAM_CLOSED ) {
\r
9098 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9099 error( RtAudioError::WARNING );
\r
9103 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9104 stream_.callbackInfo.isRunning = false;
\r
9105 MUTEX_LOCK( &stream_.mutex );
\r
9106 if ( stream_.state == STREAM_STOPPED )
\r
9107 pthread_cond_signal( &handle->runnable );
\r
9108 MUTEX_UNLOCK( &stream_.mutex );
\r
9109 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9111 if ( stream_.state == STREAM_RUNNING ) {
\r
9112 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9113 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9115 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9116 stream_.state = STREAM_STOPPED;
\r
9120 pthread_cond_destroy( &handle->runnable );
\r
9121 if ( handle->id[0] ) close( handle->id[0] );
\r
9122 if ( handle->id[1] ) close( handle->id[1] );
\r
9124 stream_.apiHandle = 0;
\r
9127 for ( int i=0; i<2; i++ ) {
\r
9128 if ( stream_.userBuffer[i] ) {
\r
9129 free( stream_.userBuffer[i] );
\r
9130 stream_.userBuffer[i] = 0;
\r
9134 if ( stream_.deviceBuffer ) {
\r
9135 free( stream_.deviceBuffer );
\r
9136 stream_.deviceBuffer = 0;
\r
9139 stream_.mode = UNINITIALIZED;
\r
9140 stream_.state = STREAM_CLOSED;
\r
9143 void RtApiOss :: startStream()
\r
9146 if ( stream_.state == STREAM_RUNNING ) {
\r
9147 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9148 error( RtAudioError::WARNING );
\r
9152 MUTEX_LOCK( &stream_.mutex );
\r
9154 stream_.state = STREAM_RUNNING;
\r
9156 // No need to do anything else here ... OSS automatically starts
\r
9157 // when fed samples.
\r
9159 MUTEX_UNLOCK( &stream_.mutex );
\r
9161 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9162 pthread_cond_signal( &handle->runnable );
\r
9165 void RtApiOss :: stopStream()
\r
9168 if ( stream_.state == STREAM_STOPPED ) {
\r
9169 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9170 error( RtAudioError::WARNING );
\r
9174 MUTEX_LOCK( &stream_.mutex );
\r
9176 // The state might change while waiting on a mutex.
\r
9177 if ( stream_.state == STREAM_STOPPED ) {
\r
9178 MUTEX_UNLOCK( &stream_.mutex );
\r
9183 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9184 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9186 // Flush the output with zeros a few times.
\r
9189 RtAudioFormat format;
\r
9191 if ( stream_.doConvertBuffer[0] ) {
\r
9192 buffer = stream_.deviceBuffer;
\r
9193 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9194 format = stream_.deviceFormat[0];
\r
9197 buffer = stream_.userBuffer[0];
\r
9198 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9199 format = stream_.userFormat;
\r
9202 memset( buffer, 0, samples * formatBytes(format) );
\r
9203 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9204 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9205 if ( result == -1 ) {
\r
9206 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9207 error( RtAudioError::WARNING );
\r
9211 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9212 if ( result == -1 ) {
\r
9213 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9214 errorText_ = errorStream_.str();
\r
9217 handle->triggered = false;
\r
9220 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9221 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9222 if ( result == -1 ) {
\r
9223 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9224 errorText_ = errorStream_.str();
\r
9230 stream_.state = STREAM_STOPPED;
\r
9231 MUTEX_UNLOCK( &stream_.mutex );
\r
9233 if ( result != -1 ) return;
\r
9234 error( RtAudioError::SYSTEM_ERROR );
\r
9237 void RtApiOss :: abortStream()
\r
9240 if ( stream_.state == STREAM_STOPPED ) {
\r
9241 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9242 error( RtAudioError::WARNING );
\r
9246 MUTEX_LOCK( &stream_.mutex );
\r
9248 // The state might change while waiting on a mutex.
\r
9249 if ( stream_.state == STREAM_STOPPED ) {
\r
9250 MUTEX_UNLOCK( &stream_.mutex );
\r
9255 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9256 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9257 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9258 if ( result == -1 ) {
\r
9259 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9260 errorText_ = errorStream_.str();
\r
9263 handle->triggered = false;
\r
9266 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9267 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9268 if ( result == -1 ) {
\r
9269 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9270 errorText_ = errorStream_.str();
\r
9276 stream_.state = STREAM_STOPPED;
\r
9277 MUTEX_UNLOCK( &stream_.mutex );
\r
9279 if ( result != -1 ) return;
\r
9280 error( RtAudioError::SYSTEM_ERROR );
\r
9283 void RtApiOss :: callbackEvent()
\r
9285 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9286 if ( stream_.state == STREAM_STOPPED ) {
\r
9287 MUTEX_LOCK( &stream_.mutex );
\r
9288 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9289 if ( stream_.state != STREAM_RUNNING ) {
\r
9290 MUTEX_UNLOCK( &stream_.mutex );
\r
9293 MUTEX_UNLOCK( &stream_.mutex );
\r
9296 if ( stream_.state == STREAM_CLOSED ) {
\r
9297 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9298 error( RtAudioError::WARNING );
\r
9302 // Invoke user callback to get fresh output data.
\r
9303 int doStopStream = 0;
\r
9304 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9305 double streamTime = getStreamTime();
\r
9306 RtAudioStreamStatus status = 0;
\r
9307 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9308 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9309 handle->xrun[0] = false;
\r
9311 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9312 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9313 handle->xrun[1] = false;
\r
9315 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9316 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9317 if ( doStopStream == 2 ) {
\r
9318 this->abortStream();
\r
9322 MUTEX_LOCK( &stream_.mutex );
\r
9324 // The state might change while waiting on a mutex.
\r
9325 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9330 RtAudioFormat format;
\r
9332 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9334 // Setup parameters and do buffer conversion if necessary.
\r
9335 if ( stream_.doConvertBuffer[0] ) {
\r
9336 buffer = stream_.deviceBuffer;
\r
9337 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9338 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9339 format = stream_.deviceFormat[0];
\r
9342 buffer = stream_.userBuffer[0];
\r
9343 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9344 format = stream_.userFormat;
\r
9347 // Do byte swapping if necessary.
\r
9348 if ( stream_.doByteSwap[0] )
\r
9349 byteSwapBuffer( buffer, samples, format );
\r
9351 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9353 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9354 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9355 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9356 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9357 handle->triggered = true;
\r
9360 // Write samples to device.
\r
9361 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9363 if ( result == -1 ) {
\r
9364 // We'll assume this is an underrun, though there isn't a
\r
9365 // specific means for determining that.
\r
9366 handle->xrun[0] = true;
\r
9367 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9368 error( RtAudioError::WARNING );
\r
9369 // Continue on to input section.
\r
9373 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9375 // Setup parameters.
\r
9376 if ( stream_.doConvertBuffer[1] ) {
\r
9377 buffer = stream_.deviceBuffer;
\r
9378 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9379 format = stream_.deviceFormat[1];
\r
9382 buffer = stream_.userBuffer[1];
\r
9383 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9384 format = stream_.userFormat;
\r
9387 // Read samples from device.
\r
9388 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9390 if ( result == -1 ) {
\r
9391 // We'll assume this is an overrun, though there isn't a
\r
9392 // specific means for determining that.
\r
9393 handle->xrun[1] = true;
\r
9394 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9395 error( RtAudioError::WARNING );
\r
9399 // Do byte swapping if necessary.
\r
9400 if ( stream_.doByteSwap[1] )
\r
9401 byteSwapBuffer( buffer, samples, format );
\r
9403 // Do buffer conversion if necessary.
\r
9404 if ( stream_.doConvertBuffer[1] )
\r
9405 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9409 MUTEX_UNLOCK( &stream_.mutex );
\r
9411 RtApi::tickStreamTime();
\r
9412 if ( doStopStream == 1 ) this->stopStream();
\r
9415 static void *ossCallbackHandler( void *ptr )
\r
9417 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9418 RtApiOss *object = (RtApiOss *) info->object;
\r
9419 bool *isRunning = &info->isRunning;
\r
9421 while ( *isRunning == true ) {
\r
9422 pthread_testcancel();
\r
9423 object->callbackEvent();
\r
9426 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
\r
9441 void RtApi :: error( RtAudioError::Type type )
\r
9443 errorStream_.str(""); // clear the ostringstream
\r
9445 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9446 if ( errorCallback ) {
\r
9447 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9449 if ( firstErrorOccurred_ )
\r
9452 firstErrorOccurred_ = true;
\r
9453 const std::string errorMessage = errorText_;
\r
9455 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9456 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9460 errorCallback( type, errorMessage );
\r
9461 firstErrorOccurred_ = false;
\r
9465 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9466 std::cerr << '\n' << errorText_ << "\n\n";
\r
9467 else if ( type != RtAudioError::WARNING )
\r
9468 throw( RtAudioError( errorText_, type ) );
\r
9471 void RtApi :: verifyStream()
\r
9473 if ( stream_.state == STREAM_CLOSED ) {
\r
9474 errorText_ = "RtApi:: a stream is not open!";
\r
9475 error( RtAudioError::INVALID_USE );
\r
9479 void RtApi :: clearStreamInfo()
\r
9481 stream_.mode = UNINITIALIZED;
\r
9482 stream_.state = STREAM_CLOSED;
\r
9483 stream_.sampleRate = 0;
\r
9484 stream_.bufferSize = 0;
\r
9485 stream_.nBuffers = 0;
\r
9486 stream_.userFormat = 0;
\r
9487 stream_.userInterleaved = true;
\r
9488 stream_.streamTime = 0.0;
\r
9489 stream_.apiHandle = 0;
\r
9490 stream_.deviceBuffer = 0;
\r
9491 stream_.callbackInfo.callback = 0;
\r
9492 stream_.callbackInfo.userData = 0;
\r
9493 stream_.callbackInfo.isRunning = false;
\r
9494 stream_.callbackInfo.errorCallback = 0;
\r
9495 for ( int i=0; i<2; i++ ) {
\r
9496 stream_.device[i] = 11111;
\r
9497 stream_.doConvertBuffer[i] = false;
\r
9498 stream_.deviceInterleaved[i] = true;
\r
9499 stream_.doByteSwap[i] = false;
\r
9500 stream_.nUserChannels[i] = 0;
\r
9501 stream_.nDeviceChannels[i] = 0;
\r
9502 stream_.channelOffset[i] = 0;
\r
9503 stream_.deviceFormat[i] = 0;
\r
9504 stream_.latency[i] = 0;
\r
9505 stream_.userBuffer[i] = 0;
\r
9506 stream_.convertInfo[i].channels = 0;
\r
9507 stream_.convertInfo[i].inJump = 0;
\r
9508 stream_.convertInfo[i].outJump = 0;
\r
9509 stream_.convertInfo[i].inFormat = 0;
\r
9510 stream_.convertInfo[i].outFormat = 0;
\r
9511 stream_.convertInfo[i].inOffset.clear();
\r
9512 stream_.convertInfo[i].outOffset.clear();
\r
9516 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9518 if ( format == RTAUDIO_SINT16 )
\r
9520 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9522 else if ( format == RTAUDIO_FLOAT64 )
\r
9524 else if ( format == RTAUDIO_SINT24 )
\r
9526 else if ( format == RTAUDIO_SINT8 )
\r
9529 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9530 error( RtAudioError::WARNING );
\r
9535 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9537 if ( mode == INPUT ) { // convert device to user buffer
\r
9538 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9539 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9540 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9541 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9543 else { // convert user to device buffer
\r
9544 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9545 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9546 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9547 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9550 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9551 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9553 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9555 // Set up the interleave/deinterleave offsets.
\r
9556 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9557 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9558 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9559 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9560 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9561 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9562 stream_.convertInfo[mode].inJump = 1;
\r
9566 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9567 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9568 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9569 stream_.convertInfo[mode].outJump = 1;
\r
9573 else { // no (de)interleaving
\r
9574 if ( stream_.userInterleaved ) {
\r
9575 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9576 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9577 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9581 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9582 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9583 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9584 stream_.convertInfo[mode].inJump = 1;
\r
9585 stream_.convertInfo[mode].outJump = 1;
\r
9590 // Add channel offset.
\r
9591 if ( firstChannel > 0 ) {
\r
9592 if ( stream_.deviceInterleaved[mode] ) {
\r
9593 if ( mode == OUTPUT ) {
\r
9594 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9595 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9598 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9599 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9603 if ( mode == OUTPUT ) {
\r
9604 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9605 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9608 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9609 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9615 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9617 // This function does format conversion, input/output channel compensation, and
\r
9618 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9619 // the lower three bytes of a 32-bit integer.
\r
9621 // Clear our device buffer when in/out duplex device channels are different
\r
9622 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9623 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9624 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9627 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9629 Float64 *out = (Float64 *)outBuffer;
\r
9631 if (info.inFormat == RTAUDIO_SINT8) {
\r
9632 signed char *in = (signed char *)inBuffer;
\r
9633 scale = 1.0 / 127.5;
\r
9634 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9635 for (j=0; j<info.channels; j++) {
\r
9636 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9637 out[info.outOffset[j]] += 0.5;
\r
9638 out[info.outOffset[j]] *= scale;
\r
9640 in += info.inJump;
\r
9641 out += info.outJump;
\r
9644 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9645 Int16 *in = (Int16 *)inBuffer;
\r
9646 scale = 1.0 / 32767.5;
\r
9647 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9648 for (j=0; j<info.channels; j++) {
\r
9649 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9650 out[info.outOffset[j]] += 0.5;
\r
9651 out[info.outOffset[j]] *= scale;
\r
9653 in += info.inJump;
\r
9654 out += info.outJump;
\r
9657 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9658 Int24 *in = (Int24 *)inBuffer;
\r
9659 scale = 1.0 / 8388607.5;
\r
9660 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9661 for (j=0; j<info.channels; j++) {
\r
9662 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9663 out[info.outOffset[j]] += 0.5;
\r
9664 out[info.outOffset[j]] *= scale;
\r
9666 in += info.inJump;
\r
9667 out += info.outJump;
\r
9670 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9671 Int32 *in = (Int32 *)inBuffer;
\r
9672 scale = 1.0 / 2147483647.5;
\r
9673 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9674 for (j=0; j<info.channels; j++) {
\r
9675 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9676 out[info.outOffset[j]] += 0.5;
\r
9677 out[info.outOffset[j]] *= scale;
\r
9679 in += info.inJump;
\r
9680 out += info.outJump;
\r
9683 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9684 Float32 *in = (Float32 *)inBuffer;
\r
9685 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9686 for (j=0; j<info.channels; j++) {
\r
9687 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9689 in += info.inJump;
\r
9690 out += info.outJump;
\r
9693 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9694 // Channel compensation and/or (de)interleaving only.
\r
9695 Float64 *in = (Float64 *)inBuffer;
\r
9696 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9697 for (j=0; j<info.channels; j++) {
\r
9698 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9700 in += info.inJump;
\r
9701 out += info.outJump;
\r
9705 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9707 Float32 *out = (Float32 *)outBuffer;
\r
9709 if (info.inFormat == RTAUDIO_SINT8) {
\r
9710 signed char *in = (signed char *)inBuffer;
\r
9711 scale = (Float32) ( 1.0 / 127.5 );
\r
9712 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9713 for (j=0; j<info.channels; j++) {
\r
9714 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9715 out[info.outOffset[j]] += 0.5;
\r
9716 out[info.outOffset[j]] *= scale;
\r
9718 in += info.inJump;
\r
9719 out += info.outJump;
\r
9722 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9723 Int16 *in = (Int16 *)inBuffer;
\r
9724 scale = (Float32) ( 1.0 / 32767.5 );
\r
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9726 for (j=0; j<info.channels; j++) {
\r
9727 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9728 out[info.outOffset[j]] += 0.5;
\r
9729 out[info.outOffset[j]] *= scale;
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9736 Int24 *in = (Int24 *)inBuffer;
\r
9737 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9739 for (j=0; j<info.channels; j++) {
\r
9740 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9741 out[info.outOffset[j]] += 0.5;
\r
9742 out[info.outOffset[j]] *= scale;
\r
9744 in += info.inJump;
\r
9745 out += info.outJump;
\r
9748 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9749 Int32 *in = (Int32 *)inBuffer;
\r
9750 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9751 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9752 for (j=0; j<info.channels; j++) {
\r
9753 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9754 out[info.outOffset[j]] += 0.5;
\r
9755 out[info.outOffset[j]] *= scale;
\r
9757 in += info.inJump;
\r
9758 out += info.outJump;
\r
9761 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9762 // Channel compensation and/or (de)interleaving only.
\r
9763 Float32 *in = (Float32 *)inBuffer;
\r
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9765 for (j=0; j<info.channels; j++) {
\r
9766 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9768 in += info.inJump;
\r
9769 out += info.outJump;
\r
9772 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9773 Float64 *in = (Float64 *)inBuffer;
\r
9774 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9775 for (j=0; j<info.channels; j++) {
\r
9776 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9778 in += info.inJump;
\r
9779 out += info.outJump;
\r
9783 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9784 Int32 *out = (Int32 *)outBuffer;
\r
9785 if (info.inFormat == RTAUDIO_SINT8) {
\r
9786 signed char *in = (signed char *)inBuffer;
\r
9787 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9788 for (j=0; j<info.channels; j++) {
\r
9789 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9790 out[info.outOffset[j]] <<= 24;
\r
9792 in += info.inJump;
\r
9793 out += info.outJump;
\r
9796 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9797 Int16 *in = (Int16 *)inBuffer;
\r
9798 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9799 for (j=0; j<info.channels; j++) {
\r
9800 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9801 out[info.outOffset[j]] <<= 16;
\r
9803 in += info.inJump;
\r
9804 out += info.outJump;
\r
9807 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9808 Int24 *in = (Int24 *)inBuffer;
\r
9809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9810 for (j=0; j<info.channels; j++) {
\r
9811 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9812 out[info.outOffset[j]] <<= 8;
\r
9814 in += info.inJump;
\r
9815 out += info.outJump;
\r
9818 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9819 // Channel compensation and/or (de)interleaving only.
\r
9820 Int32 *in = (Int32 *)inBuffer;
\r
9821 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9822 for (j=0; j<info.channels; j++) {
\r
9823 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9825 in += info.inJump;
\r
9826 out += info.outJump;
\r
9829 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9830 Float32 *in = (Float32 *)inBuffer;
\r
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9832 for (j=0; j<info.channels; j++) {
\r
9833 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9835 in += info.inJump;
\r
9836 out += info.outJump;
\r
9839 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9840 Float64 *in = (Float64 *)inBuffer;
\r
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9842 for (j=0; j<info.channels; j++) {
\r
9843 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9845 in += info.inJump;
\r
9846 out += info.outJump;
\r
9850 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9851 Int24 *out = (Int24 *)outBuffer;
\r
9852 if (info.inFormat == RTAUDIO_SINT8) {
\r
9853 signed char *in = (signed char *)inBuffer;
\r
9854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9855 for (j=0; j<info.channels; j++) {
\r
9856 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9857 //out[info.outOffset[j]] <<= 16;
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9864 Int16 *in = (Int16 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9868 //out[info.outOffset[j]] <<= 8;
\r
9870 in += info.inJump;
\r
9871 out += info.outJump;
\r
9874 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9875 // Channel compensation and/or (de)interleaving only.
\r
9876 Int24 *in = (Int24 *)inBuffer;
\r
9877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9878 for (j=0; j<info.channels; j++) {
\r
9879 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9881 in += info.inJump;
\r
9882 out += info.outJump;
\r
9885 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9886 Int32 *in = (Int32 *)inBuffer;
\r
9887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9888 for (j=0; j<info.channels; j++) {
\r
9889 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9890 //out[info.outOffset[j]] >>= 8;
\r
9892 in += info.inJump;
\r
9893 out += info.outJump;
\r
9896 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9897 Float32 *in = (Float32 *)inBuffer;
\r
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9899 for (j=0; j<info.channels; j++) {
\r
9900 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9902 in += info.inJump;
\r
9903 out += info.outJump;
\r
9906 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9907 Float64 *in = (Float64 *)inBuffer;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9912 in += info.inJump;
\r
9913 out += info.outJump;
\r
9917 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9918 Int16 *out = (Int16 *)outBuffer;
\r
9919 if (info.inFormat == RTAUDIO_SINT8) {
\r
9920 signed char *in = (signed char *)inBuffer;
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9924 out[info.outOffset[j]] <<= 8;
\r
9926 in += info.inJump;
\r
9927 out += info.outJump;
\r
9930 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9931 // Channel compensation and/or (de)interleaving only.
\r
9932 Int16 *in = (Int16 *)inBuffer;
\r
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9934 for (j=0; j<info.channels; j++) {
\r
9935 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9941 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9942 Int24 *in = (Int24 *)inBuffer;
\r
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9944 for (j=0; j<info.channels; j++) {
\r
9945 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9947 in += info.inJump;
\r
9948 out += info.outJump;
\r
9951 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9952 Int32 *in = (Int32 *)inBuffer;
\r
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9954 for (j=0; j<info.channels; j++) {
\r
9955 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9957 in += info.inJump;
\r
9958 out += info.outJump;
\r
9961 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9962 Float32 *in = (Float32 *)inBuffer;
\r
9963 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9964 for (j=0; j<info.channels; j++) {
\r
9965 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9967 in += info.inJump;
\r
9968 out += info.outJump;
\r
9971 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9972 Float64 *in = (Float64 *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9977 in += info.inJump;
\r
9978 out += info.outJump;
\r
9982 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9983 signed char *out = (signed char *)outBuffer;
\r
9984 if (info.inFormat == RTAUDIO_SINT8) {
\r
9985 // Channel compensation and/or (de)interleaving only.
\r
9986 signed char *in = (signed char *)inBuffer;
\r
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9988 for (j=0; j<info.channels; j++) {
\r
9989 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 if (info.inFormat == RTAUDIO_SINT16) {
\r
9996 Int16 *in = (Int16 *)inBuffer;
\r
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9998 for (j=0; j<info.channels; j++) {
\r
9999 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10001 in += info.inJump;
\r
10002 out += info.outJump;
\r
10005 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10006 Int24 *in = (Int24 *)inBuffer;
\r
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10008 for (j=0; j<info.channels; j++) {
\r
10009 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10011 in += info.inJump;
\r
10012 out += info.outJump;
\r
10015 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10016 Int32 *in = (Int32 *)inBuffer;
\r
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10018 for (j=0; j<info.channels; j++) {
\r
10019 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10021 in += info.inJump;
\r
10022 out += info.outJump;
\r
10025 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10026 Float32 *in = (Float32 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10035 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10036 Float64 *in = (Float64 *)inBuffer;
\r
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10038 for (j=0; j<info.channels; j++) {
\r
10039 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10041 in += info.inJump;
\r
10042 out += info.outJump;
\r
10048 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10049 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10050 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10052 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10054 register char val;
\r
10055 register char *ptr;
\r
10058 if ( format == RTAUDIO_SINT16 ) {
\r
10059 for ( unsigned int i=0; i<samples; i++ ) {
\r
10060 // Swap 1st and 2nd bytes.
\r
10062 *(ptr) = *(ptr+1);
\r
10065 // Increment 2 bytes.
\r
10069 else if ( format == RTAUDIO_SINT32 ||
\r
10070 format == RTAUDIO_FLOAT32 ) {
\r
10071 for ( unsigned int i=0; i<samples; i++ ) {
\r
10072 // Swap 1st and 4th bytes.
\r
10074 *(ptr) = *(ptr+3);
\r
10077 // Swap 2nd and 3rd bytes.
\r
10080 *(ptr) = *(ptr+1);
\r
10083 // Increment 3 more bytes.
\r
10087 else if ( format == RTAUDIO_SINT24 ) {
\r
10088 for ( unsigned int i=0; i<samples; i++ ) {
\r
10089 // Swap 1st and 3rd bytes.
\r
10091 *(ptr) = *(ptr+2);
\r
10094 // Increment 2 more bytes.
\r
10098 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10099 for ( unsigned int i=0; i<samples; i++ ) {
\r
10100 // Swap 1st and 8th bytes
\r
10102 *(ptr) = *(ptr+7);
\r
10105 // Swap 2nd and 7th bytes
\r
10108 *(ptr) = *(ptr+5);
\r
10111 // Swap 3rd and 6th bytes
\r
10114 *(ptr) = *(ptr+3);
\r
10117 // Swap 4th and 5th bytes
\r
10120 *(ptr) = *(ptr+1);
\r
10123 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r