/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
// RtAudio: Version 4.1.0
#include "RtAudio.h"

#include <algorithm>
#include <climits>
#include <cstdlib>
#include <cstring>
#include <iostream>
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections on the
// Windows backends, pthread mutexes on the POSIX backends, and no-op
// dummies when no real-time API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //

#if defined(__MACOSX_CORE__)

// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1686 if ( handle->drainCounter ) {
\r
1687 handle->drainCounter++;
\r
1692 AudioDeviceID inputDevice;
\r
1693 inputDevice = handle->id[1];
\r
1694 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1696 if ( handle->nStreams[1] == 1 ) {
\r
1697 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1698 convertBuffer( stream_.userBuffer[1],
\r
1699 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1700 stream_.convertInfo[1] );
\r
1702 else { // copy to user buffer
\r
1703 memcpy( stream_.userBuffer[1],
\r
1704 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1708 else { // read from multiple streams
\r
1709 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1710 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1712 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1713 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1714 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1715 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1716 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1719 else { // read from multiple multi-channel streams
\r
1720 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1721 Float32 *out, *in;
\r
1723 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1724 UInt32 outChannels = stream_.nUserChannels[1];
\r
1725 if ( stream_.doConvertBuffer[1] ) {
\r
1726 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1727 outChannels = stream_.nDeviceChannels[1];
\r
1730 if ( outInterleaved ) outOffset = 1;
\r
1731 else outOffset = stream_.bufferSize;
\r
1733 channelsLeft = outChannels;
\r
1734 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1736 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1737 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1740 // Account for possible channel offset in first stream
\r
1741 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1742 streamChannels -= stream_.channelOffset[1];
\r
1743 inJump = stream_.channelOffset[1];
\r
1747 // Account for possible unread channels at end of the last stream
\r
1748 if ( streamChannels > channelsLeft ) {
\r
1749 inJump = streamChannels - channelsLeft;
\r
1750 streamChannels = channelsLeft;
\r
1753 // Determine output buffer offsets and skips
\r
1754 if ( outInterleaved ) {
\r
1755 outJump = outChannels;
\r
1756 out += outChannels - channelsLeft;
\r
1760 out += (outChannels - channelsLeft) * outOffset;
\r
1763 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1764 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1765 out[j*outOffset] = *in++;
\r
1770 channelsLeft -= streamChannels;
\r
1774 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1775 convertBuffer( stream_.userBuffer[1],
\r
1776 stream_.deviceBuffer,
\r
1777 stream_.convertInfo[1] );
\r
1783 //MUTEX_UNLOCK( &stream_.mutex );
\r
1785 RtApi::tickStreamTime();
\r
1789 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1793 case kAudioHardwareNotRunningError:
\r
1794 return "kAudioHardwareNotRunningError";
\r
1796 case kAudioHardwareUnspecifiedError:
\r
1797 return "kAudioHardwareUnspecifiedError";
\r
1799 case kAudioHardwareUnknownPropertyError:
\r
1800 return "kAudioHardwareUnknownPropertyError";
\r
1802 case kAudioHardwareBadPropertySizeError:
\r
1803 return "kAudioHardwareBadPropertySizeError";
\r
1805 case kAudioHardwareIllegalOperationError:
\r
1806 return "kAudioHardwareIllegalOperationError";
\r
1808 case kAudioHardwareBadObjectError:
\r
1809 return "kAudioHardwareBadObjectError";
\r
1811 case kAudioHardwareBadDeviceError:
\r
1812 return "kAudioHardwareBadDeviceError";
\r
1814 case kAudioHardwareBadStreamError:
\r
1815 return "kAudioHardwareBadStreamError";
\r
1817 case kAudioHardwareUnsupportedOperationError:
\r
1818 return "kAudioHardwareUnsupportedOperationError";
\r
1820 case kAudioDeviceUnsupportedFormatError:
\r
1821 return "kAudioDeviceUnsupportedFormatError";
\r
1823 case kAudioDevicePermissionsError:
\r
1824 return "kAudioDevicePermissionsError";
\r
1827 return "CoreAudio unknown error";
\r
1831 //******************** End of __MACOSX_CORE__ *********************//
\r
1834 #if defined(__UNIX_JACK__)
\r
1836 // JACK is a low-latency audio server, originally written for the
\r
1837 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1838 // connect a number of different applications to an audio device, as
\r
1839 // well as allowing them to share audio between themselves.
\r
1841 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1842 // have ports connected to the server. The JACK server is typically
\r
1843 // started in a terminal as follows:
\r
1845 // .jackd -d alsa -d hw:0
\r
1847 // or through an interface program such as qjackctl. Many of the
\r
1848 // parameters normally set for a stream are fixed by the JACK server
\r
1849 // and can be specified when the JACK server is started. In
\r
1852 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1854 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1855 // frames, and number of buffers = 4. Once the server is running, it
\r
1856 // is not possible to override these values. If the values are not
\r
1857 // specified in the command-line, the JACK server uses default values.
\r
1859 // The JACK server does not have to be running when an instance of
\r
1860 // RtApiJack is created, though the function getDeviceCount() will
\r
1861 // report 0 devices found until JACK has been started. When no
\r
1862 // devices are available (i.e., the JACK server is not running), a
\r
1863 // stream cannot be opened.
\r
1865 #include <jack/jack.h>
\r
1866 #include <unistd.h>
\r
1869 // A structure to hold various information related to the Jack API
\r
1870 // implementation.
\r
1871 struct JackHandle {
\r
1872 jack_client_t *client;
\r
1873 jack_port_t **ports[2];
\r
1874 std::string deviceName[2];
\r
1876 pthread_cond_t condition;
\r
1877 int drainCounter; // Tracks callback counts when draining
\r
1878 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1881 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed so JACK's internal diagnostics stay quiet
// unless the library is built with __RTAUDIO_DEBUG__.
static void jackSilentError( const char * /*message*/ ) {}
\r
1886 RtApiJack :: RtApiJack()
\r
1888 // Nothing to do here.
\r
1889 #if !defined(__RTAUDIO_DEBUG__)
\r
1890 // Turn off Jack's internal error reporting.
\r
1891 jack_set_error_function( &jackSilentError );
\r
1895 RtApiJack :: ~RtApiJack()
\r
1897 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1900 unsigned int RtApiJack :: getDeviceCount( void )
\r
1902 // See if we can become a jack client.
\r
1903 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1904 jack_status_t *status = NULL;
\r
1905 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1906 if ( client == 0 ) return 0;
\r
1908 const char **ports;
\r
1909 std::string port, previousPort;
\r
1910 unsigned int nChannels = 0, nDevices = 0;
\r
1911 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1913 // Parse the port names up to the first colon (:).
\r
1914 size_t iColon = 0;
\r
1916 port = (char *) ports[ nChannels ];
\r
1917 iColon = port.find(":");
\r
1918 if ( iColon != std::string::npos ) {
\r
1919 port = port.substr( 0, iColon + 1 );
\r
1920 if ( port != previousPort ) {
\r
1922 previousPort = port;
\r
1925 } while ( ports[++nChannels] );
\r
1929 jack_client_close( client );
\r
1933 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1935 RtAudio::DeviceInfo info;
\r
1936 info.probed = false;
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1941 if ( client == 0 ) {
\r
1942 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1943 error( RtAudioError::WARNING );
\r
1947 const char **ports;
\r
1948 std::string port, previousPort;
\r
1949 unsigned int nPorts = 0, nDevices = 0;
\r
1950 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1952 // Parse the port names up to the first colon (:).
\r
1953 size_t iColon = 0;
\r
1955 port = (char *) ports[ nPorts ];
\r
1956 iColon = port.find(":");
\r
1957 if ( iColon != std::string::npos ) {
\r
1958 port = port.substr( 0, iColon );
\r
1959 if ( port != previousPort ) {
\r
1960 if ( nDevices == device ) info.name = port;
\r
1962 previousPort = port;
\r
1965 } while ( ports[++nPorts] );
\r
1969 if ( device >= nDevices ) {
\r
1970 jack_client_close( client );
\r
1971 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1972 error( RtAudioError::INVALID_USE );
\r
1976 // Get the current jack server sample rate.
\r
1977 info.sampleRates.clear();
\r
1978 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1980 // Count the available ports containing the client name as device
\r
1981 // channels. Jack "input ports" equal RtAudio output channels.
\r
1982 unsigned int nChannels = 0;
\r
1983 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1985 while ( ports[ nChannels ] ) nChannels++;
\r
1987 info.outputChannels = nChannels;
\r
1990 // Jack "output ports" equal RtAudio input channels.
\r
1992 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1994 while ( ports[ nChannels ] ) nChannels++;
\r
1996 info.inputChannels = nChannels;
\r
1999 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2000 jack_client_close(client);
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 // If device opens for both playback and capture, we determine the channels.
\r
2007 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2008 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2010 // Jack always uses 32-bit floats.
\r
2011 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2013 // Jack doesn't provide default devices so we'll use the first available one.
\r
2014 if ( device == 0 && info.outputChannels > 0 )
\r
2015 info.isDefaultOutput = true;
\r
2016 if ( device == 0 && info.inputChannels > 0 )
\r
2017 info.isDefaultInput = true;
\r
2019 jack_client_close(client);
\r
2020 info.probed = true;
\r
2024 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2026 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2028 RtApiJack *object = (RtApiJack *) info->object;
\r
2029 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2034 // This function will be called by a spawned thread when the Jack
\r
2035 // server signals that it is shutting down. It is necessary to handle
\r
2036 // it this way because the jackShutdown() function must return before
\r
2037 // the jack_deactivate() function (in closeStream()) will return.
\r
2038 static void *jackCloseStream( void *ptr )
\r
2040 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2041 RtApiJack *object = (RtApiJack *) info->object;
\r
2043 object->closeStream();
\r
2045 pthread_exit( NULL );
\r
2047 static void jackShutdown( void *infoPointer )
\r
2049 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2050 RtApiJack *object = (RtApiJack *) info->object;
\r
2052 // Check current stream state. If stopped, then we'll assume this
\r
2053 // was called as a result of a call to RtApiJack::stopStream (the
\r
2054 // deactivation of a client handle causes this function to be called).
\r
2055 // If not, we'll assume the Jack server is shutting down or some
\r
2056 // other problem occurred and we should close the stream.
\r
2057 if ( object->isStreamRunning() == false ) return;
\r
2059 ThreadHandle threadId;
\r
2060 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2061 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2064 static int jackXrun( void *infoPointer )
\r
2066 JackHandle *handle = (JackHandle *) infoPointer;
\r
2068 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2069 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2074 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2075 unsigned int firstChannel, unsigned int sampleRate,
\r
2076 RtAudioFormat format, unsigned int *bufferSize,
\r
2077 RtAudio::StreamOptions *options )
\r
2079 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2081 // Look for jack server and try to become a client (only do once per stream).
\r
2082 jack_client_t *client = 0;
\r
2083 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2084 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2085 jack_status_t *status = NULL;
\r
2086 if ( options && !options->streamName.empty() )
\r
2087 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2089 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2090 if ( client == 0 ) {
\r
2091 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2092 error( RtAudioError::WARNING );
\r
2097 // The handle must have been created on an earlier pass.
\r
2098 client = handle->client;
\r
2101 const char **ports;
\r
2102 std::string port, previousPort, deviceName;
\r
2103 unsigned int nPorts = 0, nDevices = 0;
\r
2104 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2106 // Parse the port names up to the first colon (:).
\r
2107 size_t iColon = 0;
\r
2109 port = (char *) ports[ nPorts ];
\r
2110 iColon = port.find(":");
\r
2111 if ( iColon != std::string::npos ) {
\r
2112 port = port.substr( 0, iColon );
\r
2113 if ( port != previousPort ) {
\r
2114 if ( nDevices == device ) deviceName = port;
\r
2116 previousPort = port;
\r
2119 } while ( ports[++nPorts] );
\r
2123 if ( device >= nDevices ) {
\r
2124 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2128 // Count the available ports containing the client name as device
\r
2129 // channels. Jack "input ports" equal RtAudio output channels.
\r
2130 unsigned int nChannels = 0;
\r
2131 unsigned long flag = JackPortIsInput;
\r
2132 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2133 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2135 while ( ports[ nChannels ] ) nChannels++;
\r
2139 // Compare the jack ports for specified client to the requested number of channels.
\r
2140 if ( nChannels < (channels + firstChannel) ) {
\r
2141 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2142 errorText_ = errorStream_.str();
\r
2146 // Check the jack server sample rate.
\r
2147 unsigned int jackRate = jack_get_sample_rate( client );
\r
2148 if ( sampleRate != jackRate ) {
\r
2149 jack_client_close( client );
\r
2150 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2151 errorText_ = errorStream_.str();
\r
2154 stream_.sampleRate = jackRate;
\r
2156 // Get the latency of the JACK port.
\r
2157 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2158 if ( ports[ firstChannel ] ) {
\r
2159 // Added by Ge Wang
\r
2160 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2161 // the range (usually the min and max are equal)
\r
2162 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2163 // get the latency range
\r
2164 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2165 // be optimistic, use the min!
\r
2166 stream_.latency[mode] = latrange.min;
\r
2167 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2171 // The jack server always uses 32-bit floating-point data.
\r
2172 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2173 stream_.userFormat = format;
\r
2175 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2176 else stream_.userInterleaved = true;
\r
2178 // Jack always uses non-interleaved buffers.
\r
2179 stream_.deviceInterleaved[mode] = false;
\r
2181 // Jack always provides host byte-ordered data.
\r
2182 stream_.doByteSwap[mode] = false;
\r
2184 // Get the buffer size. The buffer size and number of buffers
\r
2185 // (periods) is set when the jack server is started.
\r
2186 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2187 *bufferSize = stream_.bufferSize;
\r
2189 stream_.nDeviceChannels[mode] = channels;
\r
2190 stream_.nUserChannels[mode] = channels;
\r
2192 // Set flags for buffer conversion.
\r
2193 stream_.doConvertBuffer[mode] = false;
\r
2194 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2195 stream_.doConvertBuffer[mode] = true;
\r
2196 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2197 stream_.nUserChannels[mode] > 1 )
\r
2198 stream_.doConvertBuffer[mode] = true;
\r
2200 // Allocate our JackHandle structure for the stream.
\r
2201 if ( handle == 0 ) {
\r
2203 handle = new JackHandle;
\r
2205 catch ( std::bad_alloc& ) {
\r
2206 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2210 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2211 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2214 stream_.apiHandle = (void *) handle;
\r
2215 handle->client = client;
\r
2217 handle->deviceName[mode] = deviceName;
\r
2219 // Allocate necessary internal buffers.
\r
2220 unsigned long bufferBytes;
\r
2221 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2222 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2223 if ( stream_.userBuffer[mode] == NULL ) {
\r
2224 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2228 if ( stream_.doConvertBuffer[mode] ) {
\r
2230 bool makeBuffer = true;
\r
2231 if ( mode == OUTPUT )
\r
2232 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2233 else { // mode == INPUT
\r
2234 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2235 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2236 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2237 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2241 if ( makeBuffer ) {
\r
2242 bufferBytes *= *bufferSize;
\r
2243 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2244 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2245 if ( stream_.deviceBuffer == NULL ) {
\r
2246 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2252 // Allocate memory for the Jack ports (channels) identifiers.
\r
2253 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2254 if ( handle->ports[mode] == NULL ) {
\r
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2259 stream_.device[mode] = device;
\r
2260 stream_.channelOffset[mode] = firstChannel;
\r
2261 stream_.state = STREAM_STOPPED;
\r
2262 stream_.callbackInfo.object = (void *) this;
\r
2264 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2265 // We had already set up the stream for output.
\r
2266 stream_.mode = DUPLEX;
\r
2268 stream_.mode = mode;
\r
2269 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2270 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2271 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2274 // Register our ports.
\r
2276 if ( mode == OUTPUT ) {
\r
2277 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2278 snprintf( label, 64, "outport %d", i );
\r
2279 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2280 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2284 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2285 snprintf( label, 64, "inport %d", i );
\r
2286 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2287 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2291 // Setup the buffer conversion information structure. We don't use
\r
2292 // buffers to do channel offsets, so we override that parameter
\r
2294 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2300 pthread_cond_destroy( &handle->condition );
\r
2301 jack_client_close( handle->client );
\r
2303 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2304 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2307 stream_.apiHandle = 0;
\r
2310 for ( int i=0; i<2; i++ ) {
\r
2311 if ( stream_.userBuffer[i] ) {
\r
2312 free( stream_.userBuffer[i] );
\r
2313 stream_.userBuffer[i] = 0;
\r
2317 if ( stream_.deviceBuffer ) {
\r
2318 free( stream_.deviceBuffer );
\r
2319 stream_.deviceBuffer = 0;
\r
2325 void RtApiJack :: closeStream( void )
\r
2327 if ( stream_.state == STREAM_CLOSED ) {
\r
2328 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2329 error( RtAudioError::WARNING );
\r
2333 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2336 if ( stream_.state == STREAM_RUNNING )
\r
2337 jack_deactivate( handle->client );
\r
2339 jack_client_close( handle->client );
\r
2343 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2344 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2345 pthread_cond_destroy( &handle->condition );
\r
2347 stream_.apiHandle = 0;
\r
2350 for ( int i=0; i<2; i++ ) {
\r
2351 if ( stream_.userBuffer[i] ) {
\r
2352 free( stream_.userBuffer[i] );
\r
2353 stream_.userBuffer[i] = 0;
\r
2357 if ( stream_.deviceBuffer ) {
\r
2358 free( stream_.deviceBuffer );
\r
2359 stream_.deviceBuffer = 0;
\r
2362 stream_.mode = UNINITIALIZED;
\r
2363 stream_.state = STREAM_CLOSED;
\r
2366 void RtApiJack :: startStream( void )
\r
2369 if ( stream_.state == STREAM_RUNNING ) {
\r
2370 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2371 error( RtAudioError::WARNING );
\r
2375 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2376 int result = jack_activate( handle->client );
\r
2378 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2382 const char **ports;
\r
2384 // Get the list of available ports.
\r
2385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2387 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2388 if ( ports == NULL) {
\r
2389 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2393 // Now make the port connections. Since RtAudio wasn't designed to
\r
2394 // allow the user to select particular channels of a device, we'll
\r
2395 // just open the first "nChannels" ports with offset.
\r
2396 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2398 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2399 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2402 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2409 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2411 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2412 if ( ports == NULL) {
\r
2413 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2417 // Now make the port connections. See note above.
\r
2418 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2420 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2421 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2424 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2431 handle->drainCounter = 0;
\r
2432 handle->internalDrain = false;
\r
2433 stream_.state = STREAM_RUNNING;
\r
2436 if ( result == 0 ) return;
\r
2437 error( RtAudioError::SYSTEM_ERROR );
\r
2440 void RtApiJack :: stopStream( void )
\r
2443 if ( stream_.state == STREAM_STOPPED ) {
\r
2444 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2445 error( RtAudioError::WARNING );
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 if ( handle->drainCounter == 0 ) {
\r
2453 handle->drainCounter = 2;
\r
2454 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2458 jack_deactivate( handle->client );
\r
2459 stream_.state = STREAM_STOPPED;
\r
2462 void RtApiJack :: abortStream( void )
\r
2465 if ( stream_.state == STREAM_STOPPED ) {
\r
2466 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2467 error( RtAudioError::WARNING );
\r
2471 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2472 handle->drainCounter = 2;
\r
2477 // This function will be called by a spawned thread when the user
\r
2478 // callback function signals that the stream should be stopped or
\r
2479 // aborted. It is necessary to handle it this way because the
\r
2480 // callbackEvent() function must return before the jack_deactivate()
\r
2481 // function will return.
\r
2482 static void *jackStopStream( void *ptr )
\r
2484 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2485 RtApiJack *object = (RtApiJack *) info->object;
\r
2487 object->stopStream();
\r
2488 pthread_exit( NULL );
\r
2491 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2493 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2494 if ( stream_.state == STREAM_CLOSED ) {
\r
2495 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2496 error( RtAudioError::WARNING );
\r
2499 if ( stream_.bufferSize != nframes ) {
\r
2500 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2501 error( RtAudioError::WARNING );
\r
2505 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2506 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2508 // Check if we were draining the stream and signal is finished.
\r
2509 if ( handle->drainCounter > 3 ) {
\r
2510 ThreadHandle threadId;
\r
2512 stream_.state = STREAM_STOPPING;
\r
2513 if ( handle->internalDrain == true )
\r
2514 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2516 pthread_cond_signal( &handle->condition );
\r
2520 // Invoke user callback first, to get fresh output data.
\r
2521 if ( handle->drainCounter == 0 ) {
\r
2522 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2523 double streamTime = getStreamTime();
\r
2524 RtAudioStreamStatus status = 0;
\r
2525 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2526 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2527 handle->xrun[0] = false;
\r
2529 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2530 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2531 handle->xrun[1] = false;
\r
2533 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2534 stream_.bufferSize, streamTime, status, info->userData );
\r
2535 if ( cbReturnValue == 2 ) {
\r
2536 stream_.state = STREAM_STOPPING;
\r
2537 handle->drainCounter = 2;
\r
2539 pthread_create( &id, NULL, jackStopStream, info );
\r
2542 else if ( cbReturnValue == 1 ) {
\r
2543 handle->drainCounter = 1;
\r
2544 handle->internalDrain = true;
\r
2548 jack_default_audio_sample_t *jackbuffer;
\r
2549 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2552 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2554 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2555 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2556 memset( jackbuffer, 0, bufferBytes );
\r
2560 else if ( stream_.doConvertBuffer[0] ) {
\r
2562 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2569 else { // no buffer conversion
\r
2570 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2571 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2572 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2576 if ( handle->drainCounter ) {
\r
2577 handle->drainCounter++;
\r
2582 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2584 if ( stream_.doConvertBuffer[1] ) {
\r
2585 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2587 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2589 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2591 else { // no buffer conversion
\r
2592 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2593 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2594 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 RtApi::tickStreamTime();
\r
2603 //******************** End of __UNIX_JACK__ *********************//
\r
2606 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2608 // The ASIO API is designed around a callback scheme, so this
\r
2609 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2610 // Jack. The primary constraint with ASIO is that it only allows
\r
2611 // access to a single driver at a time. Thus, it is not possible to
\r
2612 // have more than one simultaneous RtAudio stream.
\r
2614 // This implementation also requires a number of external ASIO files
\r
2615 // and a few global variables. The ASIO callback scheme does not
\r
2616 // allow for the passing of user data, so we must create a global
\r
2617 // pointer to our callbackInfo structure.
\r
2619 // On unix systems, we make use of a pthread condition variable.
\r
2620 // Since there is no equivalent in Windows, I hacked something based
\r
2621 // on information found in
\r
2622 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2624 #include "asiosys.h"
\r
2626 #include "iasiothiscallresolver.h"
\r
2627 #include "asiodrivers.h"
\r
2630 static AsioDrivers drivers;
\r
2631 static ASIOCallbacks asioCallbacks;
\r
2632 static ASIODriverInfo driverInfo;
\r
2633 static CallbackInfo *asioCallbackInfo;
\r
2634 static bool asioXRun;
\r
2636 struct AsioHandle {
\r
2637 int drainCounter; // Tracks callback counts when draining
\r
2638 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2639 ASIOBufferInfo *bufferInfos;
\r
2643 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2646 // Function declarations (definitions at end of section)
\r
2647 static const char* getAsioErrorString( ASIOError result );
\r
2648 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2649 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2651 RtApiAsio :: RtApiAsio()
\r
2653 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2654 // CoInitialize beforehand, but it must be for appartment threading
\r
2655 // (in which case, CoInitilialize will return S_FALSE here).
\r
2656 coInitialized_ = false;
\r
2657 HRESULT hr = CoInitialize( NULL );
\r
2658 if ( FAILED(hr) ) {
\r
2659 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2660 error( RtAudioError::WARNING );
\r
2662 coInitialized_ = true;
\r
2664 drivers.removeCurrentDriver();
\r
2665 driverInfo.asioVersion = 2;
\r
2667 // See note in DirectSound implementation about GetDesktopWindow().
\r
2668 driverInfo.sysRef = GetForegroundWindow();
\r
2671 RtApiAsio :: ~RtApiAsio()
\r
2673 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2674 if ( coInitialized_ ) CoUninitialize();
\r
2677 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2679 return (unsigned int) drivers.asioGetNumDev();
\r
2682 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2684 RtAudio::DeviceInfo info;
\r
2685 info.probed = false;
\r
2688 unsigned int nDevices = getDeviceCount();
\r
2689 if ( nDevices == 0 ) {
\r
2690 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2691 error( RtAudioError::INVALID_USE );
\r
2695 if ( device >= nDevices ) {
\r
2696 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2697 error( RtAudioError::INVALID_USE );
\r
2701 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2702 if ( stream_.state != STREAM_CLOSED ) {
\r
2703 if ( device >= devices_.size() ) {
\r
2704 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2705 error( RtAudioError::WARNING );
\r
2708 return devices_[ device ];
\r
2711 char driverName[32];
\r
2712 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2713 if ( result != ASE_OK ) {
\r
2714 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2715 errorText_ = errorStream_.str();
\r
2716 error( RtAudioError::WARNING );
\r
2720 info.name = driverName;
\r
2722 if ( !drivers.loadDriver( driverName ) ) {
\r
2723 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2724 errorText_ = errorStream_.str();
\r
2725 error( RtAudioError::WARNING );
\r
2729 result = ASIOInit( &driverInfo );
\r
2730 if ( result != ASE_OK ) {
\r
2731 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2732 errorText_ = errorStream_.str();
\r
2733 error( RtAudioError::WARNING );
\r
2737 // Determine the device channel information.
\r
2738 long inputChannels, outputChannels;
\r
2739 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2740 if ( result != ASE_OK ) {
\r
2741 drivers.removeCurrentDriver();
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 info.outputChannels = outputChannels;
\r
2749 info.inputChannels = inputChannels;
\r
2750 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2751 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2753 // Determine the supported sample rates.
\r
2754 info.sampleRates.clear();
\r
2755 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2756 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2757 if ( result == ASE_OK )
\r
2758 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2761 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2762 ASIOChannelInfo channelInfo;
\r
2763 channelInfo.channel = 0;
\r
2764 channelInfo.isInput = true;
\r
2765 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2766 result = ASIOGetChannelInfo( &channelInfo );
\r
2767 if ( result != ASE_OK ) {
\r
2768 drivers.removeCurrentDriver();
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 info.nativeFormats = 0;
\r
2776 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2777 info.nativeFormats |= RTAUDIO_SINT16;
\r
2778 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT32;
\r
2780 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2784 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2785 info.nativeFormats |= RTAUDIO_SINT24;
\r
2787 if ( info.outputChannels > 0 )
\r
2788 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2789 if ( info.inputChannels > 0 )
\r
2790 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2792 info.probed = true;
\r
2793 drivers.removeCurrentDriver();
\r
2797 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2799 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2800 object->callbackEvent( index );
\r
2803 void RtApiAsio :: saveDeviceInfo( void )
\r
2807 unsigned int nDevices = getDeviceCount();
\r
2808 devices_.resize( nDevices );
\r
2809 for ( unsigned int i=0; i<nDevices; i++ )
\r
2810 devices_[i] = getDeviceInfo( i );
\r
2813 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2814 unsigned int firstChannel, unsigned int sampleRate,
\r
2815 RtAudioFormat format, unsigned int *bufferSize,
\r
2816 RtAudio::StreamOptions *options )
\r
2818 // For ASIO, a duplex stream MUST use the same driver.
\r
2819 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2820 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2824 char driverName[32];
\r
2825 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2826 if ( result != ASE_OK ) {
\r
2827 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2828 errorText_ = errorStream_.str();
\r
2832 // Only load the driver once for duplex stream.
\r
2833 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2834 // The getDeviceInfo() function will not work when a stream is open
\r
2835 // because ASIO does not allow multiple devices to run at the same
\r
2836 // time. Thus, we'll probe the system before opening a stream and
\r
2837 // save the results for use by getDeviceInfo().
\r
2838 this->saveDeviceInfo();
\r
2840 if ( !drivers.loadDriver( driverName ) ) {
\r
2841 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2842 errorText_ = errorStream_.str();
\r
2846 result = ASIOInit( &driverInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2854 // Check the device channel count.
\r
2855 long inputChannels, outputChannels;
\r
2856 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2857 if ( result != ASE_OK ) {
\r
2858 drivers.removeCurrentDriver();
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2864 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2865 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2866 drivers.removeCurrentDriver();
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2871 stream_.nDeviceChannels[mode] = channels;
\r
2872 stream_.nUserChannels[mode] = channels;
\r
2873 stream_.channelOffset[mode] = firstChannel;
\r
2875 // Verify the sample rate is supported.
\r
2876 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2877 if ( result != ASE_OK ) {
\r
2878 drivers.removeCurrentDriver();
\r
2879 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2880 errorText_ = errorStream_.str();
\r
2884 // Get the current sample rate
\r
2885 ASIOSampleRate currentRate;
\r
2886 result = ASIOGetSampleRate( ¤tRate );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 // Set the sample rate only if necessary
\r
2895 if ( currentRate != sampleRate ) {
\r
2896 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2897 if ( result != ASE_OK ) {
\r
2898 drivers.removeCurrentDriver();
\r
2899 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2900 errorText_ = errorStream_.str();
\r
2905 // Determine the driver data type.
\r
2906 ASIOChannelInfo channelInfo;
\r
2907 channelInfo.channel = 0;
\r
2908 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2909 else channelInfo.isInput = true;
\r
2910 result = ASIOGetChannelInfo( &channelInfo );
\r
2911 if ( result != ASE_OK ) {
\r
2912 drivers.removeCurrentDriver();
\r
2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2914 errorText_ = errorStream_.str();
\r
2918 // Assuming WINDOWS host is always little-endian.
\r
2919 stream_.doByteSwap[mode] = false;
\r
2920 stream_.userFormat = format;
\r
2921 stream_.deviceFormat[mode] = 0;
\r
2922 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2923 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2926 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2927 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2928 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2930 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2931 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2932 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2934 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2935 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2936 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2938 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2939 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2940 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2943 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2944 drivers.removeCurrentDriver();
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2946 errorText_ = errorStream_.str();
\r
2950 // Set the buffer size. For a duplex stream, this will end up
\r
2951 // setting the buffer size based on the input constraints, which
\r
2953 long minSize, maxSize, preferSize, granularity;
\r
2954 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2955 if ( result != ASE_OK ) {
\r
2956 drivers.removeCurrentDriver();
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2958 errorText_ = errorStream_.str();
\r
2962 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2963 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2964 else if ( granularity == -1 ) {
\r
2965 // Make sure bufferSize is a power of two.
\r
2966 int log2_of_min_size = 0;
\r
2967 int log2_of_max_size = 0;
\r
2969 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2970 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2971 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2974 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2975 int min_delta_num = log2_of_min_size;
\r
2977 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2978 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2979 if (current_delta < min_delta) {
\r
2980 min_delta = current_delta;
\r
2981 min_delta_num = i;
\r
2985 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2986 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2987 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2989 else if ( granularity != 0 ) {
\r
2990 // Set to an even multiple of granularity, rounding up.
\r
2991 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2994 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2995 drivers.removeCurrentDriver();
\r
2996 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3000 stream_.bufferSize = *bufferSize;
\r
3001 stream_.nBuffers = 2;
\r
3003 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3004 else stream_.userInterleaved = true;
\r
3006 // ASIO always uses non-interleaved buffers.
\r
3007 stream_.deviceInterleaved[mode] = false;
\r
3009 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3010 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3011 if ( handle == 0 ) {
\r
3013 handle = new AsioHandle;
\r
3015 catch ( std::bad_alloc& ) {
\r
3016 //if ( handle == NULL ) {
\r
3017 drivers.removeCurrentDriver();
\r
3018 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3021 handle->bufferInfos = 0;
\r
3023 // Create a manual-reset event.
\r
3024 handle->condition = CreateEvent( NULL, // no security
\r
3025 TRUE, // manual-reset
\r
3026 FALSE, // non-signaled initially
\r
3027 NULL ); // unnamed
\r
3028 stream_.apiHandle = (void *) handle;
\r
3031 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3032 // and output separately, we'll have to dispose of previously
\r
3033 // created output buffers for a duplex stream.
\r
3034 long inputLatency, outputLatency;
\r
3035 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3036 ASIODisposeBuffers();
\r
3037 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3040 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3041 bool buffersAllocated = false;
\r
3042 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3043 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3044 if ( handle->bufferInfos == NULL ) {
\r
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3046 errorText_ = errorStream_.str();
\r
3050 ASIOBufferInfo *infos;
\r
3051 infos = handle->bufferInfos;
\r
3052 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3053 infos->isInput = ASIOFalse;
\r
3054 infos->channelNum = i + stream_.channelOffset[0];
\r
3055 infos->buffers[0] = infos->buffers[1] = 0;
\r
3057 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3058 infos->isInput = ASIOTrue;
\r
3059 infos->channelNum = i + stream_.channelOffset[1];
\r
3060 infos->buffers[0] = infos->buffers[1] = 0;
\r
3063 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3064 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3065 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3066 asioCallbacks.asioMessage = &asioMessages;
\r
3067 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3068 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3069 if ( result != ASE_OK ) {
\r
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3071 errorText_ = errorStream_.str();
\r
3074 buffersAllocated = true;
\r
3076 // Set flags for buffer conversion.
\r
3077 stream_.doConvertBuffer[mode] = false;
\r
3078 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3079 stream_.doConvertBuffer[mode] = true;
\r
3080 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3081 stream_.nUserChannels[mode] > 1 )
\r
3082 stream_.doConvertBuffer[mode] = true;
\r
3084 // Allocate necessary internal buffers
\r
3085 unsigned long bufferBytes;
\r
3086 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3087 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3088 if ( stream_.userBuffer[mode] == NULL ) {
\r
3089 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3093 if ( stream_.doConvertBuffer[mode] ) {
\r
3095 bool makeBuffer = true;
\r
3096 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3097 if ( mode == INPUT ) {
\r
3098 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3099 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3100 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3104 if ( makeBuffer ) {
\r
3105 bufferBytes *= *bufferSize;
\r
3106 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3107 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3108 if ( stream_.deviceBuffer == NULL ) {
\r
3109 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3115 stream_.sampleRate = sampleRate;
\r
3116 stream_.device[mode] = device;
\r
3117 stream_.state = STREAM_STOPPED;
\r
3118 asioCallbackInfo = &stream_.callbackInfo;
\r
3119 stream_.callbackInfo.object = (void *) this;
\r
3120 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3121 // We had already set up an output stream.
\r
3122 stream_.mode = DUPLEX;
\r
3124 stream_.mode = mode;
\r
3126 // Determine device latencies
\r
3127 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3128 if ( result != ASE_OK ) {
\r
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3130 errorText_ = errorStream_.str();
\r
3131 error( RtAudioError::WARNING); // warn but don't fail
\r
3134 stream_.latency[0] = outputLatency;
\r
3135 stream_.latency[1] = inputLatency;
\r
3138 // Setup the buffer conversion information structure. We don't use
\r
3139 // buffers to do channel offsets, so we override that parameter
\r
3141 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3146 if ( buffersAllocated )
\r
3147 ASIODisposeBuffers();
\r
3148 drivers.removeCurrentDriver();
\r
3151 CloseHandle( handle->condition );
\r
3152 if ( handle->bufferInfos )
\r
3153 free( handle->bufferInfos );
\r
3155 stream_.apiHandle = 0;
\r
3158 for ( int i=0; i<2; i++ ) {
\r
3159 if ( stream_.userBuffer[i] ) {
\r
3160 free( stream_.userBuffer[i] );
\r
3161 stream_.userBuffer[i] = 0;
\r
3165 if ( stream_.deviceBuffer ) {
\r
3166 free( stream_.deviceBuffer );
\r
3167 stream_.deviceBuffer = 0;
\r
3173 void RtApiAsio :: closeStream()
\r
3175 if ( stream_.state == STREAM_CLOSED ) {
\r
3176 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 if ( stream_.state == STREAM_RUNNING ) {
\r
3182 stream_.state = STREAM_STOPPED;
\r
3185 ASIODisposeBuffers();
\r
3186 drivers.removeCurrentDriver();
\r
3188 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3190 CloseHandle( handle->condition );
\r
3191 if ( handle->bufferInfos )
\r
3192 free( handle->bufferInfos );
\r
3194 stream_.apiHandle = 0;
\r
3197 for ( int i=0; i<2; i++ ) {
\r
3198 if ( stream_.userBuffer[i] ) {
\r
3199 free( stream_.userBuffer[i] );
\r
3200 stream_.userBuffer[i] = 0;
\r
3204 if ( stream_.deviceBuffer ) {
\r
3205 free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = 0;
\r
3209 stream_.mode = UNINITIALIZED;
\r
3210 stream_.state = STREAM_CLOSED;
\r
3213 bool stopThreadCalled = false;
\r
3215 void RtApiAsio :: startStream()
\r
3218 if ( stream_.state == STREAM_RUNNING ) {
\r
3219 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3220 error( RtAudioError::WARNING );
\r
3224 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3225 ASIOError result = ASIOStart();
\r
3226 if ( result != ASE_OK ) {
\r
3227 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3228 errorText_ = errorStream_.str();
\r
3232 handle->drainCounter = 0;
\r
3233 handle->internalDrain = false;
\r
3234 ResetEvent( handle->condition );
\r
3235 stream_.state = STREAM_RUNNING;
\r
3239 stopThreadCalled = false;
\r
3241 if ( result == ASE_OK ) return;
\r
3242 error( RtAudioError::SYSTEM_ERROR );
\r
3245 void RtApiAsio :: stopStream()
\r
3248 if ( stream_.state == STREAM_STOPPED ) {
\r
3249 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3250 error( RtAudioError::WARNING );
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3256 if ( handle->drainCounter == 0 ) {
\r
3257 handle->drainCounter = 2;
\r
3258 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3262 stream_.state = STREAM_STOPPED;
\r
3264 ASIOError result = ASIOStop();
\r
3265 if ( result != ASE_OK ) {
\r
3266 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3267 errorText_ = errorStream_.str();
\r
3270 if ( result == ASE_OK ) return;
\r
3271 error( RtAudioError::SYSTEM_ERROR );
\r
3274 void RtApiAsio :: abortStream()
\r
3277 if ( stream_.state == STREAM_STOPPED ) {
\r
3278 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3279 error( RtAudioError::WARNING );
\r
3283 // The following lines were commented-out because some behavior was
\r
3284 // noted where the device buffers need to be zeroed to avoid
\r
3285 // continuing sound, even when the device buffers are completely
\r
3286 // disposed. So now, calling abort is the same as calling stop.
\r
3287 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3288 // handle->drainCounter = 2;
\r
3292 // This function will be called by a spawned thread when the user
\r
3293 // callback function signals that the stream should be stopped or
\r
3294 // aborted. It is necessary to handle it this way because the
\r
3295 // callbackEvent() function must return before the ASIOStop()
\r
3296 // function will return.
\r
3297 static unsigned __stdcall asioStopStream( void *ptr )
\r
3299 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3300 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3302 object->stopStream();
\r
3303 _endthreadex( 0 );
\r
3307 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3309 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3310 if ( stream_.state == STREAM_CLOSED ) {
\r
3311 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3319 // Check if we were draining the stream and signal if finished.
\r
3320 if ( handle->drainCounter > 3 ) {
\r
3322 stream_.state = STREAM_STOPPING;
\r
3323 if ( handle->internalDrain == false )
\r
3324 SetEvent( handle->condition );
\r
3325 else { // spawn a thread to stop the stream
\r
3326 unsigned threadId;
\r
3327 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3328 &stream_.callbackInfo, 0, &threadId );
\r
3333 // Invoke user callback to get fresh output data UNLESS we are
\r
3334 // draining stream.
\r
3335 if ( handle->drainCounter == 0 ) {
\r
3336 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3337 double streamTime = getStreamTime();
\r
3338 RtAudioStreamStatus status = 0;
\r
3339 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3340 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3343 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3344 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3347 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3348 stream_.bufferSize, streamTime, status, info->userData );
\r
3349 if ( cbReturnValue == 2 ) {
\r
3350 stream_.state = STREAM_STOPPING;
\r
3351 handle->drainCounter = 2;
\r
3352 unsigned threadId;
\r
3353 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3354 &stream_.callbackInfo, 0, &threadId );
\r
3357 else if ( cbReturnValue == 1 ) {
\r
3358 handle->drainCounter = 1;
\r
3359 handle->internalDrain = true;
\r
3363 unsigned int nChannels, bufferBytes, i, j;
\r
3364 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3367 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3369 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3371 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3372 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3373 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3377 else if ( stream_.doConvertBuffer[0] ) {
\r
3379 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3380 if ( stream_.doByteSwap[0] )
\r
3381 byteSwapBuffer( stream_.deviceBuffer,
\r
3382 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3383 stream_.deviceFormat[0] );
\r
3385 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3386 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3387 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3388 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3394 if ( stream_.doByteSwap[0] )
\r
3395 byteSwapBuffer( stream_.userBuffer[0],
\r
3396 stream_.bufferSize * stream_.nUserChannels[0],
\r
3397 stream_.userFormat );
\r
3399 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3400 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3401 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3402 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3407 if ( handle->drainCounter ) {
\r
3408 handle->drainCounter++;
\r
3413 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3415 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3417 if (stream_.doConvertBuffer[1]) {
\r
3419 // Always interleave ASIO input data.
\r
3420 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3421 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3422 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3423 handle->bufferInfos[i].buffers[bufferIndex],
\r
3427 if ( stream_.doByteSwap[1] )
\r
3428 byteSwapBuffer( stream_.deviceBuffer,
\r
3429 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3430 stream_.deviceFormat[1] );
\r
3431 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3435 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3436 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3437 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3438 handle->bufferInfos[i].buffers[bufferIndex],
\r
3443 if ( stream_.doByteSwap[1] )
\r
3444 byteSwapBuffer( stream_.userBuffer[1],
\r
3445 stream_.bufferSize * stream_.nUserChannels[1],
\r
3446 stream_.userFormat );
\r
3451 // The following call was suggested by Malte Clasen. While the API
\r
3452 // documentation indicates it should not be required, some device
\r
3453 // drivers apparently do not function correctly without it.
\r
3454 ASIOOutputReady();
\r
3456 RtApi::tickStreamTime();
\r
3460 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3462 // The ASIO documentation says that this usually only happens during
\r
3463 // external sync. Audio processing is not stopped by the driver,
\r
3464 // actual sample rate might not have even changed, maybe only the
\r
3465 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3468 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3470 object->stopStream();
\r
3472 catch ( RtAudioError &exception ) {
\r
3473 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3477 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3480 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3484 switch( selector ) {
\r
3485 case kAsioSelectorSupported:
\r
3486 if ( value == kAsioResetRequest
\r
3487 || value == kAsioEngineVersion
\r
3488 || value == kAsioResyncRequest
\r
3489 || value == kAsioLatenciesChanged
\r
3490 // The following three were added for ASIO 2.0, you don't
\r
3491 // necessarily have to support them.
\r
3492 || value == kAsioSupportsTimeInfo
\r
3493 || value == kAsioSupportsTimeCode
\r
3494 || value == kAsioSupportsInputMonitor)
\r
3497 case kAsioResetRequest:
\r
3498 // Defer the task and perform the reset of the driver during the
\r
3499 // next "safe" situation. You cannot reset the driver right now,
\r
3500 // as this code is called from the driver. Reset the driver is
\r
3501 // done by completely destruct is. I.e. ASIOStop(),
\r
3502 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3504 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3507 case kAsioResyncRequest:
\r
3508 // This informs the application that the driver encountered some
\r
3509 // non-fatal data loss. It is used for synchronization purposes
\r
3510 // of different media. Added mainly to work around the Win16Mutex
\r
3511 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3512 // which could lose data because the Mutex was held too long by
\r
3513 // another thread. However a driver can issue it in other
\r
3514 // situations, too.
\r
3515 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3519 case kAsioLatenciesChanged:
\r
3520 // This will inform the host application that the drivers were
\r
3521 // latencies changed. Beware, it this does not mean that the
\r
3522 // buffer sizes have changed! You might need to update internal
\r
3524 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3527 case kAsioEngineVersion:
\r
3528 // Return the supported ASIO version of the host application. If
\r
3529 // a host application does not implement this selector, ASIO 1.0
\r
3530 // is assumed by the driver.
\r
3533 case kAsioSupportsTimeInfo:
\r
3534 // Informs the driver whether the
\r
3535 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3536 // For compatibility with ASIO 1.0 drivers the host application
\r
3537 // should always support the "old" bufferSwitch method, too.
\r
3540 case kAsioSupportsTimeCode:
\r
3541 // Informs the driver whether application is interested in time
\r
3542 // code info. If an application does not need to know about time
\r
3543 // code, the driver has less work to do.
\r
3550 static const char* getAsioErrorString( ASIOError result )
\r
3555 const char*message;
\r
3558 static const Messages m[] =
\r
3560 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3561 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3562 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3563 { ASE_InvalidMode, "Invalid mode." },
\r
3564 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3565 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3566 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3569 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3570 if ( m[i].value == result ) return m[i].message;
\r
3572 return "Unknown error.";
\r
3575 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3579 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3581 #include <audioclient.h>
\r
3583 #include <functiondiscoverykeys.h>
\r
3584 #include <mmdeviceapi.h>
\r
3586 //=============================================================================
\r
3588 #define EXIT_ON_ERROR( hr, errorType, errorText )\
\r
3589 if ( FAILED( hr ) )\
\r
3591 errorText_ = __FUNCTION__ ": " errorText;\
\r
3592 error( errorType );\
\r
3596 #define SAFE_RELEASE( objectPtr )\
\r
3599 objectPtr->Release();\
\r
3600 objectPtr = NULL;\
\r
3603 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3605 //-----------------------------------------------------------------------------
\r
3607 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3608 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3609 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3610 // provide intermediate storage for read / write synchronization.
\r
3611 class WasapiBuffer
\r
3615 : buffer_( NULL ),
\r
3624 // sets the length of the internal ring buffer
\r
3625 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3628 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3630 bufferSize_ = bufferSize;
\r
3635 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3636 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3638 if ( !buffer || // incoming buffer is NULL
\r
3639 bufferSize == 0 || // incoming buffer has no data
\r
3640 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3645 unsigned int relOutIndex = outIndex_;
\r
3646 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3647 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3648 relOutIndex += bufferSize_;
\r
3651 // "in" index can end on the "out" index but cannot begin at it
\r
3652 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3653 return false; // not enough space between "in" index and "out" index
\r
3656 // copy buffer from external to internal
\r
3657 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3658 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3659 int fromInSize = bufferSize - fromZeroSize;
\r
3663 case RTAUDIO_SINT8:
\r
3664 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3665 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3667 case RTAUDIO_SINT16:
\r
3668 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3669 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3671 case RTAUDIO_SINT24:
\r
3672 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3673 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3675 case RTAUDIO_SINT32:
\r
3676 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3677 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3679 case RTAUDIO_FLOAT32:
\r
3680 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3681 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3683 case RTAUDIO_FLOAT64:
\r
3684 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3685 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3689 // update "in" index
\r
3690 inIndex_ += bufferSize;
\r
3691 inIndex_ %= bufferSize_;
\r
3696 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3697 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3699 if ( !buffer || // incoming buffer is NULL
\r
3700 bufferSize == 0 || // incoming buffer has no data
\r
3701 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3706 unsigned int relInIndex = inIndex_;
\r
3707 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3708 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3709 relInIndex += bufferSize_;
\r
3712 // "out" index can begin at and end on the "in" index
\r
3713 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3714 return false; // not enough space between "out" index and "in" index
\r
3717 // copy buffer from internal to external
\r
3718 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3719 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3720 int fromOutSize = bufferSize - fromZeroSize;
\r
3724 case RTAUDIO_SINT8:
\r
3725 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3726 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3728 case RTAUDIO_SINT16:
\r
3729 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3730 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3732 case RTAUDIO_SINT24:
\r
3733 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3734 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3736 case RTAUDIO_SINT32:
\r
3737 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3738 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3740 case RTAUDIO_FLOAT32:
\r
3741 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3742 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3744 case RTAUDIO_FLOAT64:
\r
3745 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3746 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3750 // update "out" index
\r
3751 outIndex_ += bufferSize;
\r
3752 outIndex_ %= bufferSize_;
\r
3759 unsigned int bufferSize_;
\r
3760 unsigned int inIndex_;
\r
3761 unsigned int outIndex_;
\r
3764 //-----------------------------------------------------------------------------
\r
3766 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3767 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3768 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3769 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3770 // one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates
\r
3771 // that may cause artifacts via this conversion.
\r
3772 void convertBufferWasapi( char* outBuffer,
\r
3773 const char* inBuffer,
\r
3774 const unsigned int& inChannelCount,
\r
3775 const unsigned int& outChannelCount,
\r
3776 const unsigned int& inSampleRate,
\r
3777 const unsigned int& outSampleRate,
\r
3778 const unsigned int& inSampleCount,
\r
3779 unsigned int& outSampleCount,
\r
3780 const RtAudioFormat& format )
\r
3782 // calculate the new outSampleCount and relative sampleStep
\r
3783 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3784 float sampleStep = 1.0f / sampleRatio;
\r
3785 float inSampleFraction = 0.0f;
\r
3786 unsigned int commonChannelCount = min( inChannelCount, outChannelCount );
\r
3788 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3790 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3791 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3793 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3797 case RTAUDIO_SINT8:
\r
3798 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3800 case RTAUDIO_SINT16:
\r
3801 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3803 case RTAUDIO_SINT24:
\r
3804 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3806 case RTAUDIO_SINT32:
\r
3807 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3809 case RTAUDIO_FLOAT32:
\r
3810 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3812 case RTAUDIO_FLOAT64:
\r
3813 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3817 // jump to next in sample
\r
3818 inSampleFraction += sampleStep;
\r
3822 //-----------------------------------------------------------------------------
\r
3824 // A structure to hold various information related to the WASAPI implementation.
\r
3825 struct WasapiHandle
\r
3827 IAudioClient* captureAudioClient;
\r
3828 IAudioClient* renderAudioClient;
\r
3829 IAudioCaptureClient* captureClient;
\r
3830 IAudioRenderClient* renderClient;
\r
3831 HANDLE captureEvent;
\r
3832 HANDLE renderEvent;
\r
3835 : captureAudioClient( NULL ),
\r
3836 renderAudioClient( NULL ),
\r
3837 captureClient( NULL ),
\r
3838 renderClient( NULL ),
\r
3839 captureEvent( NULL ),
\r
3840 renderEvent( NULL ) {}
\r
3843 //=============================================================================
\r
3845 RtApiWasapi::RtApiWasapi()
\r
3846 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3848 // WASAPI can run either apartment or multi-threaded
\r
3849 HRESULT hr = CoInitialize( NULL );
\r
3851 if ( !FAILED( hr ) )
\r
3852 coInitialized_ = true;
\r
3854 // instantiate device enumerator
\r
3855 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3856 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3857 ( void** ) &deviceEnumerator_ );
\r
3859 if ( FAILED( hr ) ) {
\r
3860 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3861 error( RtAudioError::DRIVER_ERROR );
\r
3865 //-----------------------------------------------------------------------------
\r
3867 RtApiWasapi::~RtApiWasapi()
\r
3869 // if this object previously called CoInitialize()
\r
3870 if ( coInitialized_ ) {
\r
3874 if ( stream_.state != STREAM_CLOSED ) {
\r
3878 SAFE_RELEASE( deviceEnumerator_ );
\r
3881 //=============================================================================
\r
3883 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3885 unsigned int captureDeviceCount = 0;
\r
3886 unsigned int renderDeviceCount = 0;
\r
3888 IMMDeviceCollection* captureDevices = NULL;
\r
3889 IMMDeviceCollection* renderDevices = NULL;
\r
3891 // count capture devices
\r
3892 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3893 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
3895 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3896 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
3898 // count render devices
\r
3899 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3900 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
3902 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3903 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
3906 // release all references
\r
3907 SAFE_RELEASE( captureDevices );
\r
3908 SAFE_RELEASE( renderDevices );
\r
3910 return captureDeviceCount + renderDeviceCount;
\r
3913 //-----------------------------------------------------------------------------
\r
3915 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3917 RtAudio::DeviceInfo info;
\r
3918 unsigned int captureDeviceCount = 0;
\r
3919 unsigned int renderDeviceCount = 0;
\r
3920 std::wstring deviceName;
\r
3921 std::string defaultDeviceName;
\r
3922 bool isCaptureDevice = false;
\r
3924 PROPVARIANT deviceNameProp;
\r
3925 PROPVARIANT defaultDeviceNameProp;
\r
3927 IMMDeviceCollection* captureDevices = NULL;
\r
3928 IMMDeviceCollection* renderDevices = NULL;
\r
3929 IMMDevice* devicePtr = NULL;
\r
3930 IMMDevice* defaultDevicePtr = NULL;
\r
3931 IAudioClient* audioClient = NULL;
\r
3932 IPropertyStore* devicePropStore = NULL;
\r
3933 IPropertyStore* defaultDevicePropStore = NULL;
\r
3935 WAVEFORMATEX* deviceFormat = NULL;
\r
3936 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3939 info.probed = false;
\r
3941 // count capture devices
\r
3942 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3943 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
3945 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3946 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
3948 // count render devices
\r
3949 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3950 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
3952 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3953 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
3955 // validate device index
\r
3956 if ( device >= captureDeviceCount + renderDeviceCount )
\r
3957 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );
\r
3959 // determine whether index falls within capture or render devices
\r
3960 if ( device >= renderDeviceCount ) {
\r
3961 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3962 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );
\r
3964 isCaptureDevice = true;
\r
3967 hr = renderDevices->Item( device, &devicePtr );
\r
3968 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );
\r
3970 isCaptureDevice = false;
\r
3973 // get default device name
\r
3974 if ( isCaptureDevice ) {
\r
3975 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
3976 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default render device handle" );
\r
3979 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
3980 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default capture device handle" );
\r
3983 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
3984 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open default device property store" );
\r
3986 PropVariantInit( &defaultDeviceNameProp );
\r
3988 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
3989 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default device property: PKEY_Device_FriendlyName" );
\r
3991 deviceName = defaultDeviceNameProp.pwszVal;
\r
3992 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
3995 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
3996 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open device property store" );
\r
3998 PropVariantInit( &deviceNameProp );
\r
4000 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4001 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device property: PKEY_Device_FriendlyName" );
\r
4003 deviceName = deviceNameProp.pwszVal;
\r
4004 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4007 if ( isCaptureDevice ) {
\r
4008 info.isDefaultInput = info.name == defaultDeviceName;
\r
4009 info.isDefaultOutput = false;
\r
4012 info.isDefaultInput = false;
\r
4013 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4017 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4018 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4020 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4021 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4023 if ( isCaptureDevice ) {
\r
4024 info.inputChannels = deviceFormat->nChannels;
\r
4025 info.outputChannels = 0;
\r
4026 info.duplexChannels = 0;
\r
4029 info.inputChannels = 0;
\r
4030 info.outputChannels = deviceFormat->nChannels;
\r
4031 info.duplexChannels = 0;
\r
4035 info.sampleRates.clear();
\r
4037 // allow support for sample rates that are multiples of the base rate
\r
4038 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4039 if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) {
\r
4040 if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) {
\r
4041 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4045 if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) {
\r
4046 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4052 info.nativeFormats = 0;
\r
4054 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4055 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4056 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4058 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4059 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4061 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4062 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4065 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4066 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4067 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4069 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4070 info.nativeFormats |= RTAUDIO_SINT8;
\r
4072 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4073 info.nativeFormats |= RTAUDIO_SINT16;
\r
4075 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4076 info.nativeFormats |= RTAUDIO_SINT24;
\r
4078 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4079 info.nativeFormats |= RTAUDIO_SINT32;
\r
4084 info.probed = true;
\r
4087 // release all references
\r
4088 PropVariantClear( &deviceNameProp );
\r
4089 PropVariantClear( &defaultDeviceNameProp );
\r
4091 SAFE_RELEASE( captureDevices );
\r
4092 SAFE_RELEASE( renderDevices );
\r
4093 SAFE_RELEASE( devicePtr );
\r
4094 SAFE_RELEASE( defaultDevicePtr );
\r
4095 SAFE_RELEASE( audioClient );
\r
4096 SAFE_RELEASE( devicePropStore );
\r
4097 SAFE_RELEASE( defaultDevicePropStore );
\r
4099 CoTaskMemFree( deviceFormat );
\r
4100 CoTaskMemFree( closestMatchFormat );
\r
4105 //-----------------------------------------------------------------------------
\r
4107 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4109 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4110 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4118 //-----------------------------------------------------------------------------
\r
4120 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4122 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4123 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4131 //-----------------------------------------------------------------------------
\r
4133 void RtApiWasapi::closeStream( void )
\r
4135 if ( stream_.state == STREAM_CLOSED ) {
\r
4136 errorText_ = "RtApiWasapi::closeStream: No open stream to close";
\r
4137 error( RtAudioError::WARNING );
\r
4141 if ( stream_.state != STREAM_STOPPED )
\r
4144 // clean up stream memory
\r
4145 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4146 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4148 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4149 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4151 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4152 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4154 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4155 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4157 delete stream_.apiHandle;
\r
4158 stream_.apiHandle = NULL;
\r
4160 for ( int i = 0; i < 2; i++ ) {
\r
4161 if ( stream_.userBuffer[i] ) {
\r
4162 free( stream_.userBuffer[i] );
\r
4163 stream_.userBuffer[i] = 0;
\r
4167 if ( stream_.deviceBuffer ) {
\r
4168 free( stream_.deviceBuffer );
\r
4169 stream_.deviceBuffer = 0;
\r
4172 // update stream state
\r
4173 stream_.state = STREAM_CLOSED;
\r
4176 //-----------------------------------------------------------------------------
\r
4178 void RtApiWasapi::startStream( void )
\r
4182 if ( stream_.state == STREAM_RUNNING ) {
\r
4183 errorText_ = "RtApiWasapi::startStream: The stream is already running";
\r
4184 error( RtAudioError::WARNING );
\r
4188 // update stream state
\r
4189 stream_.state = STREAM_RUNNING;
\r
4191 // create WASAPI stream thread
\r
4192 stream_.callbackInfo.thread = ( unsigned int ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4194 if ( !stream_.callbackInfo.thread ) {
\r
4195 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread";
\r
4196 error( RtAudioError::THREAD_ERROR );
\r
4199 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4200 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4204 //-----------------------------------------------------------------------------
\r
4206 void RtApiWasapi::stopStream( void )
\r
4210 if ( stream_.state == STREAM_STOPPED ) {
\r
4211 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped";
\r
4212 error( RtAudioError::WARNING );
\r
4216 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4217 stream_.state = STREAM_STOPPING;
\r
4219 // wait until stream thread is stopped
\r
4220 while( stream_.state != STREAM_STOPPED ) {
\r
4224 // Wait for the last buffer to play before stopping.
\r
4225 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4227 // stop capture client if applicable
\r
4228 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4229 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4230 if ( FAILED( hr ) ) {
\r
4231 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";
\r
4232 error( RtAudioError::DRIVER_ERROR );
\r
4236 // stop render client if applicable
\r
4237 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4238 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4239 if ( FAILED( hr ) ) {
\r
4240 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";
\r
4241 error( RtAudioError::DRIVER_ERROR );
\r
4245 // close thread handle
\r
4246 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4247 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";
\r
4248 error( RtAudioError::THREAD_ERROR );
\r
4251 stream_.callbackInfo.thread = NULL;
\r
4254 //-----------------------------------------------------------------------------
\r
4256 void RtApiWasapi::abortStream( void )
\r
4260 if ( stream_.state == STREAM_STOPPED ) {
\r
4261 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped";
\r
4262 error( RtAudioError::WARNING );
\r
4266 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4267 stream_.state = STREAM_STOPPING;
\r
4269 // wait until stream thread is stopped
\r
4270 while ( stream_.state != STREAM_STOPPED ) {
\r
4274 // stop capture client if applicable
\r
4275 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4276 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4277 if ( FAILED( hr ) ) {
\r
4278 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";
\r
4279 error( RtAudioError::DRIVER_ERROR );
\r
4283 // stop render client if applicable
\r
4284 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4285 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4286 if ( FAILED( hr ) ) {
\r
4287 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";
\r
4288 error( RtAudioError::DRIVER_ERROR );
\r
4292 // close thread handle
\r
4293 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4294 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";
\r
4295 error( RtAudioError::THREAD_ERROR );
\r
4298 stream_.callbackInfo.thread = NULL;
\r
4301 //-----------------------------------------------------------------------------
\r
4303 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4304 unsigned int firstChannel, unsigned int sampleRate,
\r
4305 RtAudioFormat format, unsigned int* bufferSize,
\r
4306 RtAudio::StreamOptions* options )
\r
4308 bool methodResult = FAILURE;
\r
4309 unsigned int captureDeviceCount = 0;
\r
4310 unsigned int renderDeviceCount = 0;
\r
4312 IMMDeviceCollection* captureDevices = NULL;
\r
4313 IMMDeviceCollection* renderDevices = NULL;
\r
4314 IMMDevice* devicePtr = NULL;
\r
4315 WAVEFORMATEX* deviceFormat = NULL;
\r
4317 // create API Handle if not already created
\r
4318 if ( !stream_.apiHandle )
\r
4319 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4321 // count capture devices
\r
4322 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4323 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
4325 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4326 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
4328 // count render devices
\r
4329 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4330 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
4332 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4333 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
4335 // validate device index
\r
4336 if ( device >= captureDeviceCount + renderDeviceCount )
\r
4337 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );
\r
4339 // determine whether index falls within capture or render devices
\r
4340 if ( device >= renderDeviceCount ) {
\r
4341 if ( mode != INPUT )
\r
4342 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Capture device selected as output device" );
\r
4344 // retrieve captureAudioClient from devicePtr
\r
4345 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4347 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4348 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );
\r
4350 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4351 NULL, ( void** ) &captureAudioClient );
\r
4352 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4354 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4355 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4357 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4358 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4361 if ( mode != OUTPUT )
\r
4362 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Render device selected as input device" );
\r
4364 // retrieve renderAudioClient from devicePtr
\r
4365 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4367 hr = renderDevices->Item( device, &devicePtr );
\r
4368 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );
\r
4370 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4371 NULL, ( void** ) &renderAudioClient );
\r
4372 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4374 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4375 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4377 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4378 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4381 // fill stream data
\r
4382 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4383 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4384 stream_.mode = DUPLEX;
\r
4387 stream_.mode = mode;
\r
4390 stream_.device[mode] = device;
\r
4391 stream_.state = STREAM_STOPPED;
\r
4392 stream_.doByteSwap[mode] = false;
\r
4393 stream_.sampleRate = sampleRate;
\r
4394 stream_.bufferSize = *bufferSize;
\r
4395 stream_.nBuffers = 1;
\r
4396 stream_.nUserChannels[mode] = channels;
\r
4397 stream_.channelOffset[mode] = firstChannel;
\r
4398 stream_.userFormat = format;
\r
4399 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4401 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4402 stream_.userInterleaved = false;
\r
4404 stream_.userInterleaved = true;
\r
4405 stream_.deviceInterleaved[mode] = true;
\r
4407 // Set flags for buffer conversion.
\r
4408 stream_.doConvertBuffer[mode] = false;
\r
4409 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4410 stream_.doConvertBuffer[mode] = true;
\r
4411 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4412 stream_.nUserChannels[mode] > 1 )
\r
4413 stream_.doConvertBuffer[mode] = true;
\r
4415 if ( stream_.doConvertBuffer[mode] )
\r
4416 setConvertInfo( mode, 0 );
\r
4418 // Allocate necessary internal buffers
\r
4419 unsigned int bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4421 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4422 if ( !stream_.userBuffer[mode] )
\r
4423 EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating user buffer memory" );
\r
4425 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4426 stream_.callbackInfo.priority = 15;
\r
4428 stream_.callbackInfo.priority = 0;
\r
4430 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4431 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4433 methodResult = SUCCESS;
\r
4438 SAFE_RELEASE( captureDevices );
\r
4439 SAFE_RELEASE( renderDevices );
\r
4440 SAFE_RELEASE( devicePtr );
\r
4442 CoTaskMemFree( deviceFormat );
\r
4444 // if method failed, close the stream
\r
4445 if ( methodResult == FAILURE )
\r
4448 return methodResult;
\r
4451 //=============================================================================
\r
4453 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4456 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4461 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4464 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4469 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4472 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4477 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream-servicing thread.  On first entry it lazily
// initializes the capture and/or render IAudioClient objects (shared
// mode, event-driven), then loops until stream_.state becomes
// STREAM_STOPPING: pull captured frames -> resample/convert to the
// user's rate, channel count and format -> run the user callback ->
// convert the callback's output back -> feed the render client.
// Errors bail out via the EXIT_ON_ERROR macro (defined elsewhere in
// the file).
// NOTE(review): this extraction is missing interior lines (opening and
// closing braces, several else branches, and the cleanup/exit label) —
// gaps in the embedded line numbering mark where text was dropped.
// Comments below describe only the visible code.
4479 void RtApiWasapi::wasapiThread()
\r
4481 // as this is a new thread, we must CoInitialize it
\r
4482 CoInitialize( NULL );
\r
4486 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4487 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4488 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4489 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4490 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4491 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4493 WAVEFORMATEX* captureFormat = NULL;
\r
4494 WAVEFORMATEX* renderFormat = NULL;
\r
4495 float captureSrRatio = 0.0f;
\r
4496 float renderSrRatio = 0.0f;
\r
4497 WasapiBuffer captureBuffer;
\r
4498 WasapiBuffer renderBuffer;
\r
4500 // Attempt to assign "Pro Audio" characteristic to thread
\r
4501 HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
\r
// NOTE(review): AvrtDll is dereferenced below without a visible NULL
// check — confirm the full source guards LoadLibrary failure (lines are
// missing from this extraction at 4502).
4503 DWORD taskIndex = 0;
\r
4504 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4505 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4506 FreeLibrary( AvrtDll );
\r
4509 // start capture stream if applicable
\r
4510 if ( captureAudioClient ) {
\r
4511 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4512 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4514 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4516 // initialize capture stream according to desired buffer size
\r
4517 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4518 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4520 if ( !captureClient ) {
\r
4521 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4522 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4523 desiredBufferPeriod,
\r
4524 desiredBufferPeriod,
\r
4527 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize capture audio client" );
\r
4529 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4530 ( void** ) &captureClient );
\r
4531 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture client handle" );
\r
4533 // configure captureEvent to trigger on every available capture buffer
\r
4534 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4535 if ( !captureEvent )
\r
4536 EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create capture event" );
\r
4538 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4539 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set capture event handle" );
\r
// Cache the lazily-created COM objects back into the shared handle so
// stopStream()/closeStream() can release them.
4541 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4542 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4545 unsigned int inBufferSize = 0;
\r
4546 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4547 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get capture buffer size" );
\r
4549 // scale outBufferSize according to stream->user sample rate ratio
\r
4550 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4551 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4553 // set captureBuffer size
\r
4554 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4556 // reset the capture stream
\r
4557 hr = captureAudioClient->Reset();
\r
4558 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset capture stream" );
\r
4560 // start the capture stream
\r
4561 hr = captureAudioClient->Start();
\r
4562 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start capture stream" );
\r
4565 // start render stream if applicable
\r
4566 if ( renderAudioClient ) {
\r
4567 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4568 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4570 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4572 // initialize render stream according to desired buffer size
\r
4573 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4574 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4576 if ( !renderClient ) {
\r
4577 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4578 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4579 desiredBufferPeriod,
\r
4580 desiredBufferPeriod,
\r
4583 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize render audio client" );
\r
4585 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4586 ( void** ) &renderClient );
\r
4587 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render client handle" );
\r
4589 // configure renderEvent to trigger on every available render buffer
\r
4590 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4591 if ( !renderEvent )
\r
4592 EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create render event" );
\r
4594 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4595 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set render event handle" );
\r
// Cache the lazily-created COM objects back into the shared handle.
4597 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4598 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4601 unsigned int outBufferSize = 0;
\r
4602 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4603 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get render buffer size" );
\r
4605 // scale inBufferSize according to user->stream sample rate ratio
\r
4606 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4607 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4609 // set renderBuffer size
\r
4610 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4612 // reset the render stream
\r
4613 hr = renderAudioClient->Reset();
\r
4614 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset render stream" );
\r
4616 // start the render stream
\r
4617 hr = renderAudioClient->Start();
\r
4618 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start render stream" );
\r
4621 // declare local stream variables
\r
4622 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4624 BYTE* streamBuffer = NULL;
\r
4625 unsigned long captureFlags = 0;
\r
4627 unsigned int bufferFrameCount = 0;
\r
4628 unsigned int numFramesPadding = 0;
\r
4629 unsigned int convBufferSize = 0;
\r
4631 bool callbackPushed = false;
\r
4632 bool callbackPulled = false;
\r
4633 bool callbackStopped = false;
\r
4635 int callbackResult = 0;
\r
4637 // convBuffer is used to store converted buffers between WASAPI and the user
\r
// deviceBufferSize is sized for the worst case of the active direction(s);
// in DUPLEX mode it must hold whichever side needs more bytes.
4638 unsigned int deviceBufferSize = 0;
\r
4639 if ( stream_.mode == INPUT ) {
\r
4640 deviceBufferSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4642 else if ( stream_.mode == OUTPUT ) {
\r
4643 deviceBufferSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4645 else if ( stream_.mode == DUPLEX ) {
\r
4646 deviceBufferSize = max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4647 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4650 char* convBuffer = ( char* ) malloc( deviceBufferSize );
\r
4651 stream_.deviceBuffer = ( char* ) malloc( deviceBufferSize );
\r
4652 if ( !convBuffer || !stream_.deviceBuffer )
\r
4653 EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating device buffer memory" );
\r
4655 // stream process loop
\r
4656 while ( stream_.state != STREAM_STOPPING ) {
\r
4657 if ( !callbackPulled ) {
\r
4660 // 1. Pull callback buffer from inputBuffer
\r
4661 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4662 // Convert callback buffer to user format
\r
4664 if ( captureAudioClient ) {
\r
4665 // Pull callback buffer from inputBuffer
\r
4666 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4667 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4668 stream_.deviceFormat[INPUT] );
\r
4670 if ( callbackPulled ) {
\r
4671 // Convert callback buffer to user sample rate and channel count
\r
4672 convertBufferWasapi( stream_.deviceBuffer,
\r
4674 stream_.nDeviceChannels[INPUT],
\r
4675 stream_.nUserChannels[INPUT],
\r
4676 captureFormat->nSamplesPerSec,
\r
4677 stream_.sampleRate,
\r
4678 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4680 stream_.deviceFormat[INPUT] );
\r
4682 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4683 // Convert callback buffer to user format
\r
4684 convertBuffer( stream_.userBuffer[INPUT],
\r
4685 stream_.deviceBuffer,
\r
4686 stream_.convertInfo[INPUT] );
\r
4689 // no conversion, simple copy deviceBuffer to userBuffer
\r
4690 memcpy( stream_.userBuffer[INPUT],
\r
4691 stream_.deviceBuffer,
\r
4692 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4697 // if there is no capture stream, set callbackPulled flag
\r
4698 callbackPulled = true;
\r
4701 // Execute Callback
\r
4702 // ================
\r
4703 // 1. Execute user callback method
\r
4704 // 2. Handle return value from callback
\r
4706 // if callback has not requested the stream to stop
\r
4707 if ( callbackPulled && !callbackStopped ) {
\r
4708 // Execute user callback method
\r
4709 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4710 stream_.userBuffer[INPUT],
\r
4711 stream_.bufferSize,
\r
4713 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4714 stream_.callbackInfo.userData );
\r
4716 // Handle return value from callback
\r
// A stop/abort request cannot be honored inline — this IS the stream
// thread — so a helper thread is spawned to call stopStream()/abortStream().
4717 if ( callbackResult == 1 ) {
\r
4718 // instantiate a thread to stop this thread
\r
4719 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, NULL, NULL );
\r
4721 if ( !threadHandle ) {
\r
4722 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream stop thread" );
\r
4724 else if ( !CloseHandle( threadHandle ) ) {
\r
4725 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream stop thread handle" );
\r
4728 callbackStopped = true;
\r
4730 else if ( callbackResult == 2 ) {
\r
4731 // instantiate a thread to stop this thread
\r
4732 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, NULL, NULL );
\r
4734 if ( !threadHandle ) {
\r
4735 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream abort thread" );
\r
4737 else if ( !CloseHandle( threadHandle ) ) {
\r
4738 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream abort thread handle" );
\r
4741 callbackStopped = true;
\r
4746 // Callback Output
\r
4747 // ===============
\r
4748 // 1. Convert callback buffer to stream format
\r
4749 // 2. Convert callback buffer to stream sample rate and channel count
\r
4750 // 3. Push callback buffer into outputBuffer
\r
4752 if ( renderAudioClient && callbackPulled ) {
\r
4753 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4754 // Convert callback buffer to stream format
\r
4755 convertBuffer( stream_.deviceBuffer,
\r
4756 stream_.userBuffer[OUTPUT],
\r
4757 stream_.convertInfo[OUTPUT] );
\r
4759 // Convert callback buffer to stream sample rate and channel count
\r
4760 convertBufferWasapi( convBuffer,
\r
4761 stream_.deviceBuffer,
\r
4762 stream_.nUserChannels[OUTPUT],
\r
4763 stream_.nDeviceChannels[OUTPUT],
\r
4764 stream_.sampleRate,
\r
4765 renderFormat->nSamplesPerSec,
\r
4766 stream_.bufferSize,
\r
4768 stream_.deviceFormat[OUTPUT] );
\r
4771 // Convert callback buffer to stream sample rate and channel count
\r
4772 convertBufferWasapi( convBuffer,
\r
4773 stream_.userBuffer[OUTPUT],
\r
4774 stream_.nUserChannels[OUTPUT],
\r
4775 stream_.nDeviceChannels[OUTPUT],
\r
4776 stream_.sampleRate,
\r
4777 renderFormat->nSamplesPerSec,
\r
4778 stream_.bufferSize,
\r
4780 stream_.deviceFormat[OUTPUT] );
\r
4783 // Push callback buffer into outputBuffer
\r
// NOTE(review): convBufferSize is only visibly set to 0 (line 4629);
// its update presumably sits in lines dropped from this extraction
// (e.g. as an out-parameter of convertBufferWasapi) — verify upstream.
4784 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4785 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4786 stream_.deviceFormat[OUTPUT] );
\r
4791 // 1. Get capture buffer from stream
\r
4792 // 2. Push capture buffer into inputBuffer
\r
4793 // 3. If 2. was successful: Release capture buffer
\r
4795 if ( captureAudioClient ) {
\r
4796 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4797 if ( !callbackPulled ) {
\r
4798 WaitForSingleObject( captureEvent, INFINITE );
\r
4801 // Get capture buffer from stream
\r
4802 hr = captureClient->GetBuffer( &streamBuffer,
\r
4803 &bufferFrameCount,
\r
4804 &captureFlags, NULL, NULL );
\r
4805 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture buffer" );
\r
4807 if ( bufferFrameCount != 0 ) {
\r
4808 // Push capture buffer into inputBuffer
\r
4809 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4810 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4811 stream_.deviceFormat[INPUT] ) )
\r
4813 // Release capture buffer
\r
4814 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4815 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4819 // Inform WASAPI that capture was unsuccessful
\r
4820 hr = captureClient->ReleaseBuffer( 0 );
\r
4821 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4826 // Inform WASAPI that capture was unsuccessful
\r
4827 hr = captureClient->ReleaseBuffer( 0 );
\r
4828 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4834 // 1. Get render buffer from stream
\r
4835 // 2. Pull next buffer from outputBuffer
\r
4836 // 3. If 2. was successful: Fill render buffer with next buffer
\r
4837 // Release render buffer
\r
4839 if ( renderAudioClient ) {
\r
4840 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
4841 if ( callbackPulled && !callbackPushed ) {
\r
4842 WaitForSingleObject( renderEvent, INFINITE );
\r
4845 // Get render buffer from stream
\r
4846 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
4847 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer size" );
\r
4849 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
4850 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer padding" );
\r
// Only the unpadded portion of the endpoint buffer may be written.
4852 bufferFrameCount -= numFramesPadding;
\r
4854 if ( bufferFrameCount != 0 ) {
\r
4855 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
4856 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer" );
\r
4858 // Pull next buffer from outputBuffer
\r
4859 // Fill render buffer with next buffer
\r
4860 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
4861 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
4862 stream_.deviceFormat[OUTPUT] ) )
\r
4864 // Release render buffer
\r
4865 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
4866 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4870 // Inform WASAPI that render was unsuccessful
\r
4871 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
4872 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4877 // Inform WASAPI that render was unsuccessful
\r
4878 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
4879 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4883 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
4884 if ( callbackPushed ) {
\r
4885 callbackPulled = false;
\r
4888 // tick stream time
\r
4889 RtApi::tickStreamTime();
\r
// Cleanup: release the COM-allocated mix formats and the local
// conversion buffer.  (stream_.deviceBuffer is freed elsewhere;
// additional cleanup lines 4896-4901 are missing from this extraction.)
4894 CoTaskMemFree( captureFormat );
\r
4895 CoTaskMemFree( renderFormat );
\r
4897 //delete convBuffer;
\r
4898 free ( convBuffer );
\r
4902 // update stream state
\r
4903 stream_.state = STREAM_STOPPED;
\r
4906 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
4910 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
4912 // Modified by Robin Davies, October 2005
\r
4913 // - Improvements to DirectX pointer chasing.
\r
4914 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
4915 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
4916 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
4917 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
4919 #include <dsound.h>
\r
4920 #include <assert.h>
\r
4921 #include <algorithm>
\r
4923 #if defined(__MINGW32__)
\r
4924 // missing from latest mingw winapi
\r
4925 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
4926 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
4927 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
4928 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
4931 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
4933 #ifdef _MSC_VER // if Microsoft Visual C++
\r
4934 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
4937 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
4939 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
4940 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
4941 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
4942 return pointer >= earlierPointer && pointer < laterPointer;
\r
4945 // A structure to hold various information related to the DirectSound
\r
4946 // API implementation.
\r
// NOTE(review): the `struct DsHandle {` opener and several member
// declarations (id[], buffer[], xrun[]) referenced by the constructor
// below are missing from this extraction — only a fragment is visible.
4948 unsigned int drainCounter; // Tracks callback counts when draining
\r
4949 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
4953 UINT bufferPointer[2];
\r
4954 DWORD dsBufferSize[2];
\r
4955 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
// Default constructor: zero every per-direction slot ([0]=output, [1]=input,
// judging by the paired indices used throughout the DS code below).
4959 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
4962 // Declarations for utility functions, callbacks, and structures
\r
4963 // specific to the DirectSound implementation.
\r
// Enumeration callback passed to DirectSound(Capture)Enumerate; fills the
// DsDevice vector carried in lpContext (see DsProbeData below).
4964 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
4965 LPCTSTR description,
\r
4967 LPVOID lpContext );
\r
// Maps a DirectSound HRESULT code to a human-readable message.
4969 static const char* getErrorString( int code );
\r
// Entry point for the DS callback/service thread (see _beginthreadex usage
// elsewhere in the file — not visible in this extraction).
4971 static unsigned __stdcall callbackHandler( void *ptr );
\r
// DsDevice default-member-initializer fragment: device starts unfound with
// neither output ([0]) nor input ([1]) GUID valid.  (The struct opener is
// missing from this extraction.)
4980 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed through the enumeration callback: direction flag plus the
// device list to fill.
4983 struct DsProbeData {
\r
4985 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread.  The result is remembered
// in coInitialized_ so the destructor only calls CoUninitialize() when
// this object's CoInitialize() actually succeeded.
4988 RtApiDs :: RtApiDs()
\r
4990 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
4991 // accept whatever the mainline chose for a threading model.
\r
4992 coInitialized_ = false;
\r
4993 HRESULT hr = CoInitialize( NULL );
\r
4994 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize() and make sure an
// open stream is torn down before the object goes away.
4997 RtApiDs :: ~RtApiDs()
\r
4999 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5000 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5003 // The DirectSound default output is always the first device.
\r
// NOTE(review): the function body (presumably `return 0;`) is missing
// from this extraction — only the declaration is visible.
5004 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5009 // The DirectSound default input is always the first input device,
\r
5010 // which is the first capture device enumerated.
\r
// NOTE(review): the function body (presumably `return 0;`) is missing
// from this extraction — only the declaration is visible.
5011 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerate DirectSound render and capture devices, merge the results
// into the cached dsDevices list, drop entries that have disappeared, and
// return the resulting device count.  Enumeration failures are reported
// as WARNINGs and do not abort the count.
5016 unsigned int RtApiDs :: getDeviceCount( void )
\r
5018 // Set query flag for previously found devices to false, so that we
\r
5019 // can check for any devices that have disappeared.
\r
5020 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5021 dsDevices[i].found = false;
\r
5023 // Query DirectSound devices.
\r
5024 struct DsProbeData probeInfo;
\r
5025 probeInfo.isInput = false;
\r
5026 probeInfo.dsDevices = &dsDevices;
\r
5027 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5028 if ( FAILED( result ) ) {
\r
5029 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5030 errorText_ = errorStream_.str();
\r
5031 error( RtAudioError::WARNING );
\r
5034 // Query DirectSoundCapture devices.
\r
5035 probeInfo.isInput = true;
\r
5036 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5037 if ( FAILED( result ) ) {
\r
5038 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5039 errorText_ = errorStream_.str();
\r
5040 error( RtAudioError::WARNING );
\r
5043 // Clean out any devices that may have disappeared.
\r
5044 std::vector< int > indices;
\r
5045 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5046 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5047 //unsigned int nErased = 0;
\r
// NOTE(review): erasing by the saved ascending indices is incorrect when
// more than one device has disappeared — each erase shifts the later
// indices left by one.  The commented-out nErased line suggests this was
// noticed; confirm against the upstream fix before relying on it.
5048 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5049 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5050 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5052 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe one DirectSound device (by enumeration index) and fill an
// RtAudio::DeviceInfo: output side via DirectSoundCreate/GetCaps, input
// side via DirectSoundCaptureCreate/GetCaps, deriving channel counts,
// supported sample rates, and native formats from the reported caps
// bits.  Probe failures raise a WARNING and return the partially filled
// info (info.probed stays false until a side succeeds).
// NOTE(review): interior lines are missing from this extraction —
// braces, the `probeInput:` label targeted by the goto below, and the
// declarations of `result`, `outCaps` and `inCaps`.  Comments describe
// only the visible code.
5055 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5057 RtAudio::DeviceInfo info;
\r
5058 info.probed = false;
\r
5060 if ( dsDevices.size() == 0 ) {
\r
5061 // Force a query of all devices
\r
5063 if ( dsDevices.size() == 0 ) {
\r
5064 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5065 error( RtAudioError::INVALID_USE );
\r
5070 if ( device >= dsDevices.size() ) {
\r
5071 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5072 error( RtAudioError::INVALID_USE );
\r
// Skip the output probe entirely when this device has no valid
// output (render) GUID.
5077 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5079 LPDIRECTSOUND output;
\r
5081 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5082 if ( FAILED( result ) ) {
\r
5083 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5084 errorText_ = errorStream_.str();
\r
5085 error( RtAudioError::WARNING );
\r
5089 outCaps.dwSize = sizeof( outCaps );
\r
5090 result = output->GetCaps( &outCaps );
\r
5091 if ( FAILED( result ) ) {
\r
5092 output->Release();
\r
5093 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5094 errorText_ = errorStream_.str();
\r
5095 error( RtAudioError::WARNING );
\r
5099 // Get output channel information.
\r
5100 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5102 // Get sample rate information.
\r
5103 info.sampleRates.clear();
\r
5104 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5105 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5106 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5107 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5110 // Get format information.
\r
5111 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5112 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5114 output->Release();
\r
5116 if ( getDefaultOutputDevice() == device )
\r
5117 info.isDefaultOutput = true;
\r
// No capture GUID: this is an output-only device — finish here.
5119 if ( dsDevices[ device ].validId[1] == false ) {
\r
5120 info.name = dsDevices[ device ].name;
\r
5121 info.probed = true;
\r
5127 LPDIRECTSOUNDCAPTURE input;
\r
5128 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5129 if ( FAILED( result ) ) {
\r
5130 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5131 errorText_ = errorStream_.str();
\r
5132 error( RtAudioError::WARNING );
\r
5137 inCaps.dwSize = sizeof( inCaps );
\r
5138 result = input->GetCaps( &inCaps );
\r
5139 if ( FAILED( result ) ) {
\r
5141 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5142 errorText_ = errorStream_.str();
\r
5143 error( RtAudioError::WARNING );
\r
5147 // Get input channel information.
\r
5148 info.inputChannels = inCaps.dwChannels;
\r
5150 // Get sample rate and format information.
\r
// The DSC caps report fixed (rate, channels, width) combinations as
// WAVE_FORMAT_* bits; translate them into native formats and a rate list.
5151 std::vector<unsigned int> rates;
\r
5152 if ( inCaps.dwChannels >= 2 ) {
\r
5153 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5154 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5155 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5156 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5157 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5158 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5159 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5160 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5162 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5163 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5164 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5165 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5166 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5168 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5169 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5170 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5171 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5172 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5175 else if ( inCaps.dwChannels == 1 ) {
\r
5176 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5177 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5178 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5179 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5180 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5181 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5182 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5183 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5185 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5186 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5187 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5188 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5189 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5191 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5192 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5193 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5194 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5195 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5198 else info.inputChannels = 0; // technically, this would be an error
\r
5202 if ( info.inputChannels == 0 ) return info;
\r
5204 // Copy the supported rates to the info structure but avoid duplication.
\r
5206 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5208 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5209 if ( rates[i] == info.sampleRates[j] ) {
\r
5214 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5216 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5218 // If device opens for both playback and capture, we determine the channels.
\r
5219 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5220 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
// Index 0 is treated as the default input, consistent with the comment
// above getDefaultInputDevice().
5222 if ( device == 0 ) info.isDefaultInput = true;
\r
5224 // Copy name and return.
\r
5225 info.name = dsDevices[ device ].name;
\r
5226 info.probed = true;
\r
5230 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5231 unsigned int firstChannel, unsigned int sampleRate,
\r
5232 RtAudioFormat format, unsigned int *bufferSize,
\r
5233 RtAudio::StreamOptions *options )
\r
5235 if ( channels + firstChannel > 2 ) {
\r
5236 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5240 size_t nDevices = dsDevices.size();
\r
5241 if ( nDevices == 0 ) {
\r
5242 // This should not happen because a check is made before this function is called.
\r
5243 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5247 if ( device >= nDevices ) {
\r
5248 // This should not happen because a check is made before this function is called.
\r
5249 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5253 if ( mode == OUTPUT ) {
\r
5254 if ( dsDevices[ device ].validId[0] == false ) {
\r
5255 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5256 errorText_ = errorStream_.str();
\r
5260 else { // mode == INPUT
\r
5261 if ( dsDevices[ device ].validId[1] == false ) {
\r
5262 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5263 errorText_ = errorStream_.str();
\r
5268 // According to a note in PortAudio, using GetDesktopWindow()
\r
5269 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5270 // that occur when the application's window is not the foreground
\r
5271 // window. Also, if the application window closes before the
\r
5272 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5273 // problems when using GetDesktopWindow() but it seems fine now
\r
5274 // (January 2010). I'll leave it commented here.
\r
5275 // HWND hWnd = GetForegroundWindow();
\r
5276 HWND hWnd = GetDesktopWindow();
\r
5278 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5279 // two. This is a judgement call and a value of two is probably too
\r
5280 // low for capture, but it should work for playback.
\r
5282 if ( options ) nBuffers = options->numberOfBuffers;
\r
5283 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5284 if ( nBuffers < 2 ) nBuffers = 3;
\r
5286 // Check the lower range of the user-specified buffer size and set
\r
5287 // (arbitrarily) to a lower bound of 32.
\r
5288 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5290 // Create the wave format structure. The data format setting will
\r
5291 // be determined later.
\r
5292 WAVEFORMATEX waveFormat;
\r
5293 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5294 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5295 waveFormat.nChannels = channels + firstChannel;
\r
5296 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5298 // Determine the device buffer size. By default, we'll use the value
\r
5299 // defined above (32K), but we will grow it to make allowances for
\r
5300 // very large software buffer sizes.
\r
5301 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5302 DWORD dsPointerLeadTime = 0;
\r
5304 void *ohandle = 0, *bhandle = 0;
\r
5306 if ( mode == OUTPUT ) {
\r
5308 LPDIRECTSOUND output;
\r
5309 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5310 if ( FAILED( result ) ) {
\r
5311 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5312 errorText_ = errorStream_.str();
\r
5317 outCaps.dwSize = sizeof( outCaps );
\r
5318 result = output->GetCaps( &outCaps );
\r
5319 if ( FAILED( result ) ) {
\r
5320 output->Release();
\r
5321 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5322 errorText_ = errorStream_.str();
\r
5326 // Check channel information.
\r
5327 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5328 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5329 errorText_ = errorStream_.str();
\r
5333 // Check format information. Use 16-bit format unless not
\r
5334 // supported or user requests 8-bit.
\r
5335 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5336 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5337 waveFormat.wBitsPerSample = 16;
\r
5338 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5341 waveFormat.wBitsPerSample = 8;
\r
5342 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5344 stream_.userFormat = format;
\r
5346 // Update wave format structure and buffer information.
\r
5347 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5348 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5349 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5351 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5352 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5353 dsBufferSize *= 2;
\r
5355 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5356 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5357 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5358 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5359 if ( FAILED( result ) ) {
\r
5360 output->Release();
\r
5361 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5362 errorText_ = errorStream_.str();
\r
5366 // Even though we will write to the secondary buffer, we need to
\r
5367 // access the primary buffer to set the correct output format
\r
5368 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5369 // buffer description.
\r
5370 DSBUFFERDESC bufferDescription;
\r
5371 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5372 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5373 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5375 // Obtain the primary buffer
\r
5376 LPDIRECTSOUNDBUFFER buffer;
\r
5377 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5378 if ( FAILED( result ) ) {
\r
5379 output->Release();
\r
5380 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5381 errorText_ = errorStream_.str();
\r
5385 // Set the primary DS buffer sound format.
\r
5386 result = buffer->SetFormat( &waveFormat );
\r
5387 if ( FAILED( result ) ) {
\r
5388 output->Release();
\r
5389 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5390 errorText_ = errorStream_.str();
\r
5394 // Setup the secondary DS buffer description.
\r
5395 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5396 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5397 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5398 DSBCAPS_GLOBALFOCUS |
\r
5399 DSBCAPS_GETCURRENTPOSITION2 |
\r
5400 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5401 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5402 bufferDescription.lpwfxFormat = &waveFormat;
\r
5404 // Try to create the secondary DS buffer. If that doesn't work,
\r
5405 // try to use software mixing. Otherwise, there's a problem.
\r
5406 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5407 if ( FAILED( result ) ) {
\r
5408 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5409 DSBCAPS_GLOBALFOCUS |
\r
5410 DSBCAPS_GETCURRENTPOSITION2 |
\r
5411 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5412 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5413 if ( FAILED( result ) ) {
\r
5414 output->Release();
\r
5415 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5416 errorText_ = errorStream_.str();
\r
5421 // Get the buffer size ... might be different from what we specified.
\r
5423 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5424 result = buffer->GetCaps( &dsbcaps );
\r
5425 if ( FAILED( result ) ) {
\r
5426 output->Release();
\r
5427 buffer->Release();
\r
5428 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5429 errorText_ = errorStream_.str();
\r
5433 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5435 // Lock the DS buffer
\r
5438 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5439 if ( FAILED( result ) ) {
\r
5440 output->Release();
\r
5441 buffer->Release();
\r
5442 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5443 errorText_ = errorStream_.str();
\r
5447 // Zero the DS buffer
\r
5448 ZeroMemory( audioPtr, dataLen );
\r
5450 // Unlock the DS buffer
\r
5451 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5452 if ( FAILED( result ) ) {
\r
5453 output->Release();
\r
5454 buffer->Release();
\r
5455 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5456 errorText_ = errorStream_.str();
\r
5460 ohandle = (void *) output;
\r
5461 bhandle = (void *) buffer;
\r
5464 if ( mode == INPUT ) {
\r
5466 LPDIRECTSOUNDCAPTURE input;
\r
5467 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5468 if ( FAILED( result ) ) {
\r
5469 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5470 errorText_ = errorStream_.str();
\r
5475 inCaps.dwSize = sizeof( inCaps );
\r
5476 result = input->GetCaps( &inCaps );
\r
5477 if ( FAILED( result ) ) {
\r
5479 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5480 errorText_ = errorStream_.str();
\r
5484 // Check channel information.
\r
5485 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5486 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5490 // Check format information. Use 16-bit format unless user
\r
5491 // requests 8-bit.
\r
5492 DWORD deviceFormats;
\r
5493 if ( channels + firstChannel == 2 ) {
\r
5494 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5495 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5496 waveFormat.wBitsPerSample = 8;
\r
5497 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5499 else { // assume 16-bit is supported
\r
5500 waveFormat.wBitsPerSample = 16;
\r
5501 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5504 else { // channel == 1
\r
5505 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5506 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5507 waveFormat.wBitsPerSample = 8;
\r
5508 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5510 else { // assume 16-bit is supported
\r
5511 waveFormat.wBitsPerSample = 16;
\r
5512 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5515 stream_.userFormat = format;
\r
5517 // Update wave format structure and buffer information.
\r
5518 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5519 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5520 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5522 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5523 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5524 dsBufferSize *= 2;
\r
5526 // Setup the secondary DS buffer description.
\r
5527 DSCBUFFERDESC bufferDescription;
\r
5528 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5529 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5530 bufferDescription.dwFlags = 0;
\r
5531 bufferDescription.dwReserved = 0;
\r
5532 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5533 bufferDescription.lpwfxFormat = &waveFormat;
\r
5535 // Create the capture buffer.
\r
5536 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5537 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5538 if ( FAILED( result ) ) {
\r
5540 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5541 errorText_ = errorStream_.str();
\r
5545 // Get the buffer size ... might be different from what we specified.
\r
5546 DSCBCAPS dscbcaps;
\r
5547 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5548 result = buffer->GetCaps( &dscbcaps );
\r
5549 if ( FAILED( result ) ) {
\r
5551 buffer->Release();
\r
5552 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5553 errorText_ = errorStream_.str();
\r
5557 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5559 // NOTE: We could have a problem here if this is a duplex stream
\r
5560 // and the play and capture hardware buffer sizes are different
\r
5561 // (I'm actually not sure if that is a problem or not).
\r
5562 // Currently, we are not verifying that.
\r
5564 // Lock the capture buffer
\r
5567 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5568 if ( FAILED( result ) ) {
\r
5570 buffer->Release();
\r
5571 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5572 errorText_ = errorStream_.str();
\r
5576 // Zero the buffer
\r
5577 ZeroMemory( audioPtr, dataLen );
\r
5579 // Unlock the buffer
\r
5580 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5581 if ( FAILED( result ) ) {
\r
5583 buffer->Release();
\r
5584 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5585 errorText_ = errorStream_.str();
\r
5589 ohandle = (void *) input;
\r
5590 bhandle = (void *) buffer;
\r
5593 // Set various stream parameters
\r
5594 DsHandle *handle = 0;
\r
5595 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5596 stream_.nUserChannels[mode] = channels;
\r
5597 stream_.bufferSize = *bufferSize;
\r
5598 stream_.channelOffset[mode] = firstChannel;
\r
5599 stream_.deviceInterleaved[mode] = true;
\r
5600 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5601 else stream_.userInterleaved = true;
\r
5603 // Set flag for buffer conversion
\r
5604 stream_.doConvertBuffer[mode] = false;
\r
5605 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5606 stream_.doConvertBuffer[mode] = true;
\r
5607 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5608 stream_.doConvertBuffer[mode] = true;
\r
5609 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5610 stream_.nUserChannels[mode] > 1 )
\r
5611 stream_.doConvertBuffer[mode] = true;
\r
5613 // Allocate necessary internal buffers
\r
5614 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5615 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5616 if ( stream_.userBuffer[mode] == NULL ) {
\r
5617 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5621 if ( stream_.doConvertBuffer[mode] ) {
\r
5623 bool makeBuffer = true;
\r
5624 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5625 if ( mode == INPUT ) {
\r
5626 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5627 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5628 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5632 if ( makeBuffer ) {
\r
5633 bufferBytes *= *bufferSize;
\r
5634 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5635 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5636 if ( stream_.deviceBuffer == NULL ) {
\r
5637 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5643 // Allocate our DsHandle structures for the stream.
\r
5644 if ( stream_.apiHandle == 0 ) {
\r
5646 handle = new DsHandle;
\r
5648 catch ( std::bad_alloc& ) {
\r
5649 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5653 // Create a manual-reset event.
\r
5654 handle->condition = CreateEvent( NULL, // no security
\r
5655 TRUE, // manual-reset
\r
5656 FALSE, // non-signaled initially
\r
5657 NULL ); // unnamed
\r
5658 stream_.apiHandle = (void *) handle;
\r
5661 handle = (DsHandle *) stream_.apiHandle;
\r
5662 handle->id[mode] = ohandle;
\r
5663 handle->buffer[mode] = bhandle;
\r
5664 handle->dsBufferSize[mode] = dsBufferSize;
\r
5665 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5667 stream_.device[mode] = device;
\r
5668 stream_.state = STREAM_STOPPED;
\r
5669 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5670 // We had already set up an output stream.
\r
5671 stream_.mode = DUPLEX;
\r
5673 stream_.mode = mode;
\r
5674 stream_.nBuffers = nBuffers;
\r
5675 stream_.sampleRate = sampleRate;
\r
5677 // Setup the buffer conversion information structure.
\r
5678 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5680 // Setup the callback thread.
\r
5681 if ( stream_.callbackInfo.isRunning == false ) {
\r
5682 unsigned threadId;
\r
5683 stream_.callbackInfo.isRunning = true;
\r
5684 stream_.callbackInfo.object = (void *) this;
\r
5685 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5686 &stream_.callbackInfo, 0, &threadId );
\r
5687 if ( stream_.callbackInfo.thread == 0 ) {
\r
5688 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5692 // Boost DS thread priority
\r
5693 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5699 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5700 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5701 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5702 if ( buffer ) buffer->Release();
\r
5703 object->Release();
\r
5705 if ( handle->buffer[1] ) {
\r
5706 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5707 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5708 if ( buffer ) buffer->Release();
\r
5709 object->Release();
\r
5711 CloseHandle( handle->condition );
\r
5713 stream_.apiHandle = 0;
\r
5716 for ( int i=0; i<2; i++ ) {
\r
5717 if ( stream_.userBuffer[i] ) {
\r
5718 free( stream_.userBuffer[i] );
\r
5719 stream_.userBuffer[i] = 0;
\r
5723 if ( stream_.deviceBuffer ) {
\r
5724 free( stream_.deviceBuffer );
\r
5725 stream_.deviceBuffer = 0;
\r
5728 stream_.state = STREAM_CLOSED;
\r
// Close an open DirectSound stream: shut down the callback thread,
// release the DS playback/capture objects and their buffers, free the
// internal user/device conversion buffers, and reset the stream state.
// NOTE(review): this extraction appears to have lines elided (opening
// brace, early return, a handle null-check, and buffer Stop() error
// handling are missing from view) — verify against the canonical file.
5732 void RtApiDs :: closeStream()

5734 if ( stream_.state == STREAM_CLOSED ) {

5735 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5736 error( RtAudioError::WARNING );

// Signal the callback thread to exit, then wait for it to finish and
// reclaim its handle before tearing down the DS objects it uses.
5740 // Stop the callback thread.

5741 stream_.callbackInfo.isRunning = false;

5742 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5743 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5745 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release the output-side (index 0) secondary buffer and DS object.
5747 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5748 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5749 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5752 buffer->Release();

5754 object->Release();

// Release the input-side (index 1) capture buffer and capture object.
5756 if ( handle->buffer[1] ) {

5757 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5758 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5761 buffer->Release();

5763 object->Release();

// Destroy the manual-reset event used to signal drain completion.
5765 CloseHandle( handle->condition );

5767 stream_.apiHandle = 0;

// Free the per-mode user buffers (index 0 = output, 1 = input).
5770 for ( int i=0; i<2; i++ ) {

5771 if ( stream_.userBuffer[i] ) {

5772 free( stream_.userBuffer[i] );

5773 stream_.userBuffer[i] = 0;

// Free the shared device buffer used for format/channel conversion.
5777 if ( stream_.deviceBuffer ) {

5778 free( stream_.deviceBuffer );

5779 stream_.deviceBuffer = 0;

5782 stream_.mode = UNINITIALIZED;

5783 stream_.state = STREAM_CLOSED;
\r
// Start a stopped DirectSound stream: boost timer resolution, compute
// any duplex preroll, start the playback buffer (looping) and/or the
// capture buffer, reset drain bookkeeping, and mark the stream running.
// NOTE(review): lines appear elided in this extraction (braces, early
// return, error-branch exits) — verify against the canonical file.
5786 void RtApiDs :: startStream()

5789 if ( stream_.state == STREAM_RUNNING ) {

5790 errorText_ = "RtApiDs::startStream(): the stream is already running!";

5791 error( RtAudioError::WARNING );

5795 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5797 // Increase scheduler frequency on lesser windows (a side-effect of

5798 // increasing timer accuracy). On greater windows (Win2K or later),

5799 // this is already in effect.

// Raise the multimedia timer resolution to 1 ms for the stream's
// lifetime; matched by timeEndPeriod( 1 ) in stopStream().
5800 timeBeginPeriod( 1 );

// Reset the duplex-synchronization state used by callbackEvent().
5802 buffersRolling = false;

5803 duplexPrerollBytes = 0;

5805 if ( stream_.mode == DUPLEX ) {

5806 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

5807 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

5810 HRESULT result = 0;

// Start the playback (secondary) buffer in looping mode.
5811 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

5813 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5814 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

5815 if ( FAILED( result ) ) {

5816 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

5817 errorText_ = errorStream_.str();

// Start the capture buffer in looping mode.
5822 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

5824 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5825 result = buffer->Start( DSCBSTART_LOOPING );

5826 if ( FAILED( result ) ) {

5827 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

5828 errorText_ = errorStream_.str();

// Clear drain state and the drain-completion event, then go running.
5833 handle->drainCounter = 0;

5834 handle->internalDrain = false;

5835 ResetEvent( handle->condition );

5836 stream_.state = STREAM_RUNNING;

// Report any accumulated failure from the Play()/Start() calls above.
5839 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running DirectSound stream. For output, optionally waits for
// the stream to drain (drainCounter handshake with callbackEvent via
// handle->condition), then stops the DS buffer, zeroes its contents so
// a restart does not replay stale audio, and rewinds the buffer
// pointer. The same stop/zero/rewind sequence is applied to the
// capture buffer for input/duplex. Finally restores normal timer
// resolution. NOTE(review): lines appear elided in this extraction
// (audioPtr/dataLen declarations, early return, error-exit branches)
// — verify against the canonical file.
5842 void RtApiDs :: stopStream()

5845 if ( stream_.state == STREAM_STOPPED ) {

5846 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

5847 error( RtAudioError::WARNING );

5851 HRESULT result = 0;

5854 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5855 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain was requested yet: request one
// (value 2) and block until the callback signals drain completion.
5856 if ( handle->drainCounter == 0 ) {

5857 handle->drainCounter = 2;

5858 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

5861 stream_.state = STREAM_STOPPED;

5863 // Stop the buffer and clear memory

5864 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5865 result = buffer->Stop();

5866 if ( FAILED( result ) ) {

5867 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

5868 errorText_ = errorStream_.str();

5872 // Lock the buffer and clear it so that if we start to play again,

5873 // we won't have old data playing.

5874 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

5875 if ( FAILED( result ) ) {

5876 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

5877 errorText_ = errorStream_.str();

5881 // Zero the DS buffer

5882 ZeroMemory( audioPtr, dataLen );

5884 // Unlock the DS buffer

5885 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5886 if ( FAILED( result ) ) {

5887 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

5888 errorText_ = errorStream_.str();

5892 // If we start playing again, we must begin at beginning of buffer.

5893 handle->bufferPointer[0] = 0;

// Mirror of the output path for the capture (input) buffer.
5896 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

5897 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5901 stream_.state = STREAM_STOPPED;

5903 result = buffer->Stop();

5904 if ( FAILED( result ) ) {

5905 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

5906 errorText_ = errorStream_.str();

5910 // Lock the buffer and clear it so that if we start to play again,

5911 // we won't have old data playing.

5912 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

5913 if ( FAILED( result ) ) {

5914 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

5915 errorText_ = errorStream_.str();

5919 // Zero the DS buffer

5920 ZeroMemory( audioPtr, dataLen );

5922 // Unlock the DS buffer

5923 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5924 if ( FAILED( result ) ) {

5925 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

5926 errorText_ = errorStream_.str();

5930 // If we start recording again, we must begin at beginning of buffer.

5931 handle->bufferPointer[1] = 0;

// Undo the timeBeginPeriod( 1 ) issued in startStream().
5935 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

5936 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream: set drainCounter to 2 so the callback stops
// immediately instead of draining pending output first (see the
// drainCounter handling in stopStream()/callbackEvent()).
// NOTE(review): trailing lines appear elided in this extraction —
// presumably the function then delegates to stopStream(); verify
// against the canonical file.
5939 void RtApiDs :: abortStream()

5942 if ( stream_.state == STREAM_STOPPED ) {

5943 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

5944 error( RtAudioError::WARNING );

5948 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5949 handle->drainCounter = 2;
\r
5954 void RtApiDs :: callbackEvent()
\r
5956 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
5957 Sleep( 50 ); // sleep 50 milliseconds
\r
5961 if ( stream_.state == STREAM_CLOSED ) {
\r
5962 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
5963 error( RtAudioError::WARNING );
\r
5967 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
5968 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5970 // Check if we were draining the stream and signal is finished.
\r
5971 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
5973 stream_.state = STREAM_STOPPING;
\r
5974 if ( handle->internalDrain == false )
\r
5975 SetEvent( handle->condition );
\r
5981 // Invoke user callback to get fresh output data UNLESS we are
\r
5982 // draining stream.
\r
5983 if ( handle->drainCounter == 0 ) {
\r
5984 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
5985 double streamTime = getStreamTime();
\r
5986 RtAudioStreamStatus status = 0;
\r
5987 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
5988 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
5989 handle->xrun[0] = false;
\r
5991 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
5992 status |= RTAUDIO_INPUT_OVERFLOW;
\r
5993 handle->xrun[1] = false;
\r
5995 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
5996 stream_.bufferSize, streamTime, status, info->userData );
\r
5997 if ( cbReturnValue == 2 ) {
\r
5998 stream_.state = STREAM_STOPPING;
\r
5999 handle->drainCounter = 2;
\r
6003 else if ( cbReturnValue == 1 ) {
\r
6004 handle->drainCounter = 1;
\r
6005 handle->internalDrain = true;
\r
6010 DWORD currentWritePointer, safeWritePointer;
\r
6011 DWORD currentReadPointer, safeReadPointer;
\r
6012 UINT nextWritePointer;
\r
6014 LPVOID buffer1 = NULL;
\r
6015 LPVOID buffer2 = NULL;
\r
6016 DWORD bufferSize1 = 0;
\r
6017 DWORD bufferSize2 = 0;
\r
6022 if ( buffersRolling == false ) {
\r
6023 if ( stream_.mode == DUPLEX ) {
\r
6024 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6026 // It takes a while for the devices to get rolling. As a result,
\r
6027 // there's no guarantee that the capture and write device pointers
\r
6028 // will move in lockstep. Wait here for both devices to start
\r
6029 // rolling, and then set our buffer pointers accordingly.
\r
6030 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6031 // bytes later than the write buffer.
\r
6033 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6034 // take place between the two GetCurrentPosition calls... but I'm
\r
6035 // really not sure how to solve the problem. Temporarily boost to
\r
6036 // Realtime priority, maybe; but I'm not sure what priority the
\r
6037 // DirectSound service threads run at. We *should* be roughly
\r
6038 // within a ms or so of correct.
\r
6040 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6041 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6043 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6045 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6046 if ( FAILED( result ) ) {
\r
6047 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6048 errorText_ = errorStream_.str();
\r
6049 error( RtAudioError::SYSTEM_ERROR );
\r
6052 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6053 if ( FAILED( result ) ) {
\r
6054 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6055 errorText_ = errorStream_.str();
\r
6056 error( RtAudioError::SYSTEM_ERROR );
\r
6060 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6061 if ( FAILED( result ) ) {
\r
6062 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6063 errorText_ = errorStream_.str();
\r
6064 error( RtAudioError::SYSTEM_ERROR );
\r
6067 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6068 if ( FAILED( result ) ) {
\r
6069 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6070 errorText_ = errorStream_.str();
\r
6071 error( RtAudioError::SYSTEM_ERROR );
\r
6074 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6078 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6080 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6081 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6082 handle->bufferPointer[1] = safeReadPointer;
\r
6084 else if ( stream_.mode == OUTPUT ) {
\r
6086 // Set the proper nextWritePosition after initial startup.
\r
6087 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6088 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6089 if ( FAILED( result ) ) {
\r
6090 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6091 errorText_ = errorStream_.str();
\r
6092 error( RtAudioError::SYSTEM_ERROR );
\r
6095 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6096 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6099 buffersRolling = true;
\r
6102 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6104 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6106 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6107 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6108 bufferBytes *= formatBytes( stream_.userFormat );
\r
6109 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6112 // Setup parameters and do buffer conversion if necessary.
\r
6113 if ( stream_.doConvertBuffer[0] ) {
\r
6114 buffer = stream_.deviceBuffer;
\r
6115 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6116 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6117 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6120 buffer = stream_.userBuffer[0];
\r
6121 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6122 bufferBytes *= formatBytes( stream_.userFormat );
\r
6125 // No byte swapping necessary in DirectSound implementation.
\r
6127 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6128 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6130 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6131 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6133 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6134 nextWritePointer = handle->bufferPointer[0];
\r
6136 DWORD endWrite, leadPointer;
\r
6138 // Find out where the read and "safe write" pointers are.
\r
6139 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6140 if ( FAILED( result ) ) {
\r
6141 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6142 errorText_ = errorStream_.str();
\r
6143 error( RtAudioError::SYSTEM_ERROR );
\r
6147 // We will copy our output buffer into the region between
\r
6148 // safeWritePointer and leadPointer. If leadPointer is not
\r
6149 // beyond the next endWrite position, wait until it is.
\r
6150 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6151 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6152 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6153 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6154 endWrite = nextWritePointer + bufferBytes;
\r
6156 // Check whether the entire write region is behind the play pointer.
\r
6157 if ( leadPointer >= endWrite ) break;
\r
6159 // If we are here, then we must wait until the leadPointer advances
\r
6160 // beyond the end of our next write region. We use the
\r
6161 // Sleep() function to suspend operation until that happens.
\r
6162 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6163 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6164 if ( millis < 1.0 ) millis = 1.0;
\r
6165 Sleep( (DWORD) millis );
\r
6168 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6169 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6170 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6171 handle->xrun[0] = true;
\r
6172 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6173 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6174 handle->bufferPointer[0] = nextWritePointer;
\r
6175 endWrite = nextWritePointer + bufferBytes;
\r
6178 // Lock free space in the buffer
\r
6179 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6180 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6181 if ( FAILED( result ) ) {
\r
6182 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6183 errorText_ = errorStream_.str();
\r
6184 error( RtAudioError::SYSTEM_ERROR );
\r
6188 // Copy our buffer into the DS buffer
\r
6189 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6190 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6192 // Update our buffer offset and unlock sound buffer
\r
6193 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6194 if ( FAILED( result ) ) {
\r
6195 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6196 errorText_ = errorStream_.str();
\r
6197 error( RtAudioError::SYSTEM_ERROR );
\r
6200 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6201 handle->bufferPointer[0] = nextWritePointer;
\r
6203 if ( handle->drainCounter ) {
\r
6204 handle->drainCounter++;
\r
6209 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6211 // Setup parameters.
\r
6212 if ( stream_.doConvertBuffer[1] ) {
\r
6213 buffer = stream_.deviceBuffer;
\r
6214 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6215 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6218 buffer = stream_.userBuffer[1];
\r
6219 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6220 bufferBytes *= formatBytes( stream_.userFormat );
\r
6223 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6224 long nextReadPointer = handle->bufferPointer[1];
\r
6225 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6227 // Find out where the write and "safe read" pointers are.
\r
6228 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6229 if ( FAILED( result ) ) {
\r
6230 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6231 errorText_ = errorStream_.str();
\r
6232 error( RtAudioError::SYSTEM_ERROR );
\r
6236 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6237 DWORD endRead = nextReadPointer + bufferBytes;
\r
6239 // Handling depends on whether we are INPUT or DUPLEX.
\r
6240 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6241 // then a wait here will drag the write pointers into the forbidden zone.
\r
6243 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6244 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6245 // practical way to sync up the read and write pointers reliably, given the
\r
6246 very complex relationship between phase and increment of the read and write
\r
6249 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6250 // provide a pre-roll period of 0.5 seconds in which we return
\r
6251 // zeros from the read buffer while the pointers sync up.
\r
6253 if ( stream_.mode == DUPLEX ) {
\r
6254 if ( safeReadPointer < endRead ) {
\r
6255 if ( duplexPrerollBytes <= 0 ) {
\r
6256 // Pre-roll time over. Be more aggressive.
\r
6257 int adjustment = endRead-safeReadPointer;
\r
6259 handle->xrun[1] = true;
\r
6261 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6262 // and perform fine adjustments later.
\r
6263 // - small adjustments: back off by twice as much.
\r
6264 if ( adjustment >= 2*bufferBytes )
\r
6265 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6267 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6269 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6273 // In pre-roll time. Just do it.
\r
6274 nextReadPointer = safeReadPointer - bufferBytes;
\r
6275 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6277 endRead = nextReadPointer + bufferBytes;
\r
6280 else { // mode == INPUT
\r
6281 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6282 // See comments for playback.
\r
6283 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6284 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6285 if ( millis < 1.0 ) millis = 1.0;
\r
6286 Sleep( (DWORD) millis );
\r
6288 // Wake up and find out where we are now.
\r
6289 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6290 if ( FAILED( result ) ) {
\r
6291 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6292 errorText_ = errorStream_.str();
\r
6293 error( RtAudioError::SYSTEM_ERROR );
\r
6297 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6301 // Lock free space in the buffer
\r
6302 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6303 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6304 if ( FAILED( result ) ) {
\r
6305 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6306 errorText_ = errorStream_.str();
\r
6307 error( RtAudioError::SYSTEM_ERROR );
\r
6311 if ( duplexPrerollBytes <= 0 ) {
\r
6312 // Copy our buffer into the DS buffer
\r
6313 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6314 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6317 memset( buffer, 0, bufferSize1 );
\r
6318 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6319 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6322 // Update our buffer offset and unlock sound buffer
\r
6323 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6324 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6325 if ( FAILED( result ) ) {
\r
6326 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6327 errorText_ = errorStream_.str();
\r
6328 error( RtAudioError::SYSTEM_ERROR );
\r
6331 handle->bufferPointer[1] = nextReadPointer;
\r
6333 // No byte swapping necessary in DirectSound implementation.
\r
6335 // If necessary, convert 8-bit data from unsigned to signed.
\r
6336 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6337 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6339 // Do buffer conversion if necessary.
\r
6340 if ( stream_.doConvertBuffer[1] )
\r
6341 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6345 RtApi::tickStreamTime();
\r
6348 // Definitions for utility functions and callbacks
\r
6349 // specific to the DirectSound implementation.
\r
6351 static unsigned __stdcall callbackHandler( void *ptr )
\r
6353 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6354 RtApiDs *object = (RtApiDs *) info->object;
\r
6355 bool* isRunning = &info->isRunning;
\r
6357 while ( *isRunning == true ) {
\r
6358 object->callbackEvent();
\r
6361 _endthreadex( 0 );
\r
6365 #include "tchar.h"
\r
6367 static std::string convertTChar( LPCTSTR name )
\r
6369 #if defined( UNICODE ) || defined( _UNICODE )
\r
6370 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6371 std::string s( length-1, '\0' );
\r
6372 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6374 std::string s( name );
\r
6380 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6381 LPCTSTR description,
\r
6382 LPCTSTR /*module*/,
\r
6383 LPVOID lpContext )
\r
6385 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6386 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6389 bool validDevice = false;
\r
6390 if ( probeInfo.isInput == true ) {
\r
6392 LPDIRECTSOUNDCAPTURE object;
\r
6394 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6395 if ( hr != DS_OK ) return TRUE;
\r
6397 caps.dwSize = sizeof(caps);
\r
6398 hr = object->GetCaps( &caps );
\r
6399 if ( hr == DS_OK ) {
\r
6400 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6401 validDevice = true;
\r
6403 object->Release();
\r
6407 LPDIRECTSOUND object;
\r
6408 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6409 if ( hr != DS_OK ) return TRUE;
\r
6411 caps.dwSize = sizeof(caps);
\r
6412 hr = object->GetCaps( &caps );
\r
6413 if ( hr == DS_OK ) {
\r
6414 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6415 validDevice = true;
\r
6417 object->Release();
\r
6420 // If good device, then save its name and guid.
\r
6421 std::string name = convertTChar( description );
\r
6422 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6423 if ( lpguid == NULL )
\r
6424 name = "Default Device";
\r
6425 if ( validDevice ) {
\r
6426 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6427 if ( dsDevices[i].name == name ) {
\r
6428 dsDevices[i].found = true;
\r
6429 if ( probeInfo.isInput ) {
\r
6430 dsDevices[i].id[1] = lpguid;
\r
6431 dsDevices[i].validId[1] = true;
\r
6434 dsDevices[i].id[0] = lpguid;
\r
6435 dsDevices[i].validId[0] = true;
\r
6442 device.name = name;
\r
6443 device.found = true;
\r
6444 if ( probeInfo.isInput ) {
\r
6445 device.id[1] = lpguid;
\r
6446 device.validId[1] = true;
\r
6449 device.id[0] = lpguid;
\r
6450 device.validId[0] = true;
\r
6452 dsDevices.push_back( device );
\r
6458 static const char* getErrorString( int code )
\r
6462 case DSERR_ALLOCATED:
\r
6463 return "Already allocated";
\r
6465 case DSERR_CONTROLUNAVAIL:
\r
6466 return "Control unavailable";
\r
6468 case DSERR_INVALIDPARAM:
\r
6469 return "Invalid parameter";
\r
6471 case DSERR_INVALIDCALL:
\r
6472 return "Invalid call";
\r
6474 case DSERR_GENERIC:
\r
6475 return "Generic error";
\r
6477 case DSERR_PRIOLEVELNEEDED:
\r
6478 return "Priority level needed";
\r
6480 case DSERR_OUTOFMEMORY:
\r
6481 return "Out of memory";
\r
6483 case DSERR_BADFORMAT:
\r
6484 return "The sample rate or the channel format is not supported";
\r
6486 case DSERR_UNSUPPORTED:
\r
6487 return "Not supported";
\r
6489 case DSERR_NODRIVER:
\r
6490 return "No driver";
\r
6492 case DSERR_ALREADYINITIALIZED:
\r
6493 return "Already initialized";
\r
6495 case DSERR_NOAGGREGATION:
\r
6496 return "No aggregation";
\r
6498 case DSERR_BUFFERLOST:
\r
6499 return "Buffer lost";
\r
6501 case DSERR_OTHERAPPHASPRIO:
\r
6502 return "Another application already has priority";
\r
6504 case DSERR_UNINITIALIZED:
\r
6505 return "Uninitialized";
\r
6508 return "DirectSound unknown error";
\r
6511 //******************** End of __WINDOWS_DS__ *********************//
\r
6515 #if defined(__LINUX_ALSA__)
\r
6517 #include <alsa/asoundlib.h>
\r
6518 #include <unistd.h>
\r
6520 // A structure to hold various information related to the ALSA API
\r
6521 // implementation.
\r
6522 struct AlsaHandle {
\r
6523 snd_pcm_t *handles[2];
\r
6524 bool synchronized;
\r
6526 pthread_cond_t runnable_cv;
\r
6530 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6533 static void *alsaCallbackHandler( void * ptr );
\r
6535 RtApiAlsa :: RtApiAlsa()
\r
6537 // Nothing to do here.
\r
6540 RtApiAlsa :: ~RtApiAlsa()
\r
6542 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6545 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6547 unsigned nDevices = 0;
\r
6548 int result, subdevice, card;
\r
6550 snd_ctl_t *handle;
\r
6552 // Count cards and devices
\r
6554 snd_card_next( &card );
\r
6555 while ( card >= 0 ) {
\r
6556 sprintf( name, "hw:%d", card );
\r
6557 result = snd_ctl_open( &handle, name, 0 );
\r
6558 if ( result < 0 ) {
\r
6559 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6560 errorText_ = errorStream_.str();
\r
6561 error( RtAudioError::WARNING );
\r
6566 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6567 if ( result < 0 ) {
\r
6568 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6569 errorText_ = errorStream_.str();
\r
6570 error( RtAudioError::WARNING );
\r
6573 if ( subdevice < 0 )
\r
6578 snd_ctl_close( handle );
\r
6579 snd_card_next( &card );
\r
6582 result = snd_ctl_open( &handle, "default", 0 );
\r
6583 if (result == 0) {
\r
6585 snd_ctl_close( handle );
\r
6591 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6593 RtAudio::DeviceInfo info;
\r
6594 info.probed = false;
\r
6596 unsigned nDevices = 0;
\r
6597 int result, subdevice, card;
\r
6599 snd_ctl_t *chandle;
\r
6601 // Count cards and devices
\r
6603 snd_card_next( &card );
\r
6604 while ( card >= 0 ) {
\r
6605 sprintf( name, "hw:%d", card );
\r
6606 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6607 if ( result < 0 ) {
\r
6608 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6609 errorText_ = errorStream_.str();
\r
6610 error( RtAudioError::WARNING );
\r
6615 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6616 if ( result < 0 ) {
\r
6617 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6618 errorText_ = errorStream_.str();
\r
6619 error( RtAudioError::WARNING );
\r
6622 if ( subdevice < 0 ) break;
\r
6623 if ( nDevices == device ) {
\r
6624 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6630 snd_ctl_close( chandle );
\r
6631 snd_card_next( &card );
\r
6634 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6635 if ( result == 0 ) {
\r
6636 if ( nDevices == device ) {
\r
6637 strcpy( name, "default" );
\r
6643 if ( nDevices == 0 ) {
\r
6644 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6645 error( RtAudioError::INVALID_USE );
\r
6649 if ( device >= nDevices ) {
\r
6650 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6651 error( RtAudioError::INVALID_USE );
\r
6657 // If a stream is already open, we cannot probe the stream devices.
\r
6658 // Thus, use the saved results.
\r
6659 if ( stream_.state != STREAM_CLOSED &&
\r
6660 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6661 snd_ctl_close( chandle );
\r
6662 if ( device >= devices_.size() ) {
\r
6663 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6664 error( RtAudioError::WARNING );
\r
6667 return devices_[ device ];
\r
6670 int openMode = SND_PCM_ASYNC;
\r
6671 snd_pcm_stream_t stream;
\r
6672 snd_pcm_info_t *pcminfo;
\r
6673 snd_pcm_info_alloca( &pcminfo );
\r
6674 snd_pcm_t *phandle;
\r
6675 snd_pcm_hw_params_t *params;
\r
6676 snd_pcm_hw_params_alloca( &params );
\r
6678 // First try for playback unless default device (which has subdev -1)
\r
6679 stream = SND_PCM_STREAM_PLAYBACK;
\r
6680 snd_pcm_info_set_stream( pcminfo, stream );
\r
6681 if ( subdevice != -1 ) {
\r
6682 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6683 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6685 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6686 if ( result < 0 ) {
\r
6687 // Device probably doesn't support playback.
\r
6688 goto captureProbe;
\r
6692 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6693 if ( result < 0 ) {
\r
6694 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6695 errorText_ = errorStream_.str();
\r
6696 error( RtAudioError::WARNING );
\r
6697 goto captureProbe;
\r
6700 // The device is open ... fill the parameter structure.
\r
6701 result = snd_pcm_hw_params_any( phandle, params );
\r
6702 if ( result < 0 ) {
\r
6703 snd_pcm_close( phandle );
\r
6704 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6705 errorText_ = errorStream_.str();
\r
6706 error( RtAudioError::WARNING );
\r
6707 goto captureProbe;
\r
6710 // Get output channel information.
\r
6711 unsigned int value;
\r
6712 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6713 if ( result < 0 ) {
\r
6714 snd_pcm_close( phandle );
\r
6715 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6716 errorText_ = errorStream_.str();
\r
6717 error( RtAudioError::WARNING );
\r
6718 goto captureProbe;
\r
6720 info.outputChannels = value;
\r
6721 snd_pcm_close( phandle );
\r
6724 stream = SND_PCM_STREAM_CAPTURE;
\r
6725 snd_pcm_info_set_stream( pcminfo, stream );
\r
6727 // Now try for capture unless default device (with subdev = -1)
\r
6728 if ( subdevice != -1 ) {
\r
6729 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6730 snd_ctl_close( chandle );
\r
6731 if ( result < 0 ) {
\r
6732 // Device probably doesn't support capture.
\r
6733 if ( info.outputChannels == 0 ) return info;
\r
6734 goto probeParameters;
\r
6738 snd_ctl_close( chandle );
\r
6740 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6741 if ( result < 0 ) {
\r
6742 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6743 errorText_ = errorStream_.str();
\r
6744 error( RtAudioError::WARNING );
\r
6745 if ( info.outputChannels == 0 ) return info;
\r
6746 goto probeParameters;
\r
6749 // The device is open ... fill the parameter structure.
\r
6750 result = snd_pcm_hw_params_any( phandle, params );
\r
6751 if ( result < 0 ) {
\r
6752 snd_pcm_close( phandle );
\r
6753 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6754 errorText_ = errorStream_.str();
\r
6755 error( RtAudioError::WARNING );
\r
6756 if ( info.outputChannels == 0 ) return info;
\r
6757 goto probeParameters;
\r
6760 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6761 if ( result < 0 ) {
\r
6762 snd_pcm_close( phandle );
\r
6763 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6764 errorText_ = errorStream_.str();
\r
6765 error( RtAudioError::WARNING );
\r
6766 if ( info.outputChannels == 0 ) return info;
\r
6767 goto probeParameters;
\r
6769 info.inputChannels = value;
\r
6770 snd_pcm_close( phandle );
\r
6772 // If device opens for both playback and capture, we determine the channels.
\r
6773 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6774 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6776 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6777 if ( device == 0 && info.outputChannels > 0 )
\r
6778 info.isDefaultOutput = true;
\r
6779 if ( device == 0 && info.inputChannels > 0 )
\r
6780 info.isDefaultInput = true;
\r
6783 // At this point, we just need to figure out the supported data
\r
6784 // formats and sample rates. We'll proceed by opening the device in
\r
6785 // the direction with the maximum number of channels, or playback if
\r
6786 // they are equal. This might limit our sample rate options, but so
\r
6789 if ( info.outputChannels >= info.inputChannels )
\r
6790 stream = SND_PCM_STREAM_PLAYBACK;
\r
6792 stream = SND_PCM_STREAM_CAPTURE;
\r
6793 snd_pcm_info_set_stream( pcminfo, stream );
\r
6795 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6796 if ( result < 0 ) {
\r
6797 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6798 errorText_ = errorStream_.str();
\r
6799 error( RtAudioError::WARNING );
\r
6803 // The device is open ... fill the parameter structure.
\r
6804 result = snd_pcm_hw_params_any( phandle, params );
\r
6805 if ( result < 0 ) {
\r
6806 snd_pcm_close( phandle );
\r
6807 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6808 errorText_ = errorStream_.str();
\r
6809 error( RtAudioError::WARNING );
\r
6813 // Test our discrete set of sample rate values.
\r
6814 info.sampleRates.clear();
\r
6815 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
6816 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
6817 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
6819 if ( info.sampleRates.size() == 0 ) {
\r
6820 snd_pcm_close( phandle );
\r
6821 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
6822 errorText_ = errorStream_.str();
\r
6823 error( RtAudioError::WARNING );
\r
6827 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
6828 snd_pcm_format_t format;
\r
6829 info.nativeFormats = 0;
\r
6830 format = SND_PCM_FORMAT_S8;
\r
6831 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6832 info.nativeFormats |= RTAUDIO_SINT8;
\r
6833 format = SND_PCM_FORMAT_S16;
\r
6834 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6835 info.nativeFormats |= RTAUDIO_SINT16;
\r
6836 format = SND_PCM_FORMAT_S24;
\r
6837 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6838 info.nativeFormats |= RTAUDIO_SINT24;
\r
6839 format = SND_PCM_FORMAT_S32;
\r
6840 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6841 info.nativeFormats |= RTAUDIO_SINT32;
\r
6842 format = SND_PCM_FORMAT_FLOAT;
\r
6843 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6844 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6845 format = SND_PCM_FORMAT_FLOAT64;
\r
6846 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6847 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
6849 // Check that we have at least one supported format
\r
6850 if ( info.nativeFormats == 0 ) {
\r
6851 snd_pcm_close( phandle );
\r
6852 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
6853 errorText_ = errorStream_.str();
\r
6854 error( RtAudioError::WARNING );
\r
6858 // Get the device name
\r
6860 result = snd_card_get_name( card, &cardname );
\r
6861 if ( result >= 0 ) {
\r
6862 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
6867 // That's all ... close the device and return
\r
6868 snd_pcm_close( phandle );
\r
6869 info.probed = true;
\r
6873 void RtApiAlsa :: saveDeviceInfo( void )
\r
6877 unsigned int nDevices = getDeviceCount();
\r
6878 devices_.resize( nDevices );
\r
6879 for ( unsigned int i=0; i<nDevices; i++ )
\r
6880 devices_[i] = getDeviceInfo( i );
\r
6883 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6884 unsigned int firstChannel, unsigned int sampleRate,
\r
6885 RtAudioFormat format, unsigned int *bufferSize,
\r
6886 RtAudio::StreamOptions *options )
\r
6889 #if defined(__RTAUDIO_DEBUG__)
\r
6890 snd_output_t *out;
\r
6891 snd_output_stdio_attach(&out, stderr, 0);
\r
6894 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
6896 unsigned nDevices = 0;
\r
6897 int result, subdevice, card;
\r
6899 snd_ctl_t *chandle;
\r
6901 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
6902 snprintf(name, sizeof(name), "%s", "default");
\r
6904 // Count cards and devices
\r
6906 snd_card_next( &card );
\r
6907 while ( card >= 0 ) {
\r
6908 sprintf( name, "hw:%d", card );
\r
6909 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6910 if ( result < 0 ) {
\r
6911 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6912 errorText_ = errorStream_.str();
\r
6917 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6918 if ( result < 0 ) break;
\r
6919 if ( subdevice < 0 ) break;
\r
6920 if ( nDevices == device ) {
\r
6921 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6922 snd_ctl_close( chandle );
\r
6927 snd_ctl_close( chandle );
\r
6928 snd_card_next( &card );
\r
6931 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6932 if ( result == 0 ) {
\r
6933 if ( nDevices == device ) {
\r
6934 strcpy( name, "default" );
\r
6940 if ( nDevices == 0 ) {
\r
6941 // This should not happen because a check is made before this function is called.
\r
6942 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
6946 if ( device >= nDevices ) {
\r
6947 // This should not happen because a check is made before this function is called.
\r
6948 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
6955 // The getDeviceInfo() function will not work for a device that is
\r
6956 // already open. Thus, we'll probe the system before opening a
\r
6957 // stream and save the results for use by getDeviceInfo().
\r
6958 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
6959 this->saveDeviceInfo();
\r
6961 snd_pcm_stream_t stream;
\r
6962 if ( mode == OUTPUT )
\r
6963 stream = SND_PCM_STREAM_PLAYBACK;
\r
6965 stream = SND_PCM_STREAM_CAPTURE;
\r
6967 snd_pcm_t *phandle;
\r
6968 int openMode = SND_PCM_ASYNC;
\r
6969 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
6970 if ( result < 0 ) {
\r
6971 if ( mode == OUTPUT )
\r
6972 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
6974 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
6975 errorText_ = errorStream_.str();
\r
6979 // Fill the parameter structure.
\r
6980 snd_pcm_hw_params_t *hw_params;
\r
6981 snd_pcm_hw_params_alloca( &hw_params );
\r
6982 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
6983 if ( result < 0 ) {
\r
6984 snd_pcm_close( phandle );
\r
6985 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
6986 errorText_ = errorStream_.str();
\r
6990 #if defined(__RTAUDIO_DEBUG__)
\r
6991 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
6992 snd_pcm_hw_params_dump( hw_params, out );
\r
6995 // Set access ... check user preference.
\r
6996 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
6997 stream_.userInterleaved = false;
\r
6998 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
6999 if ( result < 0 ) {
\r
7000 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7001 stream_.deviceInterleaved[mode] = true;
\r
7004 stream_.deviceInterleaved[mode] = false;
\r
7007 stream_.userInterleaved = true;
\r
7008 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7009 if ( result < 0 ) {
\r
7010 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7011 stream_.deviceInterleaved[mode] = false;
\r
7014 stream_.deviceInterleaved[mode] = true;
\r
7017 if ( result < 0 ) {
\r
7018 snd_pcm_close( phandle );
\r
7019 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7020 errorText_ = errorStream_.str();
\r
7024 // Determine how to set the device format.
\r
7025 stream_.userFormat = format;
\r
7026 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7028 if ( format == RTAUDIO_SINT8 )
\r
7029 deviceFormat = SND_PCM_FORMAT_S8;
\r
7030 else if ( format == RTAUDIO_SINT16 )
\r
7031 deviceFormat = SND_PCM_FORMAT_S16;
\r
7032 else if ( format == RTAUDIO_SINT24 )
\r
7033 deviceFormat = SND_PCM_FORMAT_S24;
\r
7034 else if ( format == RTAUDIO_SINT32 )
\r
7035 deviceFormat = SND_PCM_FORMAT_S32;
\r
7036 else if ( format == RTAUDIO_FLOAT32 )
\r
7037 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7038 else if ( format == RTAUDIO_FLOAT64 )
\r
7039 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7041 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7042 stream_.deviceFormat[mode] = format;
\r
7046 // The user requested format is not natively supported by the device.
\r
7047 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7048 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7049 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7053 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7054 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7055 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7059 deviceFormat = SND_PCM_FORMAT_S32;
\r
7060 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7061 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7065 deviceFormat = SND_PCM_FORMAT_S24;
\r
7066 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7067 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7071 deviceFormat = SND_PCM_FORMAT_S16;
\r
7072 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7073 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7077 deviceFormat = SND_PCM_FORMAT_S8;
\r
7078 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7079 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7083 // If we get here, no supported format was found.
\r
7084 snd_pcm_close( phandle );
\r
7085 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7086 errorText_ = errorStream_.str();
\r
7090 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7091 if ( result < 0 ) {
\r
7092 snd_pcm_close( phandle );
\r
7093 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7094 errorText_ = errorStream_.str();
\r
7098 // Determine whether byte-swapping is necessary.
\r
7099 stream_.doByteSwap[mode] = false;
\r
7100 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7101 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7102 if ( result == 0 )
\r
7103 stream_.doByteSwap[mode] = true;
\r
7104 else if (result < 0) {
\r
7105 snd_pcm_close( phandle );
\r
7106 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7107 errorText_ = errorStream_.str();
\r
7112 // Set the sample rate.
\r
7113 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7114 if ( result < 0 ) {
\r
7115 snd_pcm_close( phandle );
\r
7116 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7117 errorText_ = errorStream_.str();
\r
7121 // Determine the number of channels for this device. We support a possible
\r
7122 // minimum device channel number > than the value requested by the user.
\r
7123 stream_.nUserChannels[mode] = channels;
\r
7124 unsigned int value;
\r
7125 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7126 unsigned int deviceChannels = value;
\r
7127 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7128 snd_pcm_close( phandle );
\r
7129 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7130 errorText_ = errorStream_.str();
\r
7134 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7135 if ( result < 0 ) {
\r
7136 snd_pcm_close( phandle );
\r
7137 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7138 errorText_ = errorStream_.str();
\r
7141 deviceChannels = value;
\r
7142 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7143 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7145 // Set the device channels.
\r
7146 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7147 if ( result < 0 ) {
\r
7148 snd_pcm_close( phandle );
\r
7149 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7150 errorText_ = errorStream_.str();
\r
7154 // Set the buffer (or period) size.
\r
7156 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7157 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7158 if ( result < 0 ) {
\r
7159 snd_pcm_close( phandle );
\r
7160 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7161 errorText_ = errorStream_.str();
\r
7164 *bufferSize = periodSize;
\r
7166 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7167 unsigned int periods = 0;
\r
7168 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7169 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7170 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7171 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7172 if ( result < 0 ) {
\r
7173 snd_pcm_close( phandle );
\r
7174 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7175 errorText_ = errorStream_.str();
\r
7179 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7180 // MUST be the same in both directions!
\r
7181 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7182 snd_pcm_close( phandle );
\r
7183 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7184 errorText_ = errorStream_.str();
\r
7188 stream_.bufferSize = *bufferSize;
\r
7190 // Install the hardware configuration
\r
7191 result = snd_pcm_hw_params( phandle, hw_params );
\r
7192 if ( result < 0 ) {
\r
7193 snd_pcm_close( phandle );
\r
7194 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7195 errorText_ = errorStream_.str();
\r
7199 #if defined(__RTAUDIO_DEBUG__)
\r
7200 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7201 snd_pcm_hw_params_dump( hw_params, out );
\r
7204 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7205 snd_pcm_sw_params_t *sw_params = NULL;
\r
7206 snd_pcm_sw_params_alloca( &sw_params );
\r
7207 snd_pcm_sw_params_current( phandle, sw_params );
\r
7208 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7209 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7210 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7212 // The following two settings were suggested by Theo Veenker
\r
7213 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7214 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7216 // here are two options for a fix
\r
7217 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7218 snd_pcm_uframes_t val;
\r
7219 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7220 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7222 result = snd_pcm_sw_params( phandle, sw_params );
\r
7223 if ( result < 0 ) {
\r
7224 snd_pcm_close( phandle );
\r
7225 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7226 errorText_ = errorStream_.str();
\r
7230 #if defined(__RTAUDIO_DEBUG__)
\r
7231 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7232 snd_pcm_sw_params_dump( sw_params, out );
\r
7235 // Set flags for buffer conversion
\r
7236 stream_.doConvertBuffer[mode] = false;
\r
7237 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7238 stream_.doConvertBuffer[mode] = true;
\r
7239 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7240 stream_.doConvertBuffer[mode] = true;
\r
7241 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7242 stream_.nUserChannels[mode] > 1 )
\r
7243 stream_.doConvertBuffer[mode] = true;
\r
7245 // Allocate the ApiHandle if necessary and then save.
\r
7246 AlsaHandle *apiInfo = 0;
\r
7247 if ( stream_.apiHandle == 0 ) {
\r
7249 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7251 catch ( std::bad_alloc& ) {
\r
7252 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7256 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7257 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7261 stream_.apiHandle = (void *) apiInfo;
\r
7262 apiInfo->handles[0] = 0;
\r
7263 apiInfo->handles[1] = 0;
\r
7266 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7268 apiInfo->handles[mode] = phandle;
\r
7271 // Allocate necessary internal buffers.
\r
7272 unsigned long bufferBytes;
\r
7273 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7274 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7275 if ( stream_.userBuffer[mode] == NULL ) {
\r
7276 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7280 if ( stream_.doConvertBuffer[mode] ) {
\r
7282 bool makeBuffer = true;
\r
7283 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7284 if ( mode == INPUT ) {
\r
7285 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7286 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7287 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7291 if ( makeBuffer ) {
\r
7292 bufferBytes *= *bufferSize;
\r
7293 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7294 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7295 if ( stream_.deviceBuffer == NULL ) {
\r
7296 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7302 stream_.sampleRate = sampleRate;
\r
7303 stream_.nBuffers = periods;
\r
7304 stream_.device[mode] = device;
\r
7305 stream_.state = STREAM_STOPPED;
\r
7307 // Setup the buffer conversion information structure.
\r
7308 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7310 // Setup thread if necessary.
\r
7311 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7312 // We had already set up an output stream.
\r
7313 stream_.mode = DUPLEX;
\r
7314 // Link the streams if possible.
\r
7315 apiInfo->synchronized = false;
\r
7316 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7317 apiInfo->synchronized = true;
\r
7319 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7320 error( RtAudioError::WARNING );
\r
7324 stream_.mode = mode;
\r
7326 // Setup callback thread.
\r
7327 stream_.callbackInfo.object = (void *) this;
\r
7329 // Set the thread attributes for joinable and realtime scheduling
\r
7330 // priority (optional). The higher priority will only take affect
\r
7331 // if the program is run as root or suid. Note, under Linux
\r
7332 // processes with CAP_SYS_NICE privilege, a user can change
\r
7333 // scheduling policy and priority (thus need not be root). See
\r
7334 // POSIX "capabilities".
\r
7335 pthread_attr_t attr;
\r
7336 pthread_attr_init( &attr );
\r
7337 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7339 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7340 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7341 // We previously attempted to increase the audio callback priority
\r
7342 // to SCHED_RR here via the attributes. However, while no errors
\r
7343 // were reported in doing so, it did not work. So, now this is
\r
7344 // done in the alsaCallbackHandler function.
\r
7345 stream_.callbackInfo.doRealtime = true;
\r
7346 int priority = options->priority;
\r
7347 int min = sched_get_priority_min( SCHED_RR );
\r
7348 int max = sched_get_priority_max( SCHED_RR );
\r
7349 if ( priority < min ) priority = min;
\r
7350 else if ( priority > max ) priority = max;
\r
7351 stream_.callbackInfo.priority = priority;
\r
7355 stream_.callbackInfo.isRunning = true;
\r
7356 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7357 pthread_attr_destroy( &attr );
\r
7359 stream_.callbackInfo.isRunning = false;
\r
7360 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7369 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7370 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7371 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7373 stream_.apiHandle = 0;
\r
7376 if ( phandle) snd_pcm_close( phandle );
\r
7378 for ( int i=0; i<2; i++ ) {
\r
7379 if ( stream_.userBuffer[i] ) {
\r
7380 free( stream_.userBuffer[i] );
\r
7381 stream_.userBuffer[i] = 0;
\r
7385 if ( stream_.deviceBuffer ) {
\r
7386 free( stream_.deviceBuffer );
\r
7387 stream_.deviceBuffer = 0;
\r
7390 stream_.state = STREAM_CLOSED;
\r
7394 void RtApiAlsa :: closeStream()
\r
7396 if ( stream_.state == STREAM_CLOSED ) {
\r
7397 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7398 error( RtAudioError::WARNING );
\r
7402 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7403 stream_.callbackInfo.isRunning = false;
\r
7404 MUTEX_LOCK( &stream_.mutex );
\r
7405 if ( stream_.state == STREAM_STOPPED ) {
\r
7406 apiInfo->runnable = true;
\r
7407 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7409 MUTEX_UNLOCK( &stream_.mutex );
\r
7410 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7412 if ( stream_.state == STREAM_RUNNING ) {
\r
7413 stream_.state = STREAM_STOPPED;
\r
7414 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7415 snd_pcm_drop( apiInfo->handles[0] );
\r
7416 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7417 snd_pcm_drop( apiInfo->handles[1] );
\r
7421 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7422 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7423 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7425 stream_.apiHandle = 0;
\r
7428 for ( int i=0; i<2; i++ ) {
\r
7429 if ( stream_.userBuffer[i] ) {
\r
7430 free( stream_.userBuffer[i] );
\r
7431 stream_.userBuffer[i] = 0;
\r
7435 if ( stream_.deviceBuffer ) {
\r
7436 free( stream_.deviceBuffer );
\r
7437 stream_.deviceBuffer = 0;
\r
7440 stream_.mode = UNINITIALIZED;
\r
7441 stream_.state = STREAM_CLOSED;
\r
7444 void RtApiAlsa :: startStream()
\r
7446 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7449 if ( stream_.state == STREAM_RUNNING ) {
\r
7450 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7451 error( RtAudioError::WARNING );
\r
7455 MUTEX_LOCK( &stream_.mutex );
\r
7458 snd_pcm_state_t state;
\r
7459 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7460 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7461 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7462 state = snd_pcm_state( handle[0] );
\r
7463 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7464 result = snd_pcm_prepare( handle[0] );
\r
7465 if ( result < 0 ) {
\r
7466 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7467 errorText_ = errorStream_.str();
\r
7473 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7474 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7475 state = snd_pcm_state( handle[1] );
\r
7476 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7477 result = snd_pcm_prepare( handle[1] );
\r
7478 if ( result < 0 ) {
\r
7479 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7480 errorText_ = errorStream_.str();
\r
7486 stream_.state = STREAM_RUNNING;
\r
7489 apiInfo->runnable = true;
\r
7490 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7491 MUTEX_UNLOCK( &stream_.mutex );
\r
7493 if ( result >= 0 ) return;
\r
7494 error( RtAudioError::SYSTEM_ERROR );
\r
7497 void RtApiAlsa :: stopStream()
\r
7500 if ( stream_.state == STREAM_STOPPED ) {
\r
7501 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7502 error( RtAudioError::WARNING );
\r
7506 stream_.state = STREAM_STOPPED;
\r
7507 MUTEX_LOCK( &stream_.mutex );
\r
7510 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7511 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7513 if ( apiInfo->synchronized )
\r
7514 result = snd_pcm_drop( handle[0] );
\r
7516 result = snd_pcm_drain( handle[0] );
\r
7517 if ( result < 0 ) {
\r
7518 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7519 errorText_ = errorStream_.str();
\r
7524 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7525 result = snd_pcm_drop( handle[1] );
\r
7526 if ( result < 0 ) {
\r
7527 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7528 errorText_ = errorStream_.str();
\r
7534 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7535 MUTEX_UNLOCK( &stream_.mutex );
\r
7537 if ( result >= 0 ) return;
\r
7538 error( RtAudioError::SYSTEM_ERROR );
\r
7541 void RtApiAlsa :: abortStream()
\r
7544 if ( stream_.state == STREAM_STOPPED ) {
\r
7545 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7546 error( RtAudioError::WARNING );
\r
7550 stream_.state = STREAM_STOPPED;
\r
7551 MUTEX_LOCK( &stream_.mutex );
\r
7554 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7555 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7556 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7557 result = snd_pcm_drop( handle[0] );
\r
7558 if ( result < 0 ) {
\r
7559 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7560 errorText_ = errorStream_.str();
\r
7565 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7566 result = snd_pcm_drop( handle[1] );
\r
7567 if ( result < 0 ) {
\r
7568 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7569 errorText_ = errorStream_.str();
\r
7575 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7576 MUTEX_UNLOCK( &stream_.mutex );
\r
7578 if ( result >= 0 ) return;
\r
7579 error( RtAudioError::SYSTEM_ERROR );
\r
7582 void RtApiAlsa :: callbackEvent()
\r
7584 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7585 if ( stream_.state == STREAM_STOPPED ) {
\r
7586 MUTEX_LOCK( &stream_.mutex );
\r
7587 while ( !apiInfo->runnable )
\r
7588 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7590 if ( stream_.state != STREAM_RUNNING ) {
\r
7591 MUTEX_UNLOCK( &stream_.mutex );
\r
7594 MUTEX_UNLOCK( &stream_.mutex );
\r
7597 if ( stream_.state == STREAM_CLOSED ) {
\r
7598 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7599 error( RtAudioError::WARNING );
\r
7603 int doStopStream = 0;
\r
7604 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7605 double streamTime = getStreamTime();
\r
7606 RtAudioStreamStatus status = 0;
\r
7607 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7608 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7609 apiInfo->xrun[0] = false;
\r
7611 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7612 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7613 apiInfo->xrun[1] = false;
\r
7615 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7616 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7618 if ( doStopStream == 2 ) {
\r
7623 MUTEX_LOCK( &stream_.mutex );
\r
7625 // The state might change while waiting on a mutex.
\r
7626 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7631 snd_pcm_t **handle;
\r
7632 snd_pcm_sframes_t frames;
\r
7633 RtAudioFormat format;
\r
7634 handle = (snd_pcm_t **) apiInfo->handles;
\r
7636 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7638 // Setup parameters.
\r
7639 if ( stream_.doConvertBuffer[1] ) {
\r
7640 buffer = stream_.deviceBuffer;
\r
7641 channels = stream_.nDeviceChannels[1];
\r
7642 format = stream_.deviceFormat[1];
\r
7645 buffer = stream_.userBuffer[1];
\r
7646 channels = stream_.nUserChannels[1];
\r
7647 format = stream_.userFormat;
\r
7650 // Read samples from device in interleaved/non-interleaved format.
\r
7651 if ( stream_.deviceInterleaved[1] )
\r
7652 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7654 void *bufs[channels];
\r
7655 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7656 for ( int i=0; i<channels; i++ )
\r
7657 bufs[i] = (void *) (buffer + (i * offset));
\r
7658 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7661 if ( result < (int) stream_.bufferSize ) {
\r
7662 // Either an error or overrun occured.
\r
7663 if ( result == -EPIPE ) {
\r
7664 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7665 if ( state == SND_PCM_STATE_XRUN ) {
\r
7666 apiInfo->xrun[1] = true;
\r
7667 result = snd_pcm_prepare( handle[1] );
\r
7668 if ( result < 0 ) {
\r
7669 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7670 errorText_ = errorStream_.str();
\r
7674 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7675 errorText_ = errorStream_.str();
\r
7679 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7680 errorText_ = errorStream_.str();
\r
7682 error( RtAudioError::WARNING );
\r
7686 // Do byte swapping if necessary.
\r
7687 if ( stream_.doByteSwap[1] )
\r
7688 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7690 // Do buffer conversion if necessary.
\r
7691 if ( stream_.doConvertBuffer[1] )
\r
7692 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7694 // Check stream latency
\r
7695 result = snd_pcm_delay( handle[1], &frames );
\r
7696 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7701 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7703 // Setup parameters and do buffer conversion if necessary.
\r
7704 if ( stream_.doConvertBuffer[0] ) {
\r
7705 buffer = stream_.deviceBuffer;
\r
7706 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7707 channels = stream_.nDeviceChannels[0];
\r
7708 format = stream_.deviceFormat[0];
\r
7711 buffer = stream_.userBuffer[0];
\r
7712 channels = stream_.nUserChannels[0];
\r
7713 format = stream_.userFormat;
\r
7716 // Do byte swapping if necessary.
\r
7717 if ( stream_.doByteSwap[0] )
\r
7718 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7720 // Write samples to device in interleaved/non-interleaved format.
\r
7721 if ( stream_.deviceInterleaved[0] )
\r
7722 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7724 void *bufs[channels];
\r
7725 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7726 for ( int i=0; i<channels; i++ )
\r
7727 bufs[i] = (void *) (buffer + (i * offset));
\r
7728 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7731 if ( result < (int) stream_.bufferSize ) {
\r
7732 // Either an error or underrun occured.
\r
7733 if ( result == -EPIPE ) {
\r
7734 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7735 if ( state == SND_PCM_STATE_XRUN ) {
\r
7736 apiInfo->xrun[0] = true;
\r
7737 result = snd_pcm_prepare( handle[0] );
\r
7738 if ( result < 0 ) {
\r
7739 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7740 errorText_ = errorStream_.str();
\r
7744 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7745 errorText_ = errorStream_.str();
\r
7749 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7750 errorText_ = errorStream_.str();
\r
7752 error( RtAudioError::WARNING );
\r
7756 // Check stream latency
\r
7757 result = snd_pcm_delay( handle[0], &frames );
\r
7758 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7762 MUTEX_UNLOCK( &stream_.mutex );
\r
7764 RtApi::tickStreamTime();
\r
7765 if ( doStopStream == 1 ) this->stopStream();
\r
7768 static void *alsaCallbackHandler( void *ptr )
\r
7770 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7771 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7772 bool *isRunning = &info->isRunning;
\r
7774 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7775 if ( &info->doRealtime ) {
\r
7776 pthread_t tID = pthread_self(); // ID of this thread
\r
7777 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7778 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7782 while ( *isRunning == true ) {
\r
7783 pthread_testcancel();
\r
7784 object->callbackEvent();
\r
7787 pthread_exit( NULL );
\r
7790 //******************** End of __LINUX_ALSA__ *********************//
\r
7793 #if defined(__LINUX_PULSE__)
\r
7795 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
7796 // and Tristan Matthews.
\r
7798 #include <pulse/error.h>
\r
7799 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend advertises; zero-terminated so callers
// can iterate without knowing the length.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000,
  0  // sentinel
};
\r
7805 struct rtaudio_pa_format_mapping_t {
\r
7806 RtAudioFormat rtaudio_format;
\r
7807 pa_sample_format_t pa_format;
\r
7810 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
7811 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
7812 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
7813 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
7814 {0, PA_SAMPLE_INVALID}};
\r
7816 struct PulseAudioHandle {
\r
7817 pa_simple *s_play;
\r
7820 pthread_cond_t runnable_cv;
\r
7822 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
7825 RtApiPulse::~RtApiPulse()
\r
7827 if ( stream_.state != STREAM_CLOSED )
\r
7831 unsigned int RtApiPulse::getDeviceCount( void )
\r
7836 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
7838 RtAudio::DeviceInfo info;
\r
7839 info.probed = true;
\r
7840 info.name = "PulseAudio";
\r
7841 info.outputChannels = 2;
\r
7842 info.inputChannels = 2;
\r
7843 info.duplexChannels = 2;
\r
7844 info.isDefaultOutput = true;
\r
7845 info.isDefaultInput = true;
\r
7847 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
7848 info.sampleRates.push_back( *sr );
\r
7850 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
7855 static void *pulseaudio_callback( void * user )
\r
7857 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
7858 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
7859 volatile bool *isRunning = &cbi->isRunning;
\r
7861 while ( *isRunning ) {
\r
7862 pthread_testcancel();
\r
7863 context->callbackEvent();
\r
7866 pthread_exit( NULL );
\r
7869 void RtApiPulse::closeStream( void )
\r
7871 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
7873 stream_.callbackInfo.isRunning = false;
\r
7875 MUTEX_LOCK( &stream_.mutex );
\r
7876 if ( stream_.state == STREAM_STOPPED ) {
\r
7877 pah->runnable = true;
\r
7878 pthread_cond_signal( &pah->runnable_cv );
\r
7880 MUTEX_UNLOCK( &stream_.mutex );
\r
7882 pthread_join( pah->thread, 0 );
\r
7883 if ( pah->s_play ) {
\r
7884 pa_simple_flush( pah->s_play, NULL );
\r
7885 pa_simple_free( pah->s_play );
\r
7888 pa_simple_free( pah->s_rec );
\r
7890 pthread_cond_destroy( &pah->runnable_cv );
\r
7892 stream_.apiHandle = 0;
\r
7895 if ( stream_.userBuffer[0] ) {
\r
7896 free( stream_.userBuffer[0] );
\r
7897 stream_.userBuffer[0] = 0;
\r
7899 if ( stream_.userBuffer[1] ) {
\r
7900 free( stream_.userBuffer[1] );
\r
7901 stream_.userBuffer[1] = 0;
\r
7904 stream_.state = STREAM_CLOSED;
\r
7905 stream_.mode = UNINITIALIZED;
\r
7908 void RtApiPulse::callbackEvent( void )
\r
7910 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
7912 if ( stream_.state == STREAM_STOPPED ) {
\r
7913 MUTEX_LOCK( &stream_.mutex );
\r
7914 while ( !pah->runnable )
\r
7915 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
7917 if ( stream_.state != STREAM_RUNNING ) {
\r
7918 MUTEX_UNLOCK( &stream_.mutex );
\r
7921 MUTEX_UNLOCK( &stream_.mutex );
\r
7924 if ( stream_.state == STREAM_CLOSED ) {
\r
7925 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
7926 "this shouldn't happen!";
\r
7927 error( RtAudioError::WARNING );
\r
7931 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7932 double streamTime = getStreamTime();
\r
7933 RtAudioStreamStatus status = 0;
\r
7934 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
7935 stream_.bufferSize, streamTime, status,
\r
7936 stream_.callbackInfo.userData );
\r
7938 if ( doStopStream == 2 ) {
\r
7943 MUTEX_LOCK( &stream_.mutex );
\r
7944 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
7945 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
7947 if ( stream_.state != STREAM_RUNNING )
\r
7952 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7953 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
7954 convertBuffer( stream_.deviceBuffer,
\r
7955 stream_.userBuffer[OUTPUT],
\r
7956 stream_.convertInfo[OUTPUT] );
\r
7957 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
7958 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
7960 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
7961 formatBytes( stream_.userFormat );
\r
7963 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
7964 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
7965 pa_strerror( pa_error ) << ".";
\r
7966 errorText_ = errorStream_.str();
\r
7967 error( RtAudioError::WARNING );
\r
7971 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
7972 if ( stream_.doConvertBuffer[INPUT] )
\r
7973 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
7974 formatBytes( stream_.deviceFormat[INPUT] );
\r
7976 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
7977 formatBytes( stream_.userFormat );
\r
7979 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
7980 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
7981 pa_strerror( pa_error ) << ".";
\r
7982 errorText_ = errorStream_.str();
\r
7983 error( RtAudioError::WARNING );
\r
7985 if ( stream_.doConvertBuffer[INPUT] ) {
\r
7986 convertBuffer( stream_.userBuffer[INPUT],
\r
7987 stream_.deviceBuffer,
\r
7988 stream_.convertInfo[INPUT] );
\r
7993 MUTEX_UNLOCK( &stream_.mutex );
\r
7994 RtApi::tickStreamTime();
\r
7996 if ( doStopStream == 1 )
\r
8000 void RtApiPulse::startStream( void )
\r
8002 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8004 if ( stream_.state == STREAM_CLOSED ) {
\r
8005 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8006 error( RtAudioError::INVALID_USE );
\r
8009 if ( stream_.state == STREAM_RUNNING ) {
\r
8010 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8011 error( RtAudioError::WARNING );
\r
8015 MUTEX_LOCK( &stream_.mutex );
\r
8017 stream_.state = STREAM_RUNNING;
\r
8019 pah->runnable = true;
\r
8020 pthread_cond_signal( &pah->runnable_cv );
\r
8021 MUTEX_UNLOCK( &stream_.mutex );
\r
8024 void RtApiPulse::stopStream( void )
\r
8026 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8028 if ( stream_.state == STREAM_CLOSED ) {
\r
8029 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8030 error( RtAudioError::INVALID_USE );
\r
8033 if ( stream_.state == STREAM_STOPPED ) {
\r
8034 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8035 error( RtAudioError::WARNING );
\r
8039 stream_.state = STREAM_STOPPED;
\r
8040 MUTEX_LOCK( &stream_.mutex );
\r
8042 if ( pah && pah->s_play ) {
\r
8044 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8045 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8046 pa_strerror( pa_error ) << ".";
\r
8047 errorText_ = errorStream_.str();
\r
8048 MUTEX_UNLOCK( &stream_.mutex );
\r
8049 error( RtAudioError::SYSTEM_ERROR );
\r
8054 stream_.state = STREAM_STOPPED;
\r
8055 MUTEX_UNLOCK( &stream_.mutex );
\r
8058 void RtApiPulse::abortStream( void )
\r
8060 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8062 if ( stream_.state == STREAM_CLOSED ) {
\r
8063 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8064 error( RtAudioError::INVALID_USE );
\r
8067 if ( stream_.state == STREAM_STOPPED ) {
\r
8068 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8069 error( RtAudioError::WARNING );
\r
8073 stream_.state = STREAM_STOPPED;
\r
8074 MUTEX_LOCK( &stream_.mutex );
\r
8076 if ( pah && pah->s_play ) {
\r
8078 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8079 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8080 pa_strerror( pa_error ) << ".";
\r
8081 errorText_ = errorStream_.str();
\r
8082 MUTEX_UNLOCK( &stream_.mutex );
\r
8083 error( RtAudioError::SYSTEM_ERROR );
\r
8088 stream_.state = STREAM_STOPPED;
\r
8089 MUTEX_UNLOCK( &stream_.mutex );
\r
8092 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8093 unsigned int channels, unsigned int firstChannel,
\r
8094 unsigned int sampleRate, RtAudioFormat format,
\r
8095 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8097 PulseAudioHandle *pah = 0;
\r
8098 unsigned long bufferBytes = 0;
\r
8099 pa_sample_spec ss;
\r
8101 if ( device != 0 ) return false;
\r
8102 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8103 if ( channels != 1 && channels != 2 ) {
\r
8104 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8107 ss.channels = channels;
\r
8109 if ( firstChannel != 0 ) return false;
\r
8111 bool sr_found = false;
\r
8112 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8113 if ( sampleRate == *sr ) {
\r
8115 stream_.sampleRate = sampleRate;
\r
8116 ss.rate = sampleRate;
\r
8120 if ( !sr_found ) {
\r
8121 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8125 bool sf_found = 0;
\r
8126 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8127 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8128 if ( format == sf->rtaudio_format ) {
\r
8130 stream_.userFormat = sf->rtaudio_format;
\r
8131 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8132 ss.format = sf->pa_format;
\r
8136 if ( !sf_found ) { // Use internal data format conversion.
\r
8137 stream_.userFormat = format;
\r
8138 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8139 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8142 // Set other stream parameters.
\r
8143 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8144 else stream_.userInterleaved = true;
\r
8145 stream_.deviceInterleaved[mode] = true;
\r
8146 stream_.nBuffers = 1;
\r
8147 stream_.doByteSwap[mode] = false;
\r
8148 stream_.nUserChannels[mode] = channels;
\r
8149 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8150 stream_.channelOffset[mode] = 0;
\r
8151 std::string streamName = "RtAudio";
\r
8153 // Set flags for buffer conversion.
\r
8154 stream_.doConvertBuffer[mode] = false;
\r
8155 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8156 stream_.doConvertBuffer[mode] = true;
\r
8157 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8158 stream_.doConvertBuffer[mode] = true;
\r
8160 // Allocate necessary internal buffers.
\r
8161 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8162 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8163 if ( stream_.userBuffer[mode] == NULL ) {
\r
8164 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8167 stream_.bufferSize = *bufferSize;
\r
8169 if ( stream_.doConvertBuffer[mode] ) {
\r
8171 bool makeBuffer = true;
\r
8172 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8173 if ( mode == INPUT ) {
\r
8174 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8175 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8176 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8180 if ( makeBuffer ) {
\r
8181 bufferBytes *= *bufferSize;
\r
8182 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8183 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8184 if ( stream_.deviceBuffer == NULL ) {
\r
8185 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8191 stream_.device[mode] = device;
\r
8193 // Setup the buffer conversion information structure.
\r
8194 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8196 if ( !stream_.apiHandle ) {
\r
8197 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8199 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8203 stream_.apiHandle = pah;
\r
8204 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8205 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8209 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8212 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8215 pa_buffer_attr buffer_attr;
\r
8216 buffer_attr.fragsize = bufferBytes;
\r
8217 buffer_attr.maxlength = -1;
\r
8219 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8220 if ( !pah->s_rec ) {
\r
8221 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8226 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8227 if ( !pah->s_play ) {
\r
8228 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8236 if ( stream_.mode == UNINITIALIZED )
\r
8237 stream_.mode = mode;
\r
8238 else if ( stream_.mode == mode )
\r
8241 stream_.mode = DUPLEX;
\r
8243 if ( !stream_.callbackInfo.isRunning ) {
\r
8244 stream_.callbackInfo.object = this;
\r
8245 stream_.callbackInfo.isRunning = true;
\r
8246 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8247 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8252 stream_.state = STREAM_STOPPED;
\r
8256 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8257 pthread_cond_destroy( &pah->runnable_cv );
\r
8259 stream_.apiHandle = 0;
\r
8262 for ( int i=0; i<2; i++ ) {
\r
8263 if ( stream_.userBuffer[i] ) {
\r
8264 free( stream_.userBuffer[i] );
\r
8265 stream_.userBuffer[i] = 0;
\r
8269 if ( stream_.deviceBuffer ) {
\r
8270 free( stream_.deviceBuffer );
\r
8271 stream_.deviceBuffer = 0;
\r
8277 //******************** End of __LINUX_PULSE__ *********************//
\r
8280 #if defined(__LINUX_OSS__)
\r
8282 #include <unistd.h>
\r
8283 #include <sys/ioctl.h>
\r
8284 #include <unistd.h>
\r
8285 #include <fcntl.h>
\r
8286 #include <sys/soundcard.h>
\r
8287 #include <errno.h>
\r
8290 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback = 0, capture = 1)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // duplex trigger already issued
  pthread_cond_t runnable; // signals the callback thread to resume

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8304 RtApiOss :: RtApiOss()
\r
8306 // Nothing to do here.
\r
8309 RtApiOss :: ~RtApiOss()
\r
8311 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8314 unsigned int RtApiOss :: getDeviceCount( void )
\r
8316 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8317 if ( mixerfd == -1 ) {
\r
8318 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8319 error( RtAudioError::WARNING );
\r
8323 oss_sysinfo sysinfo;
\r
8324 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8326 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8327 error( RtAudioError::WARNING );
\r
8332 return sysinfo.numaudios;
\r
8335 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8337 RtAudio::DeviceInfo info;
\r
8338 info.probed = false;
\r
8340 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8341 if ( mixerfd == -1 ) {
\r
8342 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8343 error( RtAudioError::WARNING );
\r
8347 oss_sysinfo sysinfo;
\r
8348 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8349 if ( result == -1 ) {
\r
8351 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8352 error( RtAudioError::WARNING );
\r
8356 unsigned nDevices = sysinfo.numaudios;
\r
8357 if ( nDevices == 0 ) {
\r
8359 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8360 error( RtAudioError::INVALID_USE );
\r
8364 if ( device >= nDevices ) {
\r
8366 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8367 error( RtAudioError::INVALID_USE );
\r
8371 oss_audioinfo ainfo;
\r
8372 ainfo.dev = device;
\r
8373 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8375 if ( result == -1 ) {
\r
8376 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8377 errorText_ = errorStream_.str();
\r
8378 error( RtAudioError::WARNING );
\r
8383 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8384 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8385 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8386 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8387 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8390 // Probe data formats ... do for input
\r
8391 unsigned long mask = ainfo.iformats;
\r
8392 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8393 info.nativeFormats |= RTAUDIO_SINT16;
\r
8394 if ( mask & AFMT_S8 )
\r
8395 info.nativeFormats |= RTAUDIO_SINT8;
\r
8396 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8397 info.nativeFormats |= RTAUDIO_SINT32;
\r
8398 if ( mask & AFMT_FLOAT )
\r
8399 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8400 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8401 info.nativeFormats |= RTAUDIO_SINT24;
\r
8403 // Check that we have at least one supported format
\r
8404 if ( info.nativeFormats == 0 ) {
\r
8405 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8406 errorText_ = errorStream_.str();
\r
8407 error( RtAudioError::WARNING );
\r
8411 // Probe the supported sample rates.
\r
8412 info.sampleRates.clear();
\r
8413 if ( ainfo.nrates ) {
\r
8414 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8415 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8416 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8417 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8424 // Check min and max rate values;
\r
8425 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8426 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8427 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8431 if ( info.sampleRates.size() == 0 ) {
\r
8432 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8433 errorText_ = errorStream_.str();
\r
8434 error( RtAudioError::WARNING );
\r
8437 info.probed = true;
\r
8438 info.name = ainfo.name;
\r
8445 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8446 unsigned int firstChannel, unsigned int sampleRate,
\r
8447 RtAudioFormat format, unsigned int *bufferSize,
\r
8448 RtAudio::StreamOptions *options )
\r
8450 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8451 if ( mixerfd == -1 ) {
\r
8452 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8456 oss_sysinfo sysinfo;
\r
8457 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8458 if ( result == -1 ) {
\r
8460 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8464 unsigned nDevices = sysinfo.numaudios;
\r
8465 if ( nDevices == 0 ) {
\r
8466 // This should not happen because a check is made before this function is called.
\r
8468 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8472 if ( device >= nDevices ) {
\r
8473 // This should not happen because a check is made before this function is called.
\r
8475 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8479 oss_audioinfo ainfo;
\r
8480 ainfo.dev = device;
\r
8481 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8483 if ( result == -1 ) {
\r
8484 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8485 errorText_ = errorStream_.str();
\r
8489 // Check if device supports input or output
\r
8490 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8491 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8492 if ( mode == OUTPUT )
\r
8493 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8495 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8496 errorText_ = errorStream_.str();
\r
8501 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8502 if ( mode == OUTPUT )
\r
8503 flags |= O_WRONLY;
\r
8504 else { // mode == INPUT
\r
8505 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8506 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8507 close( handle->id[0] );
\r
8508 handle->id[0] = 0;
\r
8509 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8510 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8511 errorText_ = errorStream_.str();
\r
8514 // Check that the number previously set channels is the same.
\r
8515 if ( stream_.nUserChannels[0] != channels ) {
\r
8516 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8517 errorText_ = errorStream_.str();
\r
8523 flags |= O_RDONLY;
\r
8526 // Set exclusive access if specified.
\r
8527 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8529 // Try to open the device.
\r
8531 fd = open( ainfo.devnode, flags, 0 );
\r
8533 if ( errno == EBUSY )
\r
8534 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8536 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8537 errorText_ = errorStream_.str();
\r
8541 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8543 if ( flags | O_RDWR ) {
\r
8544 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8545 if ( result == -1) {
\r
8546 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8547 errorText_ = errorStream_.str();
\r
8553 // Check the device channel support.
\r
8554 stream_.nUserChannels[mode] = channels;
\r
8555 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8557 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8558 errorText_ = errorStream_.str();
\r
8562 // Set the number of channels.
\r
8563 int deviceChannels = channels + firstChannel;
\r
8564 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8565 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8567 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8568 errorText_ = errorStream_.str();
\r
8571 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8573 // Get the data format mask
\r
8575 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8576 if ( result == -1 ) {
\r
8578 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8579 errorText_ = errorStream_.str();
\r
8583 // Determine how to set the device format.
\r
8584 stream_.userFormat = format;
\r
8585 int deviceFormat = -1;
\r
8586 stream_.doByteSwap[mode] = false;
\r
8587 if ( format == RTAUDIO_SINT8 ) {
\r
8588 if ( mask & AFMT_S8 ) {
\r
8589 deviceFormat = AFMT_S8;
\r
8590 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8593 else if ( format == RTAUDIO_SINT16 ) {
\r
8594 if ( mask & AFMT_S16_NE ) {
\r
8595 deviceFormat = AFMT_S16_NE;
\r
8596 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8598 else if ( mask & AFMT_S16_OE ) {
\r
8599 deviceFormat = AFMT_S16_OE;
\r
8600 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8601 stream_.doByteSwap[mode] = true;
\r
8604 else if ( format == RTAUDIO_SINT24 ) {
\r
8605 if ( mask & AFMT_S24_NE ) {
\r
8606 deviceFormat = AFMT_S24_NE;
\r
8607 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8609 else if ( mask & AFMT_S24_OE ) {
\r
8610 deviceFormat = AFMT_S24_OE;
\r
8611 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8612 stream_.doByteSwap[mode] = true;
\r
8615 else if ( format == RTAUDIO_SINT32 ) {
\r
8616 if ( mask & AFMT_S32_NE ) {
\r
8617 deviceFormat = AFMT_S32_NE;
\r
8618 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8620 else if ( mask & AFMT_S32_OE ) {
\r
8621 deviceFormat = AFMT_S32_OE;
\r
8622 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8623 stream_.doByteSwap[mode] = true;
\r
8627 if ( deviceFormat == -1 ) {
\r
8628 // The user requested format is not natively supported by the device.
\r
8629 if ( mask & AFMT_S16_NE ) {
\r
8630 deviceFormat = AFMT_S16_NE;
\r
8631 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8633 else if ( mask & AFMT_S32_NE ) {
\r
8634 deviceFormat = AFMT_S32_NE;
\r
8635 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8637 else if ( mask & AFMT_S24_NE ) {
\r
8638 deviceFormat = AFMT_S24_NE;
\r
8639 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8641 else if ( mask & AFMT_S16_OE ) {
\r
8642 deviceFormat = AFMT_S16_OE;
\r
8643 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8644 stream_.doByteSwap[mode] = true;
\r
8646 else if ( mask & AFMT_S32_OE ) {
\r
8647 deviceFormat = AFMT_S32_OE;
\r
8648 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8649 stream_.doByteSwap[mode] = true;
\r
8651 else if ( mask & AFMT_S24_OE ) {
\r
8652 deviceFormat = AFMT_S24_OE;
\r
8653 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8654 stream_.doByteSwap[mode] = true;
\r
8656 else if ( mask & AFMT_S8) {
\r
8657 deviceFormat = AFMT_S8;
\r
8658 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8662 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8663 // This really shouldn't happen ...
\r
8665 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8666 errorText_ = errorStream_.str();
\r
8670 // Set the data format.
\r
8671 int temp = deviceFormat;
\r
8672 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8673 if ( result == -1 || deviceFormat != temp ) {
\r
8675 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8676 errorText_ = errorStream_.str();
\r
8680 // Attempt to set the buffer size. According to OSS, the minimum
\r
8681 // number of buffers is two. The supposed minimum buffer size is 16
\r
8682 // bytes, so that will be our lower bound. The argument to this
\r
8683 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8684 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8685 // We'll check the actual value used near the end of the setup
\r
8687 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8688 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8690 if ( options ) buffers = options->numberOfBuffers;
\r
8691 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8692 if ( buffers < 2 ) buffers = 3;
\r
8693 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8694 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8695 if ( result == -1 ) {
\r
8697 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8698 errorText_ = errorStream_.str();
\r
8701 stream_.nBuffers = buffers;
\r
8703 // Save buffer size (in sample frames).
\r
8704 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8705 stream_.bufferSize = *bufferSize;
\r
8707 // Set the sample rate.
\r
8708 int srate = sampleRate;
\r
8709 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8710 if ( result == -1 ) {
\r
8712 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8713 errorText_ = errorStream_.str();
\r
8717 // Verify the sample rate setup worked.
\r
8718 if ( abs( srate - sampleRate ) > 100 ) {
\r
8720 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8721 errorText_ = errorStream_.str();
\r
8724 stream_.sampleRate = sampleRate;
\r
8726 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8727 // We're doing duplex setup here.
\r
8728 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8729 stream_.nDeviceChannels[0] = deviceChannels;
\r
8732 // Set interleaving parameters.
\r
8733 stream_.userInterleaved = true;
\r
8734 stream_.deviceInterleaved[mode] = true;
\r
8735 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8736 stream_.userInterleaved = false;
\r
8738 // Set flags for buffer conversion
\r
8739 stream_.doConvertBuffer[mode] = false;
\r
8740 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8741 stream_.doConvertBuffer[mode] = true;
\r
8742 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8743 stream_.doConvertBuffer[mode] = true;
\r
8744 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8745 stream_.nUserChannels[mode] > 1 )
\r
8746 stream_.doConvertBuffer[mode] = true;
\r
8748 // Allocate the stream handles if necessary and then save.
\r
8749 if ( stream_.apiHandle == 0 ) {
\r
8751 handle = new OssHandle;
\r
8753 catch ( std::bad_alloc& ) {
\r
8754 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8758 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8759 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8763 stream_.apiHandle = (void *) handle;
\r
8766 handle = (OssHandle *) stream_.apiHandle;
\r
8768 handle->id[mode] = fd;
\r
8770 // Allocate necessary internal buffers.
\r
8771 unsigned long bufferBytes;
\r
8772 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8773 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8774 if ( stream_.userBuffer[mode] == NULL ) {
\r
8775 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8779 if ( stream_.doConvertBuffer[mode] ) {
\r
8781 bool makeBuffer = true;
\r
8782 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8783 if ( mode == INPUT ) {
\r
8784 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8785 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8786 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8790 if ( makeBuffer ) {
\r
8791 bufferBytes *= *bufferSize;
\r
8792 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8793 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8794 if ( stream_.deviceBuffer == NULL ) {
\r
8795 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
8801 stream_.device[mode] = device;
\r
8802 stream_.state = STREAM_STOPPED;
\r
8804 // Setup the buffer conversion information structure.
\r
8805 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8807 // Setup thread if necessary.
\r
8808 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
8809 // We had already set up an output stream.
\r
8810 stream_.mode = DUPLEX;
\r
8811 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
8814 stream_.mode = mode;
\r
8816 // Setup callback thread.
\r
8817 stream_.callbackInfo.object = (void *) this;
\r
8819 // Set the thread attributes for joinable and realtime scheduling
\r
8820 // priority. The higher priority will only take affect if the
\r
8821 // program is run as root or suid.
\r
8822 pthread_attr_t attr;
\r
8823 pthread_attr_init( &attr );
\r
8824 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
8825 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8826 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
8827 struct sched_param param;
\r
8828 int priority = options->priority;
\r
8829 int min = sched_get_priority_min( SCHED_RR );
\r
8830 int max = sched_get_priority_max( SCHED_RR );
\r
8831 if ( priority < min ) priority = min;
\r
8832 else if ( priority > max ) priority = max;
\r
8833 param.sched_priority = priority;
\r
8834 pthread_attr_setschedparam( &attr, ¶m );
\r
8835 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
8838 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
8840 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
8843 stream_.callbackInfo.isRunning = true;
\r
8844 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
8845 pthread_attr_destroy( &attr );
\r
8847 stream_.callbackInfo.isRunning = false;
\r
8848 errorText_ = "RtApiOss::error creating callback thread!";
\r
8857 pthread_cond_destroy( &handle->runnable );
\r
8858 if ( handle->id[0] ) close( handle->id[0] );
\r
8859 if ( handle->id[1] ) close( handle->id[1] );
\r
8861 stream_.apiHandle = 0;
\r
8864 for ( int i=0; i<2; i++ ) {
\r
8865 if ( stream_.userBuffer[i] ) {
\r
8866 free( stream_.userBuffer[i] );
\r
8867 stream_.userBuffer[i] = 0;
\r
8871 if ( stream_.deviceBuffer ) {
\r
8872 free( stream_.deviceBuffer );
\r
8873 stream_.deviceBuffer = 0;
\r
8879 void RtApiOss :: closeStream()
\r
8881 if ( stream_.state == STREAM_CLOSED ) {
\r
8882 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
8883 error( RtAudioError::WARNING );
\r
8887 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8888 stream_.callbackInfo.isRunning = false;
\r
8889 MUTEX_LOCK( &stream_.mutex );
\r
8890 if ( stream_.state == STREAM_STOPPED )
\r
8891 pthread_cond_signal( &handle->runnable );
\r
8892 MUTEX_UNLOCK( &stream_.mutex );
\r
8893 pthread_join( stream_.callbackInfo.thread, NULL );
\r
8895 if ( stream_.state == STREAM_RUNNING ) {
\r
8896 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
8897 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
8899 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
8900 stream_.state = STREAM_STOPPED;
\r
8904 pthread_cond_destroy( &handle->runnable );
\r
8905 if ( handle->id[0] ) close( handle->id[0] );
\r
8906 if ( handle->id[1] ) close( handle->id[1] );
\r
8908 stream_.apiHandle = 0;
\r
8911 for ( int i=0; i<2; i++ ) {
\r
8912 if ( stream_.userBuffer[i] ) {
\r
8913 free( stream_.userBuffer[i] );
\r
8914 stream_.userBuffer[i] = 0;
\r
8918 if ( stream_.deviceBuffer ) {
\r
8919 free( stream_.deviceBuffer );
\r
8920 stream_.deviceBuffer = 0;
\r
8923 stream_.mode = UNINITIALIZED;
\r
8924 stream_.state = STREAM_CLOSED;
\r
8927 void RtApiOss :: startStream()
\r
8930 if ( stream_.state == STREAM_RUNNING ) {
\r
8931 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
8932 error( RtAudioError::WARNING );
\r
8936 MUTEX_LOCK( &stream_.mutex );
\r
8938 stream_.state = STREAM_RUNNING;
\r
8940 // No need to do anything else here ... OSS automatically starts
\r
8941 // when fed samples.
\r
8943 MUTEX_UNLOCK( &stream_.mutex );
\r
8945 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8946 pthread_cond_signal( &handle->runnable );
\r
8949 void RtApiOss :: stopStream()
\r
8952 if ( stream_.state == STREAM_STOPPED ) {
\r
8953 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
8954 error( RtAudioError::WARNING );
\r
8958 MUTEX_LOCK( &stream_.mutex );
\r
8960 // The state might change while waiting on a mutex.
\r
8961 if ( stream_.state == STREAM_STOPPED ) {
\r
8962 MUTEX_UNLOCK( &stream_.mutex );
\r
8967 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8968 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8970 // Flush the output with zeros a few times.
\r
8973 RtAudioFormat format;
\r
8975 if ( stream_.doConvertBuffer[0] ) {
\r
8976 buffer = stream_.deviceBuffer;
\r
8977 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
8978 format = stream_.deviceFormat[0];
\r
8981 buffer = stream_.userBuffer[0];
\r
8982 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
8983 format = stream_.userFormat;
\r
8986 memset( buffer, 0, samples * formatBytes(format) );
\r
8987 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
8988 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
8989 if ( result == -1 ) {
\r
8990 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
8991 error( RtAudioError::WARNING );
\r
8995 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
8996 if ( result == -1 ) {
\r
8997 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
8998 errorText_ = errorStream_.str();
\r
9001 handle->triggered = false;
\r
9004 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9005 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9006 if ( result == -1 ) {
\r
9007 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9008 errorText_ = errorStream_.str();
\r
9014 stream_.state = STREAM_STOPPED;
\r
9015 MUTEX_UNLOCK( &stream_.mutex );
\r
9017 if ( result != -1 ) return;
\r
9018 error( RtAudioError::SYSTEM_ERROR );
\r
9021 void RtApiOss :: abortStream()
\r
9024 if ( stream_.state == STREAM_STOPPED ) {
\r
9025 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9026 error( RtAudioError::WARNING );
\r
9030 MUTEX_LOCK( &stream_.mutex );
\r
9032 // The state might change while waiting on a mutex.
\r
9033 if ( stream_.state == STREAM_STOPPED ) {
\r
9034 MUTEX_UNLOCK( &stream_.mutex );
\r
9039 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9040 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9041 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9042 if ( result == -1 ) {
\r
9043 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9044 errorText_ = errorStream_.str();
\r
9047 handle->triggered = false;
\r
9050 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9051 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9052 if ( result == -1 ) {
\r
9053 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9054 errorText_ = errorStream_.str();
\r
9060 stream_.state = STREAM_STOPPED;
\r
9061 MUTEX_UNLOCK( &stream_.mutex );
\r
9063 if ( result != -1 ) return;
\r
9064 error( RtAudioError::SYSTEM_ERROR );
\r
9067 void RtApiOss :: callbackEvent()
\r
9069 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9070 if ( stream_.state == STREAM_STOPPED ) {
\r
9071 MUTEX_LOCK( &stream_.mutex );
\r
9072 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9073 if ( stream_.state != STREAM_RUNNING ) {
\r
9074 MUTEX_UNLOCK( &stream_.mutex );
\r
9077 MUTEX_UNLOCK( &stream_.mutex );
\r
9080 if ( stream_.state == STREAM_CLOSED ) {
\r
9081 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9082 error( RtAudioError::WARNING );
\r
9086 // Invoke user callback to get fresh output data.
\r
9087 int doStopStream = 0;
\r
9088 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9089 double streamTime = getStreamTime();
\r
9090 RtAudioStreamStatus status = 0;
\r
9091 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9092 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9093 handle->xrun[0] = false;
\r
9095 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9096 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9097 handle->xrun[1] = false;
\r
9099 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9100 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9101 if ( doStopStream == 2 ) {
\r
9102 this->abortStream();
\r
9106 MUTEX_LOCK( &stream_.mutex );
\r
9108 // The state might change while waiting on a mutex.
\r
9109 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9114 RtAudioFormat format;
\r
9116 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9118 // Setup parameters and do buffer conversion if necessary.
\r
9119 if ( stream_.doConvertBuffer[0] ) {
\r
9120 buffer = stream_.deviceBuffer;
\r
9121 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9122 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9123 format = stream_.deviceFormat[0];
\r
9126 buffer = stream_.userBuffer[0];
\r
9127 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9128 format = stream_.userFormat;
\r
9131 // Do byte swapping if necessary.
\r
9132 if ( stream_.doByteSwap[0] )
\r
9133 byteSwapBuffer( buffer, samples, format );
\r
9135 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9137 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9138 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9139 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9140 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9141 handle->triggered = true;
\r
9144 // Write samples to device.
\r
9145 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9147 if ( result == -1 ) {
\r
9148 // We'll assume this is an underrun, though there isn't a
\r
9149 // specific means for determining that.
\r
9150 handle->xrun[0] = true;
\r
9151 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9152 error( RtAudioError::WARNING );
\r
9153 // Continue on to input section.
\r
9157 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9159 // Setup parameters.
\r
9160 if ( stream_.doConvertBuffer[1] ) {
\r
9161 buffer = stream_.deviceBuffer;
\r
9162 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9163 format = stream_.deviceFormat[1];
\r
9166 buffer = stream_.userBuffer[1];
\r
9167 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9168 format = stream_.userFormat;
\r
9171 // Read samples from device.
\r
9172 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9174 if ( result == -1 ) {
\r
9175 // We'll assume this is an overrun, though there isn't a
\r
9176 // specific means for determining that.
\r
9177 handle->xrun[1] = true;
\r
9178 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9179 error( RtAudioError::WARNING );
\r
9183 // Do byte swapping if necessary.
\r
9184 if ( stream_.doByteSwap[1] )
\r
9185 byteSwapBuffer( buffer, samples, format );
\r
9187 // Do buffer conversion if necessary.
\r
9188 if ( stream_.doConvertBuffer[1] )
\r
9189 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9193 MUTEX_UNLOCK( &stream_.mutex );
\r
9195 RtApi::tickStreamTime();
\r
9196 if ( doStopStream == 1 ) this->stopStream();
\r
9199 static void *ossCallbackHandler( void *ptr )
\r
9201 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9202 RtApiOss *object = (RtApiOss *) info->object;
\r
9203 bool *isRunning = &info->isRunning;
\r
9205 while ( *isRunning == true ) {
\r
9206 pthread_testcancel();
\r
9207 object->callbackEvent();
\r
9210 pthread_exit( NULL );
\r
9213 //******************** End of __LINUX_OSS__ *********************//
\r
9217 // *************************************************** //
\r
9219 // Protected common (OS-independent) RtAudio methods.
\r
9221 // *************************************************** //
\r
9223 // This method can be modified to control the behavior of error
\r
9224 // message printing.
\r
9225 void RtApi :: error( RtAudioError::Type type )
\r
9227 errorStream_.str(""); // clear the ostringstream
\r
9229 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9230 if ( errorCallback ) {
\r
9231 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9233 if ( firstErrorOccurred_ )
\r
9236 firstErrorOccurred_ = true;
\r
9237 const std::string errorMessage = errorText_;
\r
9239 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9240 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9244 errorCallback( type, errorMessage );
\r
9245 firstErrorOccurred_ = false;
\r
9249 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9250 std::cerr << '\n' << errorText_ << "\n\n";
\r
9251 else if ( type != RtAudioError::WARNING )
\r
9252 throw( RtAudioError( errorText_, type ) );
\r
9255 void RtApi :: verifyStream()
\r
9257 if ( stream_.state == STREAM_CLOSED ) {
\r
9258 errorText_ = "RtApi:: a stream is not open!";
\r
9259 error( RtAudioError::INVALID_USE );
\r
9263 void RtApi :: clearStreamInfo()
\r
9265 stream_.mode = UNINITIALIZED;
\r
9266 stream_.state = STREAM_CLOSED;
\r
9267 stream_.sampleRate = 0;
\r
9268 stream_.bufferSize = 0;
\r
9269 stream_.nBuffers = 0;
\r
9270 stream_.userFormat = 0;
\r
9271 stream_.userInterleaved = true;
\r
9272 stream_.streamTime = 0.0;
\r
9273 stream_.apiHandle = 0;
\r
9274 stream_.deviceBuffer = 0;
\r
9275 stream_.callbackInfo.callback = 0;
\r
9276 stream_.callbackInfo.userData = 0;
\r
9277 stream_.callbackInfo.isRunning = false;
\r
9278 stream_.callbackInfo.errorCallback = 0;
\r
9279 for ( int i=0; i<2; i++ ) {
\r
9280 stream_.device[i] = 11111;
\r
9281 stream_.doConvertBuffer[i] = false;
\r
9282 stream_.deviceInterleaved[i] = true;
\r
9283 stream_.doByteSwap[i] = false;
\r
9284 stream_.nUserChannels[i] = 0;
\r
9285 stream_.nDeviceChannels[i] = 0;
\r
9286 stream_.channelOffset[i] = 0;
\r
9287 stream_.deviceFormat[i] = 0;
\r
9288 stream_.latency[i] = 0;
\r
9289 stream_.userBuffer[i] = 0;
\r
9290 stream_.convertInfo[i].channels = 0;
\r
9291 stream_.convertInfo[i].inJump = 0;
\r
9292 stream_.convertInfo[i].outJump = 0;
\r
9293 stream_.convertInfo[i].inFormat = 0;
\r
9294 stream_.convertInfo[i].outFormat = 0;
\r
9295 stream_.convertInfo[i].inOffset.clear();
\r
9296 stream_.convertInfo[i].outOffset.clear();
\r
9300 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9302 if ( format == RTAUDIO_SINT16 )
\r
9304 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9306 else if ( format == RTAUDIO_FLOAT64 )
\r
9308 else if ( format == RTAUDIO_SINT24 )
\r
9310 else if ( format == RTAUDIO_SINT8 )
\r
9313 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9314 error( RtAudioError::WARNING );
\r
9319 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9321 if ( mode == INPUT ) { // convert device to user buffer
\r
9322 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9323 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9324 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9325 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9327 else { // convert user to device buffer
\r
9328 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9329 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9330 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9331 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9334 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9335 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9337 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9339 // Set up the interleave/deinterleave offsets.
\r
9340 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9341 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9342 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9343 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9344 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9345 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9346 stream_.convertInfo[mode].inJump = 1;
\r
9350 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9351 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9352 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9353 stream_.convertInfo[mode].outJump = 1;
\r
9357 else { // no (de)interleaving
\r
9358 if ( stream_.userInterleaved ) {
\r
9359 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9360 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9361 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9365 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9366 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9367 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9368 stream_.convertInfo[mode].inJump = 1;
\r
9369 stream_.convertInfo[mode].outJump = 1;
\r
9374 // Add channel offset.
\r
9375 if ( firstChannel > 0 ) {
\r
9376 if ( stream_.deviceInterleaved[mode] ) {
\r
9377 if ( mode == OUTPUT ) {
\r
9378 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9379 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9382 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9383 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9387 if ( mode == OUTPUT ) {
\r
9388 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9389 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9392 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9393 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9399 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9401 // This function does format conversion, input/output channel compensation, and
\r
9402 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9403 // the lower three bytes of a 32-bit integer.
\r
9405 // Clear our device buffer when in/out duplex device channels are different
\r
9406 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9407 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9408 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9411 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9413 Float64 *out = (Float64 *)outBuffer;
\r
9415 if (info.inFormat == RTAUDIO_SINT8) {
\r
9416 signed char *in = (signed char *)inBuffer;
\r
9417 scale = 1.0 / 127.5;
\r
9418 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9419 for (j=0; j<info.channels; j++) {
\r
9420 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9421 out[info.outOffset[j]] += 0.5;
\r
9422 out[info.outOffset[j]] *= scale;
\r
9424 in += info.inJump;
\r
9425 out += info.outJump;
\r
9428 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9429 Int16 *in = (Int16 *)inBuffer;
\r
9430 scale = 1.0 / 32767.5;
\r
9431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9432 for (j=0; j<info.channels; j++) {
\r
9433 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9434 out[info.outOffset[j]] += 0.5;
\r
9435 out[info.outOffset[j]] *= scale;
\r
9437 in += info.inJump;
\r
9438 out += info.outJump;
\r
9441 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9442 Int24 *in = (Int24 *)inBuffer;
\r
9443 scale = 1.0 / 8388607.5;
\r
9444 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9445 for (j=0; j<info.channels; j++) {
\r
9446 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9447 out[info.outOffset[j]] += 0.5;
\r
9448 out[info.outOffset[j]] *= scale;
\r
9450 in += info.inJump;
\r
9451 out += info.outJump;
\r
9454 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9455 Int32 *in = (Int32 *)inBuffer;
\r
9456 scale = 1.0 / 2147483647.5;
\r
9457 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9458 for (j=0; j<info.channels; j++) {
\r
9459 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9460 out[info.outOffset[j]] += 0.5;
\r
9461 out[info.outOffset[j]] *= scale;
\r
9463 in += info.inJump;
\r
9464 out += info.outJump;
\r
9467 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9468 Float32 *in = (Float32 *)inBuffer;
\r
9469 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9470 for (j=0; j<info.channels; j++) {
\r
9471 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9473 in += info.inJump;
\r
9474 out += info.outJump;
\r
9477 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9478 // Channel compensation and/or (de)interleaving only.
\r
9479 Float64 *in = (Float64 *)inBuffer;
\r
9480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9481 for (j=0; j<info.channels; j++) {
\r
9482 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9484 in += info.inJump;
\r
9485 out += info.outJump;
\r
9489 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9491 Float32 *out = (Float32 *)outBuffer;
\r
9493 if (info.inFormat == RTAUDIO_SINT8) {
\r
9494 signed char *in = (signed char *)inBuffer;
\r
9495 scale = (Float32) ( 1.0 / 127.5 );
\r
9496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9497 for (j=0; j<info.channels; j++) {
\r
9498 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9499 out[info.outOffset[j]] += 0.5;
\r
9500 out[info.outOffset[j]] *= scale;
\r
9502 in += info.inJump;
\r
9503 out += info.outJump;
\r
9506 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9507 Int16 *in = (Int16 *)inBuffer;
\r
9508 scale = (Float32) ( 1.0 / 32767.5 );
\r
9509 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9510 for (j=0; j<info.channels; j++) {
\r
9511 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9512 out[info.outOffset[j]] += 0.5;
\r
9513 out[info.outOffset[j]] *= scale;
\r
9515 in += info.inJump;
\r
9516 out += info.outJump;
\r
9519 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9520 Int24 *in = (Int24 *)inBuffer;
\r
9521 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9522 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9523 for (j=0; j<info.channels; j++) {
\r
9524 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9525 out[info.outOffset[j]] += 0.5;
\r
9526 out[info.outOffset[j]] *= scale;
\r
9528 in += info.inJump;
\r
9529 out += info.outJump;
\r
9532 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9533 Int32 *in = (Int32 *)inBuffer;
\r
9534 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9536 for (j=0; j<info.channels; j++) {
\r
9537 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9538 out[info.outOffset[j]] += 0.5;
\r
9539 out[info.outOffset[j]] *= scale;
\r
9541 in += info.inJump;
\r
9542 out += info.outJump;
\r
9545 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9546 // Channel compensation and/or (de)interleaving only.
\r
9547 Float32 *in = (Float32 *)inBuffer;
\r
9548 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9549 for (j=0; j<info.channels; j++) {
\r
9550 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9552 in += info.inJump;
\r
9553 out += info.outJump;
\r
9556 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9557 Float64 *in = (Float64 *)inBuffer;
\r
9558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9559 for (j=0; j<info.channels; j++) {
\r
9560 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9562 in += info.inJump;
\r
9563 out += info.outJump;
\r
9567 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9568 Int32 *out = (Int32 *)outBuffer;
\r
9569 if (info.inFormat == RTAUDIO_SINT8) {
\r
9570 signed char *in = (signed char *)inBuffer;
\r
9571 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9572 for (j=0; j<info.channels; j++) {
\r
9573 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9574 out[info.outOffset[j]] <<= 24;
\r
9576 in += info.inJump;
\r
9577 out += info.outJump;
\r
9580 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9581 Int16 *in = (Int16 *)inBuffer;
\r
9582 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9583 for (j=0; j<info.channels; j++) {
\r
9584 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9585 out[info.outOffset[j]] <<= 16;
\r
9587 in += info.inJump;
\r
9588 out += info.outJump;
\r
9591 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9592 Int24 *in = (Int24 *)inBuffer;
\r
9593 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9594 for (j=0; j<info.channels; j++) {
\r
9595 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9596 out[info.outOffset[j]] <<= 8;
\r
9598 in += info.inJump;
\r
9599 out += info.outJump;
\r
9602 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9603 // Channel compensation and/or (de)interleaving only.
\r
9604 Int32 *in = (Int32 *)inBuffer;
\r
9605 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9606 for (j=0; j<info.channels; j++) {
\r
9607 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9609 in += info.inJump;
\r
9610 out += info.outJump;
\r
9613 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9614 Float32 *in = (Float32 *)inBuffer;
\r
9615 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9616 for (j=0; j<info.channels; j++) {
\r
9617 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9619 in += info.inJump;
\r
9620 out += info.outJump;
\r
9623 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9624 Float64 *in = (Float64 *)inBuffer;
\r
9625 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9626 for (j=0; j<info.channels; j++) {
\r
9627 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9629 in += info.inJump;
\r
9630 out += info.outJump;
\r
9634 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9635 Int24 *out = (Int24 *)outBuffer;
\r
9636 if (info.inFormat == RTAUDIO_SINT8) {
\r
9637 signed char *in = (signed char *)inBuffer;
\r
9638 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9639 for (j=0; j<info.channels; j++) {
\r
9640 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9641 //out[info.outOffset[j]] <<= 16;
\r
9643 in += info.inJump;
\r
9644 out += info.outJump;
\r
9647 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9648 Int16 *in = (Int16 *)inBuffer;
\r
9649 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9650 for (j=0; j<info.channels; j++) {
\r
9651 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9652 //out[info.outOffset[j]] <<= 8;
\r
9654 in += info.inJump;
\r
9655 out += info.outJump;
\r
9658 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9659 // Channel compensation and/or (de)interleaving only.
\r
9660 Int24 *in = (Int24 *)inBuffer;
\r
9661 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9662 for (j=0; j<info.channels; j++) {
\r
9663 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9665 in += info.inJump;
\r
9666 out += info.outJump;
\r
9669 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9670 Int32 *in = (Int32 *)inBuffer;
\r
9671 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9672 for (j=0; j<info.channels; j++) {
\r
9673 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9674 //out[info.outOffset[j]] >>= 8;
\r
9676 in += info.inJump;
\r
9677 out += info.outJump;
\r
9680 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9681 Float32 *in = (Float32 *)inBuffer;
\r
9682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9683 for (j=0; j<info.channels; j++) {
\r
9684 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9686 in += info.inJump;
\r
9687 out += info.outJump;
\r
9690 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9691 Float64 *in = (Float64 *)inBuffer;
\r
9692 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9693 for (j=0; j<info.channels; j++) {
\r
9694 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9696 in += info.inJump;
\r
9697 out += info.outJump;
\r
9701 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9702 Int16 *out = (Int16 *)outBuffer;
\r
9703 if (info.inFormat == RTAUDIO_SINT8) {
\r
9704 signed char *in = (signed char *)inBuffer;
\r
9705 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9706 for (j=0; j<info.channels; j++) {
\r
9707 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9708 out[info.outOffset[j]] <<= 8;
\r
9710 in += info.inJump;
\r
9711 out += info.outJump;
\r
9714 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9715 // Channel compensation and/or (de)interleaving only.
\r
9716 Int16 *in = (Int16 *)inBuffer;
\r
9717 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9718 for (j=0; j<info.channels; j++) {
\r
9719 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9721 in += info.inJump;
\r
9722 out += info.outJump;
\r
9725 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9726 Int24 *in = (Int24 *)inBuffer;
\r
9727 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9728 for (j=0; j<info.channels; j++) {
\r
9729 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9736 Int32 *in = (Int32 *)inBuffer;
\r
9737 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9738 for (j=0; j<info.channels; j++) {
\r
9739 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9741 in += info.inJump;
\r
9742 out += info.outJump;
\r
9745 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9746 Float32 *in = (Float32 *)inBuffer;
\r
9747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9748 for (j=0; j<info.channels; j++) {
\r
9749 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9751 in += info.inJump;
\r
9752 out += info.outJump;
\r
9755 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9756 Float64 *in = (Float64 *)inBuffer;
\r
9757 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9758 for (j=0; j<info.channels; j++) {
\r
9759 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9761 in += info.inJump;
\r
9762 out += info.outJump;
\r
9766 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9767 signed char *out = (signed char *)outBuffer;
\r
9768 if (info.inFormat == RTAUDIO_SINT8) {
\r
9769 // Channel compensation and/or (de)interleaving only.
\r
9770 signed char *in = (signed char *)inBuffer;
\r
9771 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9772 for (j=0; j<info.channels; j++) {
\r
9773 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9775 in += info.inJump;
\r
9776 out += info.outJump;
\r
9779 if (info.inFormat == RTAUDIO_SINT16) {
\r
9780 Int16 *in = (Int16 *)inBuffer;
\r
9781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9782 for (j=0; j<info.channels; j++) {
\r
9783 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9785 in += info.inJump;
\r
9786 out += info.outJump;
\r
9789 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9790 Int24 *in = (Int24 *)inBuffer;
\r
9791 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9792 for (j=0; j<info.channels; j++) {
\r
9793 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
9795 in += info.inJump;
\r
9796 out += info.outJump;
\r
9799 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9800 Int32 *in = (Int32 *)inBuffer;
\r
9801 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9802 for (j=0; j<info.channels; j++) {
\r
9803 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
9805 in += info.inJump;
\r
9806 out += info.outJump;
\r
9809 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9810 Float32 *in = (Float32 *)inBuffer;
\r
9811 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9812 for (j=0; j<info.channels; j++) {
\r
9813 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
9815 in += info.inJump;
\r
9816 out += info.outJump;
\r
9819 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9820 Float64 *in = (Float64 *)inBuffer;
\r
9821 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9822 for (j=0; j<info.channels; j++) {
\r
9823 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
9825 in += info.inJump;
\r
9826 out += info.outJump;
\r
9832 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
9833 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
9834 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
9836 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
9838 register char val;
\r
9839 register char *ptr;
\r
9842 if ( format == RTAUDIO_SINT16 ) {
\r
9843 for ( unsigned int i=0; i<samples; i++ ) {
\r
9844 // Swap 1st and 2nd bytes.
\r
9846 *(ptr) = *(ptr+1);
\r
9849 // Increment 2 bytes.
\r
9853 else if ( format == RTAUDIO_SINT32 ||
\r
9854 format == RTAUDIO_FLOAT32 ) {
\r
9855 for ( unsigned int i=0; i<samples; i++ ) {
\r
9856 // Swap 1st and 4th bytes.
\r
9858 *(ptr) = *(ptr+3);
\r
9861 // Swap 2nd and 3rd bytes.
\r
9864 *(ptr) = *(ptr+1);
\r
9867 // Increment 3 more bytes.
\r
9871 else if ( format == RTAUDIO_SINT24 ) {
\r
9872 for ( unsigned int i=0; i<samples; i++ ) {
\r
9873 // Swap 1st and 3rd bytes.
\r
9875 *(ptr) = *(ptr+2);
\r
9878 // Increment 2 more bytes.
\r
9882 else if ( format == RTAUDIO_FLOAT64 ) {
\r
9883 for ( unsigned int i=0; i<samples; i++ ) {
\r
9884 // Swap 1st and 8th bytes
\r
9886 *(ptr) = *(ptr+7);
\r
9889 // Swap 2nd and 7th bytes
\r
9892 *(ptr) = *(ptr+5);
\r
9895 // Swap 3rd and 6th bytes
\r
9898 *(ptr) = *(ptr+3);
\r
9901 // Swap 4th and 5th bytes
\r
9904 *(ptr) = *(ptr+1);
\r
9907 // Increment 5 more bytes.
\r
9913 // Indentation settings for Vim and Emacs
\r
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
\r
9920 // vim: et sts=2 sw=2
\r