1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 if ( oParams && oParams->nChannels < 1 ) {
\r
253 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
254 error( RtAudioError::INVALID_USE );
\r
258 if ( iParams && iParams->nChannels < 1 ) {
\r
259 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
260 error( RtAudioError::INVALID_USE );
\r
264 if ( oParams == NULL && iParams == NULL ) {
\r
265 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 if ( formatBytes(format) == 0 ) {
\r
271 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
272 error( RtAudioError::INVALID_USE );
\r
276 unsigned int nDevices = getDeviceCount();
\r
277 unsigned int oChannels = 0;
\r
279 oChannels = oParams->nChannels;
\r
280 if ( oParams->deviceId >= nDevices ) {
\r
281 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
282 error( RtAudioError::INVALID_USE );
\r
287 unsigned int iChannels = 0;
\r
289 iChannels = iParams->nChannels;
\r
290 if ( iParams->deviceId >= nDevices ) {
\r
291 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
292 error( RtAudioError::INVALID_USE );
\r
300 if ( oChannels > 0 ) {
\r
302 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
303 sampleRate, format, bufferFrames, options );
\r
304 if ( result == false ) {
\r
305 error( RtAudioError::SYSTEM_ERROR );
\r
310 if ( iChannels > 0 ) {
\r
312 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
313 sampleRate, format, bufferFrames, options );
\r
314 if ( result == false ) {
\r
315 if ( oChannels > 0 ) closeStream();
\r
316 error( RtAudioError::SYSTEM_ERROR );
\r
321 stream_.callbackInfo.callback = (void *) callback;
\r
322 stream_.callbackInfo.userData = userData;
\r
323 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
325 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
326 stream_.state = STREAM_STOPPED;
\r
329 unsigned int RtApi :: getDefaultInputDevice( void )
\r
331 // Should be implemented in subclasses if possible.
\r
335 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
337 // Should be implemented in subclasses if possible.
\r
341 void RtApi :: closeStream( void )
\r
343 // MUST be implemented in subclasses!
\r
347 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
348 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
349 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
350 RtAudio::StreamOptions * /*options*/ )
\r
352 // MUST be implemented in subclasses!
\r
356 void RtApi :: tickStreamTime( void )
\r
358 // Subclasses that do not provide their own implementation of
\r
359 // getStreamTime should call this function once per buffer I/O to
\r
360 // provide basic stream time support.
\r
362 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
364 #if defined( HAVE_GETTIMEOFDAY )
\r
365 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
369 long RtApi :: getStreamLatency( void )
\r
373 long totalLatency = 0;
\r
374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
375 totalLatency = stream_.latency[0];
\r
376 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
377 totalLatency += stream_.latency[1];
\r
379 return totalLatency;
\r
382 double RtApi :: getStreamTime( void )
\r
386 #if defined( HAVE_GETTIMEOFDAY )
\r
387 // Return a very accurate estimate of the stream time by
\r
388 // adding in the elapsed time since the last tick.
\r
389 struct timeval then;
\r
390 struct timeval now;
\r
392 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
393 return stream_.streamTime;
\r
395 gettimeofday( &now, NULL );
\r
396 then = stream_.lastTickTimestamp;
\r
397 return stream_.streamTime +
\r
398 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
399 (then.tv_sec + 0.000001 * then.tv_usec));
\r
401 return stream_.streamTime;
\r
405 unsigned int RtApi :: getStreamSampleRate( void )
\r
409 return stream_.sampleRate;
\r
413 // *************************************************** //
\r
415 // OS/API-specific methods.
\r
417 // *************************************************** //
\r
419 #if defined(__MACOSX_CORE__)
\r
421 // The OS X CoreAudio API is designed to use a separate callback
\r
422 // procedure for each of its audio devices. A single RtAudio duplex
\r
423 // stream using two different devices is supported here, though it
\r
424 // cannot be guaranteed to always behave correctly because we cannot
\r
425 // synchronize these two callbacks.
\r
427 // A property listener is installed for over/underrun information.
\r
428 // However, no functionality is currently provided to allow property
\r
429 // listeners to trigger user handlers because it is unclear what could
\r
430 // be done if a critical stream parameter (buffer size, sample rate,
\r
431 // device disconnect) notification arrived. The listeners entail
\r
432 // quite a bit of extra code and most likely, a user program wouldn't
\r
433 // be prepared for the result anyway. However, we do provide a flag
\r
434 // to the client callback function to inform of an over/underrun.
\r
436 // A structure to hold various information related to the CoreAudio API
\r
438 struct CoreHandle {
\r
439 AudioDeviceID id[2]; // device ids
\r
440 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
441 AudioDeviceIOProcID procId[2];
\r
443 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
444 UInt32 nStreams[2]; // number of streams to use
\r
446 char *deviceBuffer;
\r
447 pthread_cond_t condition;
\r
448 int drainCounter; // Tracks callback counts when draining
\r
449 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
452 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
455 RtApiCore:: RtApiCore()
\r
457 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
458 // This is a largely undocumented but absolutely necessary
\r
459 // requirement starting with OS-X 10.6. If not called, queries and
\r
460 // updates to various audio device properties are not handled
\r
462 CFRunLoopRef theRunLoop = NULL;
\r
463 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
464 kAudioObjectPropertyScopeGlobal,
\r
465 kAudioObjectPropertyElementMaster };
\r
466 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
467 if ( result != noErr ) {
\r
468 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
469 error( RtAudioError::WARNING );
\r
474 RtApiCore :: ~RtApiCore()
\r
476 // The subclass destructor gets called before the base class
\r
477 // destructor, so close an existing stream before deallocating
\r
478 // apiDeviceId memory.
\r
479 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
482 unsigned int RtApiCore :: getDeviceCount( void )
\r
484 // Find out how many audio devices there are, if any.
\r
486 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
487 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
488 if ( result != noErr ) {
\r
489 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
490 error( RtAudioError::WARNING );
\r
494 return dataSize / sizeof( AudioDeviceID );
\r
497 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
499 unsigned int nDevices = getDeviceCount();
\r
500 if ( nDevices <= 1 ) return 0;
\r
503 UInt32 dataSize = sizeof( AudioDeviceID );
\r
504 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
505 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
506 if ( result != noErr ) {
\r
507 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
508 error( RtAudioError::WARNING );
\r
512 dataSize *= nDevices;
\r
513 AudioDeviceID deviceList[ nDevices ];
\r
514 property.mSelector = kAudioHardwarePropertyDevices;
\r
515 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
516 if ( result != noErr ) {
\r
517 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
518 error( RtAudioError::WARNING );
\r
522 for ( unsigned int i=0; i<nDevices; i++ )
\r
523 if ( id == deviceList[i] ) return i;
\r
525 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
526 error( RtAudioError::WARNING );
\r
530 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
532 unsigned int nDevices = getDeviceCount();
\r
533 if ( nDevices <= 1 ) return 0;
\r
536 UInt32 dataSize = sizeof( AudioDeviceID );
\r
537 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
538 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
539 if ( result != noErr ) {
\r
540 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
541 error( RtAudioError::WARNING );
\r
545 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
546 AudioDeviceID deviceList[ nDevices ];
\r
547 property.mSelector = kAudioHardwarePropertyDevices;
\r
548 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
549 if ( result != noErr ) {
\r
550 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
551 error( RtAudioError::WARNING );
\r
555 for ( unsigned int i=0; i<nDevices; i++ )
\r
556 if ( id == deviceList[i] ) return i;
\r
558 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
559 error( RtAudioError::WARNING );
\r
563 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
565 RtAudio::DeviceInfo info;
\r
566 info.probed = false;
\r
569 unsigned int nDevices = getDeviceCount();
\r
570 if ( nDevices == 0 ) {
\r
571 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
572 error( RtAudioError::INVALID_USE );
\r
576 if ( device >= nDevices ) {
\r
577 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
578 error( RtAudioError::INVALID_USE );
\r
582 AudioDeviceID deviceList[ nDevices ];
\r
583 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
584 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
585 kAudioObjectPropertyScopeGlobal,
\r
586 kAudioObjectPropertyElementMaster };
\r
587 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
588 0, NULL, &dataSize, (void *) &deviceList );
\r
589 if ( result != noErr ) {
\r
590 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
591 error( RtAudioError::WARNING );
\r
595 AudioDeviceID id = deviceList[ device ];
\r
597 // Get the device name.
\r
599 CFStringRef cfname;
\r
600 dataSize = sizeof( CFStringRef );
\r
601 property.mSelector = kAudioObjectPropertyManufacturer;
\r
602 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
603 if ( result != noErr ) {
\r
604 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
605 errorText_ = errorStream_.str();
\r
606 error( RtAudioError::WARNING );
\r
610 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
611 int length = CFStringGetLength(cfname);
\r
612 char *mname = (char *)malloc(length * 3 + 1);
\r
613 #if defined( UNICODE ) || defined( _UNICODE )
\r
614 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
618 info.name.append( (const char *)mname, strlen(mname) );
\r
619 info.name.append( ": " );
\r
620 CFRelease( cfname );
\r
623 property.mSelector = kAudioObjectPropertyName;
\r
624 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
625 if ( result != noErr ) {
\r
626 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
627 errorText_ = errorStream_.str();
\r
628 error( RtAudioError::WARNING );
\r
632 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
633 length = CFStringGetLength(cfname);
\r
634 char *name = (char *)malloc(length * 3 + 1);
\r
635 #if defined( UNICODE ) || defined( _UNICODE )
\r
636 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
640 info.name.append( (const char *)name, strlen(name) );
\r
641 CFRelease( cfname );
\r
644 // Get the output stream "configuration".
\r
645 AudioBufferList *bufferList = nil;
\r
646 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
647 property.mScope = kAudioDevicePropertyScopeOutput;
\r
648 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
650 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
651 if ( result != noErr || dataSize == 0 ) {
\r
652 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtAudioError::WARNING );
\r
658 // Allocate the AudioBufferList.
\r
659 bufferList = (AudioBufferList *) malloc( dataSize );
\r
660 if ( bufferList == NULL ) {
\r
661 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
662 error( RtAudioError::WARNING );
\r
666 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
667 if ( result != noErr || dataSize == 0 ) {
\r
668 free( bufferList );
\r
669 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
670 errorText_ = errorStream_.str();
\r
671 error( RtAudioError::WARNING );
\r
675 // Get output channel information.
\r
676 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
677 for ( i=0; i<nStreams; i++ )
\r
678 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
679 free( bufferList );
\r
681 // Get the input stream "configuration".
\r
682 property.mScope = kAudioDevicePropertyScopeInput;
\r
683 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
684 if ( result != noErr || dataSize == 0 ) {
\r
685 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
686 errorText_ = errorStream_.str();
\r
687 error( RtAudioError::WARNING );
\r
691 // Allocate the AudioBufferList.
\r
692 bufferList = (AudioBufferList *) malloc( dataSize );
\r
693 if ( bufferList == NULL ) {
\r
694 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
695 error( RtAudioError::WARNING );
\r
699 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
700 if (result != noErr || dataSize == 0) {
\r
701 free( bufferList );
\r
702 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
703 errorText_ = errorStream_.str();
\r
704 error( RtAudioError::WARNING );
\r
708 // Get input channel information.
\r
709 nStreams = bufferList->mNumberBuffers;
\r
710 for ( i=0; i<nStreams; i++ )
\r
711 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
712 free( bufferList );
\r
714 // If device opens for both playback and capture, we determine the channels.
\r
715 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
716 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
718 // Probe the device sample rates.
\r
719 bool isInput = false;
\r
720 if ( info.outputChannels == 0 ) isInput = true;
\r
722 // Determine the supported sample rates.
\r
723 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
724 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
725 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
726 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
727 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
728 errorText_ = errorStream_.str();
\r
729 error( RtAudioError::WARNING );
\r
733 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
734 AudioValueRange rangeList[ nRanges ];
\r
735 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
736 if ( result != kAudioHardwareNoError ) {
\r
737 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
738 errorText_ = errorStream_.str();
\r
739 error( RtAudioError::WARNING );
\r
743 // The sample rate reporting mechanism is a bit of a mystery. It
\r
744 // seems that it can either return individual rates or a range of
\r
745 // rates. I assume that if the min / max range values are the same,
\r
746 // then that represents a single supported rate and if the min / max
\r
747 // range values are different, the device supports an arbitrary
\r
748 // range of values (though there might be multiple ranges, so we'll
\r
749 // use the most conservative range).
\r
750 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
751 bool haveValueRange = false;
\r
752 info.sampleRates.clear();
\r
753 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
754 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
755 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
757 haveValueRange = true;
\r
758 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
759 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
763 if ( haveValueRange ) {
\r
764 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
765 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
766 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
770 // Sort and remove any redundant values
\r
771 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
772 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
774 if ( info.sampleRates.size() == 0 ) {
\r
775 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
776 errorText_ = errorStream_.str();
\r
777 error( RtAudioError::WARNING );
\r
781 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
782 // Thus, any other "physical" formats supported by the device are of
\r
783 // no interest to the client.
\r
784 info.nativeFormats = RTAUDIO_FLOAT32;
\r
786 if ( info.outputChannels > 0 )
\r
787 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
788 if ( info.inputChannels > 0 )
\r
789 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
791 info.probed = true;
\r
795 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
796 const AudioTimeStamp* /*inNow*/,
\r
797 const AudioBufferList* inInputData,
\r
798 const AudioTimeStamp* /*inInputTime*/,
\r
799 AudioBufferList* outOutputData,
\r
800 const AudioTimeStamp* /*inOutputTime*/,
\r
801 void* infoPointer )
\r
803 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
805 RtApiCore *object = (RtApiCore *) info->object;
\r
806 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
807 return kAudioHardwareUnspecifiedError;
\r
809 return kAudioHardwareNoError;
\r
812 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
814 const AudioObjectPropertyAddress properties[],
\r
815 void* handlePointer )
\r
817 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
818 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
819 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
820 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
821 handle->xrun[1] = true;
\r
823 handle->xrun[0] = true;
\r
827 return kAudioHardwareNoError;
\r
830 static OSStatus rateListener( AudioObjectID inDevice,
\r
831 UInt32 /*nAddresses*/,
\r
832 const AudioObjectPropertyAddress /*properties*/[],
\r
833 void* ratePointer )
\r
835 Float64 *rate = (Float64 *) ratePointer;
\r
836 UInt32 dataSize = sizeof( Float64 );
\r
837 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
838 kAudioObjectPropertyScopeGlobal,
\r
839 kAudioObjectPropertyElementMaster };
\r
840 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
841 return kAudioHardwareNoError;
\r
844 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
845 unsigned int firstChannel, unsigned int sampleRate,
\r
846 RtAudioFormat format, unsigned int *bufferSize,
\r
847 RtAudio::StreamOptions *options )
\r
850 unsigned int nDevices = getDeviceCount();
\r
851 if ( nDevices == 0 ) {
\r
852 // This should not happen because a check is made before this function is called.
\r
853 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
857 if ( device >= nDevices ) {
\r
858 // This should not happen because a check is made before this function is called.
\r
859 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
863 AudioDeviceID deviceList[ nDevices ];
\r
864 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
865 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
866 kAudioObjectPropertyScopeGlobal,
\r
867 kAudioObjectPropertyElementMaster };
\r
868 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
869 0, NULL, &dataSize, (void *) &deviceList );
\r
870 if ( result != noErr ) {
\r
871 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
875 AudioDeviceID id = deviceList[ device ];
\r
877 // Setup for stream mode.
\r
878 bool isInput = false;
\r
879 if ( mode == INPUT ) {
\r
881 property.mScope = kAudioDevicePropertyScopeInput;
\r
884 property.mScope = kAudioDevicePropertyScopeOutput;
\r
886 // Get the stream "configuration".
\r
887 AudioBufferList *bufferList = nil;
\r
889 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
890 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
891 if ( result != noErr || dataSize == 0 ) {
\r
892 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
893 errorText_ = errorStream_.str();
\r
897 // Allocate the AudioBufferList.
\r
898 bufferList = (AudioBufferList *) malloc( dataSize );
\r
899 if ( bufferList == NULL ) {
\r
900 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
904 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
905 if (result != noErr || dataSize == 0) {
\r
906 free( bufferList );
\r
907 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
908 errorText_ = errorStream_.str();
\r
912 // Search for one or more streams that contain the desired number of
\r
913 // channels. CoreAudio devices can have an arbitrary number of
\r
914 // streams and each stream can have an arbitrary number of channels.
\r
915 // For each stream, a single buffer of interleaved samples is
\r
916 // provided. RtAudio prefers the use of one stream of interleaved
\r
917 // data or multiple consecutive single-channel streams. However, we
\r
918 // now support multiple consecutive multi-channel streams of
\r
919 // interleaved data as well.
\r
920 UInt32 iStream, offsetCounter = firstChannel;
\r
921 UInt32 nStreams = bufferList->mNumberBuffers;
\r
922 bool monoMode = false;
\r
923 bool foundStream = false;
\r
925 // First check that the device supports the requested number of
\r
927 UInt32 deviceChannels = 0;
\r
928 for ( iStream=0; iStream<nStreams; iStream++ )
\r
929 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
931 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
932 free( bufferList );
\r
933 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
934 errorText_ = errorStream_.str();
\r
938 // Look for a single stream meeting our needs.
\r
939 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
940 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
941 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
942 if ( streamChannels >= channels + offsetCounter ) {
\r
943 firstStream = iStream;
\r
944 channelOffset = offsetCounter;
\r
945 foundStream = true;
\r
948 if ( streamChannels > offsetCounter ) break;
\r
949 offsetCounter -= streamChannels;
\r
952 // If we didn't find a single stream above, then we should be able
\r
953 // to meet the channel specification with multiple streams.
\r
954 if ( foundStream == false ) {
\r
956 offsetCounter = firstChannel;
\r
957 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
958 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
959 if ( streamChannels > offsetCounter ) break;
\r
960 offsetCounter -= streamChannels;
\r
963 firstStream = iStream;
\r
964 channelOffset = offsetCounter;
\r
965 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
967 if ( streamChannels > 1 ) monoMode = false;
\r
968 while ( channelCounter > 0 ) {
\r
969 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
970 if ( streamChannels > 1 ) monoMode = false;
\r
971 channelCounter -= streamChannels;
\r
976 free( bufferList );
\r
978 // Determine the buffer size.
\r
979 AudioValueRange bufferRange;
\r
980 dataSize = sizeof( AudioValueRange );
\r
981 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
982 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
984 if ( result != noErr ) {
\r
985 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
986 errorText_ = errorStream_.str();
\r
990 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
991 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
992 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
994 // Set the buffer size. For multiple streams, I'm assuming we only
\r
995 // need to make this setting for the master channel.
\r
996 UInt32 theSize = (UInt32) *bufferSize;
\r
997 dataSize = sizeof( UInt32 );
\r
998 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
999 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1001 if ( result != noErr ) {
\r
1002 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1003 errorText_ = errorStream_.str();
\r
1007 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1008 // MUST be the same in both directions!
\r
1009 *bufferSize = theSize;
\r
1010 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1011 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1012 errorText_ = errorStream_.str();
\r
1016 stream_.bufferSize = *bufferSize;
\r
1017 stream_.nBuffers = 1;
\r
1019 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1020 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1022 dataSize = sizeof( hog_pid );
\r
1023 property.mSelector = kAudioDevicePropertyHogMode;
\r
1024 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1025 if ( result != noErr ) {
\r
1026 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1027 errorText_ = errorStream_.str();
\r
1031 if ( hog_pid != getpid() ) {
\r
1032 hog_pid = getpid();
\r
1033 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1034 if ( result != noErr ) {
\r
1035 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1036 errorText_ = errorStream_.str();
\r
1042 // Check and if necessary, change the sample rate for the device.
\r
1043 Float64 nominalRate;
\r
1044 dataSize = sizeof( Float64 );
\r
1045 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1046 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1047 if ( result != noErr ) {
\r
1048 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1049 errorText_ = errorStream_.str();
\r
1053 // Only change the sample rate if off by more than 1 Hz.
\r
1054 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1056 // Set a property listener for the sample rate change
\r
1057 Float64 reportedRate = 0.0;
\r
1058 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1059 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1060 if ( result != noErr ) {
\r
1061 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1062 errorText_ = errorStream_.str();
\r
1066 nominalRate = (Float64) sampleRate;
\r
1067 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1068 if ( result != noErr ) {
\r
1069 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1070 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1071 errorText_ = errorStream_.str();
\r
1075 // Now wait until the reported nominal rate is what we just set.
\r
1076 UInt32 microCounter = 0;
\r
1077 while ( reportedRate != nominalRate ) {
\r
1078 microCounter += 5000;
\r
1079 if ( microCounter > 5000000 ) break;
\r
1083 // Remove the property listener.
\r
1084 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1086 if ( microCounter > 5000000 ) {
\r
1087 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1088 errorText_ = errorStream_.str();
\r
1093 // Now set the stream format for all streams. Also, check the
\r
1094 // physical format of the device and change that if necessary.
\r
1095 AudioStreamBasicDescription description;
\r
1096 dataSize = sizeof( AudioStreamBasicDescription );
\r
1097 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1098 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1099 if ( result != noErr ) {
\r
1100 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1101 errorText_ = errorStream_.str();
\r
1105 // Set the sample rate and data format id. However, only make the
\r
1106 // change if the sample rate is not within 1.0 of the desired
\r
1107 // rate and the format is not linear pcm.
\r
1108 bool updateFormat = false;
\r
1109 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1110 description.mSampleRate = (Float64) sampleRate;
\r
1111 updateFormat = true;
\r
1114 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1115 description.mFormatID = kAudioFormatLinearPCM;
\r
1116 updateFormat = true;
\r
1119 if ( updateFormat ) {
\r
1120 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1121 if ( result != noErr ) {
\r
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1123 errorText_ = errorStream_.str();
\r
1128 // Now check the physical format.
\r
1129 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1130 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1131 if ( result != noErr ) {
\r
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1133 errorText_ = errorStream_.str();
\r
1137 //std::cout << "Current physical stream format:" << std::endl;
\r
1138 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1139 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1140 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1141 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1143 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1144 description.mFormatID = kAudioFormatLinearPCM;
\r
1145 //description.mSampleRate = (Float64) sampleRate;
\r
1146 AudioStreamBasicDescription testDescription = description;
\r
1147 UInt32 formatFlags;
\r
1149 // We'll try higher bit rates first and then work our way down.
\r
1150 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1151 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1152 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1156 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1158 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1160 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1162 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1164 bool setPhysicalFormat = false;
\r
1165 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1166 testDescription = description;
\r
1167 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1168 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1169 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1170 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1172 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1173 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1174 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1175 if ( result == noErr ) {
\r
1176 setPhysicalFormat = true;
\r
1177 //std::cout << "Updated physical stream format:" << std::endl;
\r
1178 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1179 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1180 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1181 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1186 if ( !setPhysicalFormat ) {
\r
1187 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1188 errorText_ = errorStream_.str();
\r
1191 } // done setting virtual/physical formats.
\r
1193 // Get the stream / device latency.
\r
1195 dataSize = sizeof( UInt32 );
\r
1196 property.mSelector = kAudioDevicePropertyLatency;
\r
1197 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1198 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1199 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1201 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1202 errorText_ = errorStream_.str();
\r
1203 error( RtAudioError::WARNING );
\r
1207 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1208 // always be presented in native-endian format, so we should never
\r
1209 // need to byte swap.
\r
1210 stream_.doByteSwap[mode] = false;
\r
1212 // From the CoreAudio documentation, PCM data must be supplied as
\r
1214 stream_.userFormat = format;
\r
1215 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1217 if ( streamCount == 1 )
\r
1218 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1219 else // multiple streams
\r
1220 stream_.nDeviceChannels[mode] = channels;
\r
1221 stream_.nUserChannels[mode] = channels;
\r
1222 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1223 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1224 else stream_.userInterleaved = true;
\r
1225 stream_.deviceInterleaved[mode] = true;
\r
1226 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1228 // Set flags for buffer conversion.
\r
1229 stream_.doConvertBuffer[mode] = false;
\r
1230 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1231 stream_.doConvertBuffer[mode] = true;
\r
1232 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( streamCount == 1 ) {
\r
1235 if ( stream_.nUserChannels[mode] > 1 &&
\r
1236 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1237 stream_.doConvertBuffer[mode] = true;
\r
1239 else if ( monoMode && stream_.userInterleaved )
\r
1240 stream_.doConvertBuffer[mode] = true;
\r
1242 // Allocate our CoreHandle structure for the stream.
\r
1243 CoreHandle *handle = 0;
\r
1244 if ( stream_.apiHandle == 0 ) {
\r
1246 handle = new CoreHandle;
\r
1248 catch ( std::bad_alloc& ) {
\r
1249 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1253 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1254 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1257 stream_.apiHandle = (void *) handle;
\r
1260 handle = (CoreHandle *) stream_.apiHandle;
\r
1261 handle->iStream[mode] = firstStream;
\r
1262 handle->nStreams[mode] = streamCount;
\r
1263 handle->id[mode] = id;
\r
1265 // Allocate necessary internal buffers.
\r
1266 unsigned long bufferBytes;
\r
1267 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1268 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1269 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1270 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1271 if ( stream_.userBuffer[mode] == NULL ) {
\r
1272 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1276 // If possible, we will make use of the CoreAudio stream buffers as
\r
1277 // "device buffers". However, we can't do this if using multiple
\r
1279 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1281 bool makeBuffer = true;
\r
1282 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1283 if ( mode == INPUT ) {
\r
1284 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1285 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1286 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1290 if ( makeBuffer ) {
\r
1291 bufferBytes *= *bufferSize;
\r
1292 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1293 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1294 if ( stream_.deviceBuffer == NULL ) {
\r
1295 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1301 stream_.sampleRate = sampleRate;
\r
1302 stream_.device[mode] = device;
\r
1303 stream_.state = STREAM_STOPPED;
\r
1304 stream_.callbackInfo.object = (void *) this;
\r
1306 // Setup the buffer conversion information structure.
\r
1307 if ( stream_.doConvertBuffer[mode] ) {
\r
1308 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1309 else setConvertInfo( mode, channelOffset );
\r
1312 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1313 // Only one callback procedure per device.
\r
1314 stream_.mode = DUPLEX;
\r
1316 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1317 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1319 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1320 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1322 if ( result != noErr ) {
\r
1323 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1324 errorText_ = errorStream_.str();
\r
1327 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1328 stream_.mode = DUPLEX;
\r
1330 stream_.mode = mode;
\r
1333 // Setup the device property listener for over/underload.
\r
1334 property.mSelector = kAudioDeviceProcessorOverload;
\r
1335 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1336 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1342 pthread_cond_destroy( &handle->condition );
\r
1344 stream_.apiHandle = 0;
\r
1347 for ( int i=0; i<2; i++ ) {
\r
1348 if ( stream_.userBuffer[i] ) {
\r
1349 free( stream_.userBuffer[i] );
\r
1350 stream_.userBuffer[i] = 0;
\r
1354 if ( stream_.deviceBuffer ) {
\r
1355 free( stream_.deviceBuffer );
\r
1356 stream_.deviceBuffer = 0;
\r
1359 stream_.state = STREAM_CLOSED;
\r
// Close the currently open stream: stop the running IOProc(s), detach the
// CoreAudio callback for each direction, free the user and device buffers,
// destroy the pthread condition variable, and reset the stream bookkeeping
// to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): this excerpt is missing structural lines (closing braces,
// the early return after the warning, and the #else/#endif of each
// conditional block) — restore from the canonical RtAudio sources.
1363 void RtApiCore :: closeStream( void )
// A close on an already-closed stream is only a warning, not a fatal error.
1365 if ( stream_.state == STREAM_CLOSED ) {
1366 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1367 error( RtAudioError::WARNING );
1371 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output-side callback (handle->id[0]).
1372 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1373 if ( stream_.state == STREAM_RUNNING )
1374 AudioDeviceStop( handle->id[0], callbackHandler );
1375 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1376 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1378 // deprecated in favor of AudioDeviceDestroyIOProcID()
1379 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input-side callback (handle->id[1]); for duplex on a single
// device there is only one IOProc, so skip this when the devices match.
1383 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1384 if ( stream_.state == STREAM_RUNNING )
1385 AudioDeviceStop( handle->id[1], callbackHandler );
1386 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1387 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1389 // deprecated in favor of AudioDeviceDestroyIOProcID()
1390 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (index 0 = output, 1 = input).
1394 for ( int i=0; i<2; i++ ) {
1395 if ( stream_.userBuffer[i] ) {
1396 free( stream_.userBuffer[i] );
1397 stream_.userBuffer[i] = 0;
// Release the shared internal "device" conversion buffer, if allocated.
1401 if ( stream_.deviceBuffer ) {
1402 free( stream_.deviceBuffer );
1403 stream_.deviceBuffer = 0;
1406 // Destroy pthread condition variable.
1407 pthread_cond_destroy( &handle->condition );
// NOTE(review): the `delete handle` that should precede this assignment is
// not visible in this excerpt — confirm upstream, else the CoreHandle leaks.
1409 stream_.apiHandle = 0;
1411 stream_.mode = UNINITIALIZED;
1412 stream_.state = STREAM_CLOSED;
\r
// Start the stream: call AudioDeviceStart() for the output device and, when
// input uses a distinct device, for the input device as well; then reset the
// drain bookkeeping and mark the stream STREAM_RUNNING.  On any CoreAudio
// failure, falls through to error( SYSTEM_ERROR ) with a formatted message.
// NOTE(review): several structural lines (braces, goto/unlock statements)
// are missing from this excerpt — restore from the canonical RtAudio sources.
1415 void RtApiCore :: startStream( void )
// Starting an already-running stream is reported as a warning only.
1418 if ( stream_.state == STREAM_RUNNING ) {
1419 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1420 error( RtAudioError::WARNING );
1424 OSStatus result = noErr;
1425 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Kick off the output-side IOProc first (handle->id[0]).
1426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1428 result = AudioDeviceStart( handle->id[0], callbackHandler );
1429 if ( result != noErr ) {
1430 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1431 errorText_ = errorStream_.str();
// Start the input-side IOProc only when it is a separate device; a
// single-device duplex stream shares one callback.
1436 if ( stream_.mode == INPUT ||
1437 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1439 result = AudioDeviceStart( handle->id[1], callbackHandler );
1440 if ( result != noErr ) {
1441 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1442 errorText_ = errorStream_.str();
// Fresh start: no drain in progress, stream officially running.
1447 handle->drainCounter = 0;
1448 handle->internalDrain = false;
1449 stream_.state = STREAM_RUNNING;
// Error epilogue (reached via the failure paths above).
1452 if ( result == noErr ) return;
1453 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully.  For output/duplex, first request a drain
// (drainCounter = 2) and block on the condition variable until the callback
// signals the drain is complete, then AudioDeviceStop() each direction's
// device and mark the stream STREAM_STOPPED.
// NOTE(review): structural lines (braces, unlock/goto statements) are
// missing from this excerpt — restore from the canonical RtAudio sources.
1456 void RtApiCore :: stopStream( void )
// Stopping an already-stopped stream is only a warning.
1459 if ( stream_.state == STREAM_STOPPED ) {
1460 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1461 error( RtAudioError::WARNING );
1465 OSStatus result = noErr;
1466 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1467 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is already in flight, request one and wait for callbackEvent()
// to signal the condition once the output has been zero-filled.
1469 if ( handle->drainCounter == 0 ) {
1470 handle->drainCounter = 2;
1471 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1474 result = AudioDeviceStop( handle->id[0], callbackHandler );
1475 if ( result != noErr ) {
1476 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1477 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output.
1482 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1484 result = AudioDeviceStop( handle->id[1], callbackHandler );
1485 if ( result != noErr ) {
1486 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1487 errorText_ = errorStream_.str();
1492 stream_.state = STREAM_STOPPED;
// Error epilogue (reached via the failure paths above).
1495 if ( result == noErr ) return;
1496 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: set drainCounter = 2 so the callback treats the stream
// as draining (output is zero-filled rather than pulled from the user
// callback) before the stop completes.
// NOTE(review): the tail of this function (presumably a stopStream() call
// and closing brace) is missing from this excerpt — confirm upstream.
1499 void RtApiCore :: abortStream( void )
// Aborting an already-stopped stream is only a warning.
1502 if ( stream_.state == STREAM_STOPPED ) {
1503 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1504 error( RtAudioError::WARNING );
1508 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Skip straight to the "write zeros" drain phase (see callbackEvent()).
1509 handle->drainCounter = 2;
\r
1514 // This function will be called by a spawned thread when the user
1515 // callback function signals that the stream should be stopped or
1516 // aborted. It is better to handle it this way because the
1517 // callbackEvent() function probably should return before the AudioDeviceStop()
1518 // function is called.
//
// Thread entry point: unpacks the CallbackInfo passed as the pthread
// argument, invokes stopStream() on the owning RtApiCore object, and
// terminates the thread.  Spawned from callbackEvent() when an internal
// drain completes.
1519 static void *coreStopStream( void *ptr )
1521 CallbackInfo *info = (CallbackInfo *) ptr;
1522 RtApiCore *object = (RtApiCore *) info->object;
1524 object->stopStream();
1525 pthread_exit( NULL );
\r
// Per-buffer CoreAudio callback worker.  Invoked (via callbackHandler) with
// the device that fired and its input/output AudioBufferLists.  Handles:
// drain signalling, invoking the user callback for fresh output data,
// de-/interleaving and format conversion between the user buffers and one
// or more CoreAudio streams, and input capture into the user buffer.
// Returns SUCCESS immediately when the stream is stopped/stopping.
// NOTE(review): this excerpt is missing many structural lines (braces,
// returns, a mutex lock/unlock pair, memset's zero argument line, etc.) —
// restore from the canonical RtAudio sources before compiling.
1528 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1529 const AudioBufferList *inBufferList,
1530 const AudioBufferList *outBufferList )
1532 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1533 if ( stream_.state == STREAM_CLOSED ) {
1534 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1535 error( RtAudioError::WARNING );
1539 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1540 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1542 // Check if we were draining the stream and signal is finished.
1543 if ( handle->drainCounter > 3 ) {
1544 ThreadHandle threadId;
1546 stream_.state = STREAM_STOPPING;
// Internal drain (user callback returned 1): spawn a thread to call
// stopStream() so this callback can return before AudioDeviceStop().
1547 if ( handle->internalDrain == true )
1548 pthread_create( &threadId, NULL, coreStopStream, info );
1549 else // external call to stopStream()
1550 pthread_cond_signal( &handle->condition );
1554 AudioDeviceID outputDevice = handle->id[0];
1556 // Invoke user callback to get fresh output data UNLESS we are
1557 // draining stream or duplex mode AND the input/output devices are
1558 // different AND this function is called for the input device.
1559 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1560 RtAudioCallback callback = (RtAudioCallback) info->callback;
1561 double streamTime = getStreamTime();
// Collect and clear any pending xrun flags to report to the user callback.
1562 RtAudioStreamStatus status = 0;
1563 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1564 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1565 handle->xrun[0] = false;
1567 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1568 status |= RTAUDIO_INPUT_OVERFLOW;
1569 handle->xrun[1] = false;
// User callback return contract: 2 = abort now, 1 = drain then stop.
1572 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1573 stream_.bufferSize, streamTime, status, info->userData );
1574 if ( cbReturnValue == 2 ) {
1575 stream_.state = STREAM_STOPPING;
1576 handle->drainCounter = 2;
1580 else if ( cbReturnValue == 1 ) {
1581 handle->drainCounter = 1;
1582 handle->internalDrain = true;
// ---- Output side: move user data into the CoreAudio output buffers. ----
1586 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1588 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1590 if ( handle->nStreams[0] == 1 ) {
1591 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1593 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1595 else { // fill multiple streams with zeros
1596 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1597 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1599 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single output stream: convert or copy the user buffer straight into it.
1603 else if ( handle->nStreams[0] == 1 ) {
1604 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1605 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1606 stream_.userBuffer[0], stream_.convertInfo[0] );
1608 else { // copy from user buffer
1609 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1610 stream_.userBuffer[0],
1611 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1614 else { // fill multiple streams
// inBuffer = source of samples: the user buffer, or the converted
// internal device buffer when a format/channel conversion is needed.
1615 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1616 if ( stream_.doConvertBuffer[0] ) {
1617 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1618 inBuffer = (Float32 *) stream_.deviceBuffer;
1621 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1622 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1623 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1624 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1625 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1628 else { // fill multiple multi-channel streams with interleaved data
1629 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1630 Float32 *out, *in;
1632 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1633 UInt32 inChannels = stream_.nUserChannels[0];
1634 if ( stream_.doConvertBuffer[0] ) {
1635 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1636 inChannels = stream_.nDeviceChannels[0];
// inOffset = distance between consecutive samples of one channel in the
// source buffer: 1 for interleaved, bufferSize for planar layout.
1639 if ( inInterleaved ) inOffset = 1;
1640 else inOffset = stream_.bufferSize;
1642 channelsLeft = inChannels;
1643 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1645 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1646 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1649 // Account for possible channel offset in first stream
1650 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1651 streamChannels -= stream_.channelOffset[0];
1652 outJump = stream_.channelOffset[0];
1656 // Account for possible unfilled channels at end of the last stream
1657 if ( streamChannels > channelsLeft ) {
1658 outJump = streamChannels - channelsLeft;
1659 streamChannels = channelsLeft;
1662 // Determine input buffer offsets and skips
1663 if ( inInterleaved ) {
1664 inJump = inChannels;
1665 in += inChannels - channelsLeft;
1669 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame, channel-by-channel into this CoreAudio stream.
1672 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1673 for ( unsigned int j=0; j<streamChannels; j++ ) {
1674 *out++ = in[j*inOffset];
1679 channelsLeft -= streamChannels;
// While draining, advance the counter; once > 3 the next invocation
// performs the stop/signal logic at the top of this function.
1684 if ( handle->drainCounter ) {
1685 handle->drainCounter++;
// ---- Input side: move CoreAudio capture buffers into the user buffer. ----
1690 AudioDeviceID inputDevice;
1691 inputDevice = handle->id[1];
1692 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1694 if ( handle->nStreams[1] == 1 ) {
1695 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1696 convertBuffer( stream_.userBuffer[1],
1697 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1698 stream_.convertInfo[1] );
1700 else { // copy to user buffer
1701 memcpy( stream_.userBuffer[1],
1702 inBufferList->mBuffers[handle->iStream[1]].mData,
1703 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1706 else { // read from multiple streams
// outBuffer = destination: the user buffer, or the internal device
// buffer when a post-gather format conversion is required (see below).
1707 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1708 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1710 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1711 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1712 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1713 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1714 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1717 else { // read from multiple multi-channel streams
1718 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1719 Float32 *out, *in;
1721 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1722 UInt32 outChannels = stream_.nUserChannels[1];
1723 if ( stream_.doConvertBuffer[1] ) {
1724 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1725 outChannels = stream_.nDeviceChannels[1];
// outOffset mirrors inOffset on the output side: 1 if interleaved,
// bufferSize if planar.
1728 if ( outInterleaved ) outOffset = 1;
1729 else outOffset = stream_.bufferSize;
1731 channelsLeft = outChannels;
1732 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1734 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1735 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1738 // Account for possible channel offset in first stream
1739 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1740 streamChannels -= stream_.channelOffset[1];
1741 inJump = stream_.channelOffset[1];
1745 // Account for possible unread channels at end of the last stream
1746 if ( streamChannels > channelsLeft ) {
1747 inJump = streamChannels - channelsLeft;
1748 streamChannels = channelsLeft;
1751 // Determine output buffer offsets and skips
1752 if ( outInterleaved ) {
1753 outJump = outChannels;
1754 out += outChannels - channelsLeft;
1758 out += (outChannels - channelsLeft) * outOffset;
// Gather frame-by-frame from this CoreAudio stream into the destination.
1761 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1762 for ( unsigned int j=0; j<streamChannels; j++ ) {
1763 out[j*outOffset] = *in++;
1768 channelsLeft -= streamChannels;
// Final conversion from the gathered device buffer to the user format.
1772 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1773 convertBuffer( stream_.userBuffer[1],
1774 stream_.deviceBuffer,
1775 stream_.convertInfo[1] );
1781 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
1783 RtApi::tickStreamTime();
\r
1787 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1791 case kAudioHardwareNotRunningError:
\r
1792 return "kAudioHardwareNotRunningError";
\r
1794 case kAudioHardwareUnspecifiedError:
\r
1795 return "kAudioHardwareUnspecifiedError";
\r
1797 case kAudioHardwareUnknownPropertyError:
\r
1798 return "kAudioHardwareUnknownPropertyError";
\r
1800 case kAudioHardwareBadPropertySizeError:
\r
1801 return "kAudioHardwareBadPropertySizeError";
\r
1803 case kAudioHardwareIllegalOperationError:
\r
1804 return "kAudioHardwareIllegalOperationError";
\r
1806 case kAudioHardwareBadObjectError:
\r
1807 return "kAudioHardwareBadObjectError";
\r
1809 case kAudioHardwareBadDeviceError:
\r
1810 return "kAudioHardwareBadDeviceError";
\r
1812 case kAudioHardwareBadStreamError:
\r
1813 return "kAudioHardwareBadStreamError";
\r
1815 case kAudioHardwareUnsupportedOperationError:
\r
1816 return "kAudioHardwareUnsupportedOperationError";
\r
1818 case kAudioDeviceUnsupportedFormatError:
\r
1819 return "kAudioDeviceUnsupportedFormatError";
\r
1821 case kAudioDevicePermissionsError:
\r
1822 return "kAudioDevicePermissionsError";
\r
1825 return "CoreAudio unknown error";
\r
1829 //******************** End of __MACOSX_CORE__ *********************//
\r
1832 #if defined(__UNIX_JACK__)
\r
1834 // JACK is a low-latency audio server, originally written for the
\r
1835 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1836 // connect a number of different applications to an audio device, as
\r
1837 // well as allowing them to share audio between themselves.
\r
1839 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1840 // have ports connected to the server. The JACK server is typically
\r
1841 // started in a terminal as follows:
\r
1843 // .jackd -d alsa -d hw:0
\r
1845 // or through an interface program such as qjackctl. Many of the
\r
1846 // parameters normally set for a stream are fixed by the JACK server
\r
1847 // and can be specified when the JACK server is started. In
\r
1850 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1852 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1853 // frames, and number of buffers = 4. Once the server is running, it
\r
1854 // is not possible to override these values. If the values are not
\r
1855 // specified in the command-line, the JACK server uses default values.
\r
1857 // The JACK server does not have to be running when an instance of
\r
1858 // RtApiJack is created, though the function getDeviceCount() will
\r
1859 // report 0 devices found until JACK has been started. When no
\r
1860 // devices are available (i.e., the JACK server is not running), a
\r
1861 // stream cannot be opened.
\r
1863 #include <jack/jack.h>
\r
1864 #include <unistd.h>
\r
1867 // A structure to hold various information related to the Jack API
\r
1868 // implementation.
\r
1869 struct JackHandle {
\r
1870 jack_client_t *client;
\r
1871 jack_port_t **ports[2];
\r
1872 std::string deviceName[2];
\r
1874 pthread_cond_t condition;
\r
1875 int drainCounter; // Tracks callback counts when draining
\r
1876 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1879 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() to
// suppress Jack's internal error reporting in non-debug builds.
// (Removed the stray semicolon after the function body.)
static void jackSilentError( const char * ) {}
\r
1884 RtApiJack :: RtApiJack()
\r
1886 // Nothing to do here.
\r
1887 #if !defined(__RTAUDIO_DEBUG__)
\r
1888 // Turn off Jack's internal error reporting.
\r
1889 jack_set_error_function( &jackSilentError );
\r
1893 RtApiJack :: ~RtApiJack()
\r
1895 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1898 unsigned int RtApiJack :: getDeviceCount( void )
\r
1900 // See if we can become a jack client.
\r
1901 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1902 jack_status_t *status = NULL;
\r
1903 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1904 if ( client == 0 ) return 0;
\r
1906 const char **ports;
\r
1907 std::string port, previousPort;
\r
1908 unsigned int nChannels = 0, nDevices = 0;
\r
1909 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1911 // Parse the port names up to the first colon (:).
\r
1912 size_t iColon = 0;
\r
1914 port = (char *) ports[ nChannels ];
\r
1915 iColon = port.find(":");
\r
1916 if ( iColon != std::string::npos ) {
\r
1917 port = port.substr( 0, iColon + 1 );
\r
1918 if ( port != previousPort ) {
\r
1920 previousPort = port;
\r
1923 } while ( ports[++nChannels] );
\r
1927 jack_client_close( client );
\r
1931 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1933 RtAudio::DeviceInfo info;
\r
1934 info.probed = false;
\r
1936 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1937 jack_status_t *status = NULL;
\r
1938 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1939 if ( client == 0 ) {
\r
1940 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1941 error( RtAudioError::WARNING );
\r
1945 const char **ports;
\r
1946 std::string port, previousPort;
\r
1947 unsigned int nPorts = 0, nDevices = 0;
\r
1948 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1950 // Parse the port names up to the first colon (:).
\r
1951 size_t iColon = 0;
\r
1953 port = (char *) ports[ nPorts ];
\r
1954 iColon = port.find(":");
\r
1955 if ( iColon != std::string::npos ) {
\r
1956 port = port.substr( 0, iColon );
\r
1957 if ( port != previousPort ) {
\r
1958 if ( nDevices == device ) info.name = port;
\r
1960 previousPort = port;
\r
1963 } while ( ports[++nPorts] );
\r
1967 if ( device >= nDevices ) {
\r
1968 jack_client_close( client );
\r
1969 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1970 error( RtAudioError::INVALID_USE );
\r
1974 // Get the current jack server sample rate.
\r
1975 info.sampleRates.clear();
\r
1976 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1978 // Count the available ports containing the client name as device
\r
1979 // channels. Jack "input ports" equal RtAudio output channels.
\r
1980 unsigned int nChannels = 0;
\r
1981 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1983 while ( ports[ nChannels ] ) nChannels++;
\r
1985 info.outputChannels = nChannels;
\r
1988 // Jack "output ports" equal RtAudio input channels.
\r
1990 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1992 while ( ports[ nChannels ] ) nChannels++;
\r
1994 info.inputChannels = nChannels;
\r
1997 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1998 jack_client_close(client);
\r
1999 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2000 error( RtAudioError::WARNING );
\r
2004 // If device opens for both playback and capture, we determine the channels.
\r
2005 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2006 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2008 // Jack always uses 32-bit floats.
\r
2009 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2011 // Jack doesn't provide default devices so we'll use the first available one.
\r
2012 if ( device == 0 && info.outputChannels > 0 )
\r
2013 info.isDefaultOutput = true;
\r
2014 if ( device == 0 && info.inputChannels > 0 )
\r
2015 info.isDefaultInput = true;
\r
2017 jack_client_close(client);
\r
2018 info.probed = true;
\r
2022 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2024 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2026 RtApiJack *object = (RtApiJack *) info->object;
\r
2027 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2032 // This function will be called by a spawned thread when the Jack
\r
2033 // server signals that it is shutting down. It is necessary to handle
\r
2034 // it this way because the jackShutdown() function must return before
\r
2035 // the jack_deactivate() function (in closeStream()) will return.
\r
2036 static void *jackCloseStream( void *ptr )
\r
2038 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2039 RtApiJack *object = (RtApiJack *) info->object;
\r
2041 object->closeStream();
\r
2043 pthread_exit( NULL );
\r
2045 static void jackShutdown( void *infoPointer )
\r
2047 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2048 RtApiJack *object = (RtApiJack *) info->object;
\r
2050 // Check current stream state. If stopped, then we'll assume this
\r
2051 // was called as a result of a call to RtApiJack::stopStream (the
\r
2052 // deactivation of a client handle causes this function to be called).
\r
2053 // If not, we'll assume the Jack server is shutting down or some
\r
2054 // other problem occurred and we should close the stream.
\r
2055 if ( object->isStreamRunning() == false ) return;
\r
2057 ThreadHandle threadId;
\r
2058 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2059 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2062 static int jackXrun( void *infoPointer )
\r
2064 JackHandle *handle = (JackHandle *) infoPointer;
\r
2066 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2067 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2072 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2073 unsigned int firstChannel, unsigned int sampleRate,
\r
2074 RtAudioFormat format, unsigned int *bufferSize,
\r
2075 RtAudio::StreamOptions *options )
\r
2077 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2079 // Look for jack server and try to become a client (only do once per stream).
\r
2080 jack_client_t *client = 0;
\r
2081 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2082 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2083 jack_status_t *status = NULL;
\r
2084 if ( options && !options->streamName.empty() )
\r
2085 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2087 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2088 if ( client == 0 ) {
\r
2089 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2090 error( RtAudioError::WARNING );
\r
2095 // The handle must have been created on an earlier pass.
\r
2096 client = handle->client;
\r
2099 const char **ports;
\r
2100 std::string port, previousPort, deviceName;
\r
2101 unsigned int nPorts = 0, nDevices = 0;
\r
2102 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2104 // Parse the port names up to the first colon (:).
\r
2105 size_t iColon = 0;
\r
2107 port = (char *) ports[ nPorts ];
\r
2108 iColon = port.find(":");
\r
2109 if ( iColon != std::string::npos ) {
\r
2110 port = port.substr( 0, iColon );
\r
2111 if ( port != previousPort ) {
\r
2112 if ( nDevices == device ) deviceName = port;
\r
2114 previousPort = port;
\r
2117 } while ( ports[++nPorts] );
\r
2121 if ( device >= nDevices ) {
\r
2122 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2126 // Count the available ports containing the client name as device
\r
2127 // channels. Jack "input ports" equal RtAudio output channels.
\r
2128 unsigned int nChannels = 0;
\r
2129 unsigned long flag = JackPortIsInput;
\r
2130 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2131 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2133 while ( ports[ nChannels ] ) nChannels++;
\r
2137 // Compare the jack ports for specified client to the requested number of channels.
\r
2138 if ( nChannels < (channels + firstChannel) ) {
\r
2139 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2140 errorText_ = errorStream_.str();
\r
2144 // Check the jack server sample rate.
\r
2145 unsigned int jackRate = jack_get_sample_rate( client );
\r
2146 if ( sampleRate != jackRate ) {
\r
2147 jack_client_close( client );
\r
2148 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2149 errorText_ = errorStream_.str();
\r
2152 stream_.sampleRate = jackRate;
\r
2154 // Get the latency of the JACK port.
\r
2155 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2156 if ( ports[ firstChannel ] ) {
\r
2157 // Added by Ge Wang
\r
2158 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2159 // the range (usually the min and max are equal)
\r
2160 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2161 // get the latency range
\r
2162 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2163 // be optimistic, use the min!
\r
2164 stream_.latency[mode] = latrange.min;
\r
2165 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2169 // The jack server always uses 32-bit floating-point data.
\r
2170 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2171 stream_.userFormat = format;
\r
2173 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2174 else stream_.userInterleaved = true;
\r
2176 // Jack always uses non-interleaved buffers.
\r
2177 stream_.deviceInterleaved[mode] = false;
\r
2179 // Jack always provides host byte-ordered data.
\r
2180 stream_.doByteSwap[mode] = false;
\r
2182 // Get the buffer size. The buffer size and number of buffers
\r
2183 // (periods) is set when the jack server is started.
\r
2184 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2185 *bufferSize = stream_.bufferSize;
\r
2187 stream_.nDeviceChannels[mode] = channels;
\r
2188 stream_.nUserChannels[mode] = channels;
\r
2190 // Set flags for buffer conversion.
\r
2191 stream_.doConvertBuffer[mode] = false;
\r
2192 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2193 stream_.doConvertBuffer[mode] = true;
\r
2194 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2195 stream_.nUserChannels[mode] > 1 )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2198 // Allocate our JackHandle structure for the stream.
\r
2199 if ( handle == 0 ) {
\r
2201 handle = new JackHandle;
\r
2203 catch ( std::bad_alloc& ) {
\r
2204 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2208 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2209 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2212 stream_.apiHandle = (void *) handle;
\r
2213 handle->client = client;
\r
2215 handle->deviceName[mode] = deviceName;
\r
2217 // Allocate necessary internal buffers.
\r
2218 unsigned long bufferBytes;
\r
2219 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2220 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2221 if ( stream_.userBuffer[mode] == NULL ) {
\r
2222 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2226 if ( stream_.doConvertBuffer[mode] ) {
\r
2228 bool makeBuffer = true;
\r
2229 if ( mode == OUTPUT )
\r
2230 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2231 else { // mode == INPUT
\r
2232 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2233 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2234 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2235 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2239 if ( makeBuffer ) {
\r
2240 bufferBytes *= *bufferSize;
\r
2241 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2242 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2243 if ( stream_.deviceBuffer == NULL ) {
\r
2244 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2250 // Allocate memory for the Jack ports (channels) identifiers.
\r
2251 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2252 if ( handle->ports[mode] == NULL ) {
\r
2253 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2257 stream_.device[mode] = device;
\r
2258 stream_.channelOffset[mode] = firstChannel;
\r
2259 stream_.state = STREAM_STOPPED;
\r
2260 stream_.callbackInfo.object = (void *) this;
\r
2262 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2263 // We had already set up the stream for output.
\r
2264 stream_.mode = DUPLEX;
\r
2266 stream_.mode = mode;
\r
2267 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2268 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2269 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2272 // Register our ports.
\r
2274 if ( mode == OUTPUT ) {
\r
2275 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2276 snprintf( label, 64, "outport %d", i );
\r
2277 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2278 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2282 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2283 snprintf( label, 64, "inport %d", i );
\r
2284 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2285 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2289 // Setup the buffer conversion information structure. We don't use
\r
2290 // buffers to do channel offsets, so we override that parameter
\r
2292 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2298 pthread_cond_destroy( &handle->condition );
\r
2299 jack_client_close( handle->client );
\r
2301 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2302 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2305 stream_.apiHandle = 0;
\r
2308 for ( int i=0; i<2; i++ ) {
\r
2309 if ( stream_.userBuffer[i] ) {
\r
2310 free( stream_.userBuffer[i] );
\r
2311 stream_.userBuffer[i] = 0;
\r
2315 if ( stream_.deviceBuffer ) {
\r
2316 free( stream_.deviceBuffer );
\r
2317 stream_.deviceBuffer = 0;
\r
2323 void RtApiJack :: closeStream( void )
\r
2325 if ( stream_.state == STREAM_CLOSED ) {
\r
2326 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2327 error( RtAudioError::WARNING );
\r
2331 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2334 if ( stream_.state == STREAM_RUNNING )
\r
2335 jack_deactivate( handle->client );
\r
2337 jack_client_close( handle->client );
\r
2341 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2342 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2343 pthread_cond_destroy( &handle->condition );
\r
2345 stream_.apiHandle = 0;
\r
2348 for ( int i=0; i<2; i++ ) {
\r
2349 if ( stream_.userBuffer[i] ) {
\r
2350 free( stream_.userBuffer[i] );
\r
2351 stream_.userBuffer[i] = 0;
\r
2355 if ( stream_.deviceBuffer ) {
\r
2356 free( stream_.deviceBuffer );
\r
2357 stream_.deviceBuffer = 0;
\r
2360 stream_.mode = UNINITIALIZED;
\r
2361 stream_.state = STREAM_CLOSED;
\r
2364 void RtApiJack :: startStream( void )
\r
2367 if ( stream_.state == STREAM_RUNNING ) {
\r
2368 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2369 error( RtAudioError::WARNING );
\r
2373 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2374 int result = jack_activate( handle->client );
\r
2376 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2380 const char **ports;
\r
2382 // Get the list of available ports.
\r
2383 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2385 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2386 if ( ports == NULL) {
\r
2387 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2391 // Now make the port connections. Since RtAudio wasn't designed to
\r
2392 // allow the user to select particular channels of a device, we'll
\r
2393 // just open the first "nChannels" ports with offset.
\r
2394 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2396 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2397 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2400 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2407 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2409 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2410 if ( ports == NULL) {
\r
2411 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2415 // Now make the port connections. See note above.
\r
2416 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2418 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2419 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2422 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2429 handle->drainCounter = 0;
\r
2430 handle->internalDrain = false;
\r
2431 stream_.state = STREAM_RUNNING;
\r
2434 if ( result == 0 ) return;
\r
2435 error( RtAudioError::SYSTEM_ERROR );
\r
2438 void RtApiJack :: stopStream( void )
\r
2441 if ( stream_.state == STREAM_STOPPED ) {
\r
2442 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2443 error( RtAudioError::WARNING );
\r
2447 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2450 if ( handle->drainCounter == 0 ) {
\r
2451 handle->drainCounter = 2;
\r
2452 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2456 jack_deactivate( handle->client );
\r
2457 stream_.state = STREAM_STOPPED;
\r
2460 void RtApiJack :: abortStream( void )
\r
2463 if ( stream_.state == STREAM_STOPPED ) {
\r
2464 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2465 error( RtAudioError::WARNING );
\r
2469 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2470 handle->drainCounter = 2;
\r
2475 // This function will be called by a spawned thread when the user
\r
2476 // callback function signals that the stream should be stopped or
\r
2477 // aborted. It is necessary to handle it this way because the
\r
2478 // callbackEvent() function must return before the jack_deactivate()
\r
2479 // function will return.
\r
2480 static void *jackStopStream( void *ptr )
\r
2482 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2483 RtApiJack *object = (RtApiJack *) info->object;
\r
2485 object->stopStream();
\r
2486 pthread_exit( NULL );
\r
2489 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2491 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2492 if ( stream_.state == STREAM_CLOSED ) {
\r
2493 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2494 error( RtAudioError::WARNING );
\r
2497 if ( stream_.bufferSize != nframes ) {
\r
2498 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2499 error( RtAudioError::WARNING );
\r
2503 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2504 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2506 // Check if we were draining the stream and signal is finished.
\r
2507 if ( handle->drainCounter > 3 ) {
\r
2508 ThreadHandle threadId;
\r
2510 stream_.state = STREAM_STOPPING;
\r
2511 if ( handle->internalDrain == true )
\r
2512 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2514 pthread_cond_signal( &handle->condition );
\r
2518 // Invoke user callback first, to get fresh output data.
\r
2519 if ( handle->drainCounter == 0 ) {
\r
2520 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2521 double streamTime = getStreamTime();
\r
2522 RtAudioStreamStatus status = 0;
\r
2523 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2524 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2525 handle->xrun[0] = false;
\r
2527 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2528 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2529 handle->xrun[1] = false;
\r
2531 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2532 stream_.bufferSize, streamTime, status, info->userData );
\r
2533 if ( cbReturnValue == 2 ) {
\r
2534 stream_.state = STREAM_STOPPING;
\r
2535 handle->drainCounter = 2;
\r
2537 pthread_create( &id, NULL, jackStopStream, info );
\r
2540 else if ( cbReturnValue == 1 ) {
\r
2541 handle->drainCounter = 1;
\r
2542 handle->internalDrain = true;
\r
2546 jack_default_audio_sample_t *jackbuffer;
\r
2547 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2548 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2550 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2552 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2553 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2554 memset( jackbuffer, 0, bufferBytes );
\r
2558 else if ( stream_.doConvertBuffer[0] ) {
\r
2560 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2562 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2563 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2564 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2567 else { // no buffer conversion
\r
2568 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2569 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2570 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2574 if ( handle->drainCounter ) {
\r
2575 handle->drainCounter++;
\r
2580 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2582 if ( stream_.doConvertBuffer[1] ) {
\r
2583 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2584 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2585 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2587 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2589 else { // no buffer conversion
\r
2590 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2591 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2592 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2598 RtApi::tickStreamTime();
\r
2601 //******************** End of __UNIX_JACK__ *********************//
\r
2604 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2606 // The ASIO API is designed around a callback scheme, so this
\r
2607 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2608 // Jack. The primary constraint with ASIO is that it only allows
\r
2609 // access to a single driver at a time. Thus, it is not possible to
\r
2610 // have more than one simultaneous RtAudio stream.
\r
2612 // This implementation also requires a number of external ASIO files
\r
2613 // and a few global variables. The ASIO callback scheme does not
\r
2614 // allow for the passing of user data, so we must create a global
\r
2615 // pointer to our callbackInfo structure.
\r
2617 // On unix systems, we make use of a pthread condition variable.
\r
2618 // Since there is no equivalent in Windows, I hacked something based
\r
2619 // on information found in
\r
2620 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2622 #include "asiosys.h"
\r
2624 #include "iasiothiscallresolver.h"
\r
2625 #include "asiodrivers.h"
\r
// File-scope state for the ASIO implementation.  The ASIO callback
// API carries no user-data pointer, so a global CallbackInfo pointer
// is required (see note above).
static AsioDrivers drivers;              // driver enumeration/loading helper
static ASIOCallbacks asioCallbacks;      // ASIO callback function table
static ASIODriverInfo driverInfo;        // driver info passed to ASIOInit
static CallbackInfo *asioCallbackInfo;   // global pointer to the open stream's callback info
static bool asioXRun;                    // presumably set when the driver reports an xrun — confirm in callback code below
\r
2634 struct AsioHandle {
\r
2635 int drainCounter; // Tracks callback counts when draining
\r
2636 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2637 ASIOBufferInfo *bufferInfos;
\r
2641 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
\r
2649 RtApiAsio :: RtApiAsio()
\r
2651 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2652 // CoInitialize beforehand, but it must be for appartment threading
\r
2653 // (in which case, CoInitilialize will return S_FALSE here).
\r
2654 coInitialized_ = false;
\r
2655 HRESULT hr = CoInitialize( NULL );
\r
2656 if ( FAILED(hr) ) {
\r
2657 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2658 error( RtAudioError::WARNING );
\r
2660 coInitialized_ = true;
\r
2662 drivers.removeCurrentDriver();
\r
2663 driverInfo.asioVersion = 2;
\r
2665 // See note in DirectSound implementation about GetDesktopWindow().
\r
2666 driverInfo.sysRef = GetForegroundWindow();
\r
2669 RtApiAsio :: ~RtApiAsio()
\r
2671 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2672 if ( coInitialized_ ) CoUninitialize();
\r
2675 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2677 return (unsigned int) drivers.asioGetNumDev();
\r
2680 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2682 RtAudio::DeviceInfo info;
\r
2683 info.probed = false;
\r
2686 unsigned int nDevices = getDeviceCount();
\r
2687 if ( nDevices == 0 ) {
\r
2688 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2689 error( RtAudioError::INVALID_USE );
\r
2693 if ( device >= nDevices ) {
\r
2694 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2695 error( RtAudioError::INVALID_USE );
\r
2699 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2700 if ( stream_.state != STREAM_CLOSED ) {
\r
2701 if ( device >= devices_.size() ) {
\r
2702 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2703 error( RtAudioError::WARNING );
\r
2706 return devices_[ device ];
\r
2709 char driverName[32];
\r
2710 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2711 if ( result != ASE_OK ) {
\r
2712 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2713 errorText_ = errorStream_.str();
\r
2714 error( RtAudioError::WARNING );
\r
2718 info.name = driverName;
\r
2720 if ( !drivers.loadDriver( driverName ) ) {
\r
2721 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2722 errorText_ = errorStream_.str();
\r
2723 error( RtAudioError::WARNING );
\r
2727 result = ASIOInit( &driverInfo );
\r
2728 if ( result != ASE_OK ) {
\r
2729 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2730 errorText_ = errorStream_.str();
\r
2731 error( RtAudioError::WARNING );
\r
2735 // Determine the device channel information.
\r
2736 long inputChannels, outputChannels;
\r
2737 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2738 if ( result != ASE_OK ) {
\r
2739 drivers.removeCurrentDriver();
\r
2740 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2741 errorText_ = errorStream_.str();
\r
2742 error( RtAudioError::WARNING );
\r
2746 info.outputChannels = outputChannels;
\r
2747 info.inputChannels = inputChannels;
\r
2748 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2749 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2751 // Determine the supported sample rates.
\r
2752 info.sampleRates.clear();
\r
2753 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2754 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2755 if ( result == ASE_OK )
\r
2756 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2759 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2760 ASIOChannelInfo channelInfo;
\r
2761 channelInfo.channel = 0;
\r
2762 channelInfo.isInput = true;
\r
2763 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2764 result = ASIOGetChannelInfo( &channelInfo );
\r
2765 if ( result != ASE_OK ) {
\r
2766 drivers.removeCurrentDriver();
\r
2767 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2768 errorText_ = errorStream_.str();
\r
2769 error( RtAudioError::WARNING );
\r
2773 info.nativeFormats = 0;
\r
2774 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2775 info.nativeFormats |= RTAUDIO_SINT16;
\r
2776 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2777 info.nativeFormats |= RTAUDIO_SINT32;
\r
2778 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2779 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2780 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2781 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2782 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2783 info.nativeFormats |= RTAUDIO_SINT24;
\r
2785 if ( info.outputChannels > 0 )
\r
2786 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2787 if ( info.inputChannels > 0 )
\r
2788 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2790 info.probed = true;
\r
2791 drivers.removeCurrentDriver();
\r
2795 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2797 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2798 object->callbackEvent( index );
\r
2801 void RtApiAsio :: saveDeviceInfo( void )
\r
2805 unsigned int nDevices = getDeviceCount();
\r
2806 devices_.resize( nDevices );
\r
2807 for ( unsigned int i=0; i<nDevices; i++ )
\r
2808 devices_[i] = getDeviceInfo( i );
\r
2811 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2812 unsigned int firstChannel, unsigned int sampleRate,
\r
2813 RtAudioFormat format, unsigned int *bufferSize,
\r
2814 RtAudio::StreamOptions *options )
\r
2816 // For ASIO, a duplex stream MUST use the same driver.
\r
2817 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2818 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2822 char driverName[32];
\r
2823 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2824 if ( result != ASE_OK ) {
\r
2825 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2826 errorText_ = errorStream_.str();
\r
2830 // Only load the driver once for duplex stream.
\r
2831 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2832 // The getDeviceInfo() function will not work when a stream is open
\r
2833 // because ASIO does not allow multiple devices to run at the same
\r
2834 // time. Thus, we'll probe the system before opening a stream and
\r
2835 // save the results for use by getDeviceInfo().
\r
2836 this->saveDeviceInfo();
\r
2838 if ( !drivers.loadDriver( driverName ) ) {
\r
2839 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2840 errorText_ = errorStream_.str();
\r
2844 result = ASIOInit( &driverInfo );
\r
2845 if ( result != ASE_OK ) {
\r
2846 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2847 errorText_ = errorStream_.str();
\r
2852 // Check the device channel count.
\r
2853 long inputChannels, outputChannels;
\r
2854 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2855 if ( result != ASE_OK ) {
\r
2856 drivers.removeCurrentDriver();
\r
2857 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2858 errorText_ = errorStream_.str();
\r
2862 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2863 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2864 drivers.removeCurrentDriver();
\r
2865 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2866 errorText_ = errorStream_.str();
\r
2869 stream_.nDeviceChannels[mode] = channels;
\r
2870 stream_.nUserChannels[mode] = channels;
\r
2871 stream_.channelOffset[mode] = firstChannel;
\r
2873 // Verify the sample rate is supported.
\r
2874 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2875 if ( result != ASE_OK ) {
\r
2876 drivers.removeCurrentDriver();
\r
2877 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2878 errorText_ = errorStream_.str();
\r
2882 // Get the current sample rate
\r
2883 ASIOSampleRate currentRate;
\r
2884 result = ASIOGetSampleRate( ¤tRate );
\r
2885 if ( result != ASE_OK ) {
\r
2886 drivers.removeCurrentDriver();
\r
2887 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2888 errorText_ = errorStream_.str();
\r
2892 // Set the sample rate only if necessary
\r
2893 if ( currentRate != sampleRate ) {
\r
2894 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2895 if ( result != ASE_OK ) {
\r
2896 drivers.removeCurrentDriver();
\r
2897 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2898 errorText_ = errorStream_.str();
\r
2903 // Determine the driver data type.
\r
2904 ASIOChannelInfo channelInfo;
\r
2905 channelInfo.channel = 0;
\r
2906 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2907 else channelInfo.isInput = true;
\r
2908 result = ASIOGetChannelInfo( &channelInfo );
\r
2909 if ( result != ASE_OK ) {
\r
2910 drivers.removeCurrentDriver();
\r
2911 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2912 errorText_ = errorStream_.str();
\r
2916 // Assuming WINDOWS host is always little-endian.
\r
2917 stream_.doByteSwap[mode] = false;
\r
2918 stream_.userFormat = format;
\r
2919 stream_.deviceFormat[mode] = 0;
\r
2920 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2921 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2922 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2924 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2926 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2930 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2934 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2938 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2941 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2942 drivers.removeCurrentDriver();
\r
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2944 errorText_ = errorStream_.str();
\r
2948 // Set the buffer size. For a duplex stream, this will end up
\r
2949 // setting the buffer size based on the input constraints, which
\r
2951 long minSize, maxSize, preferSize, granularity;
\r
2952 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2953 if ( result != ASE_OK ) {
\r
2954 drivers.removeCurrentDriver();
\r
2955 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2956 errorText_ = errorStream_.str();
\r
2960 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2961 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2962 else if ( granularity == -1 ) {
\r
2963 // Make sure bufferSize is a power of two.
\r
2964 int log2_of_min_size = 0;
\r
2965 int log2_of_max_size = 0;
\r
2967 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2968 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2969 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2972 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2973 int min_delta_num = log2_of_min_size;
\r
2975 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2976 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2977 if (current_delta < min_delta) {
\r
2978 min_delta = current_delta;
\r
2979 min_delta_num = i;
\r
2983 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2984 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2985 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2987 else if ( granularity != 0 ) {
\r
2988 // Set to an even multiple of granularity, rounding up.
\r
2989 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2992 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2993 drivers.removeCurrentDriver();
\r
2994 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2998 stream_.bufferSize = *bufferSize;
\r
2999 stream_.nBuffers = 2;
\r
3001 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3002 else stream_.userInterleaved = true;
\r
3004 // ASIO always uses non-interleaved buffers.
\r
3005 stream_.deviceInterleaved[mode] = false;
\r
3007 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3008 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3009 if ( handle == 0 ) {
\r
3011 handle = new AsioHandle;
\r
3013 catch ( std::bad_alloc& ) {
\r
3014 //if ( handle == NULL ) {
\r
3015 drivers.removeCurrentDriver();
\r
3016 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3019 handle->bufferInfos = 0;
\r
3021 // Create a manual-reset event.
\r
3022 handle->condition = CreateEvent( NULL, // no security
\r
3023 TRUE, // manual-reset
\r
3024 FALSE, // non-signaled initially
\r
3025 NULL ); // unnamed
\r
3026 stream_.apiHandle = (void *) handle;
\r
3029 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3030 // and output separately, we'll have to dispose of previously
\r
3031 // created output buffers for a duplex stream.
\r
3032 long inputLatency, outputLatency;
\r
3033 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3034 ASIODisposeBuffers();
\r
3035 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3038 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3039 bool buffersAllocated = false;
\r
3040 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3041 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3042 if ( handle->bufferInfos == NULL ) {
\r
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3044 errorText_ = errorStream_.str();
\r
3048 ASIOBufferInfo *infos;
\r
3049 infos = handle->bufferInfos;
\r
3050 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3051 infos->isInput = ASIOFalse;
\r
3052 infos->channelNum = i + stream_.channelOffset[0];
\r
3053 infos->buffers[0] = infos->buffers[1] = 0;
\r
3055 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3056 infos->isInput = ASIOTrue;
\r
3057 infos->channelNum = i + stream_.channelOffset[1];
\r
3058 infos->buffers[0] = infos->buffers[1] = 0;
\r
3061 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3062 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3063 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3064 asioCallbacks.asioMessage = &asioMessages;
\r
3065 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3066 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3067 if ( result != ASE_OK ) {
\r
3068 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3069 errorText_ = errorStream_.str();
\r
3072 buffersAllocated = true;
\r
3074 // Set flags for buffer conversion.
\r
3075 stream_.doConvertBuffer[mode] = false;
\r
3076 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3077 stream_.doConvertBuffer[mode] = true;
\r
3078 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3079 stream_.nUserChannels[mode] > 1 )
\r
3080 stream_.doConvertBuffer[mode] = true;
\r
3082 // Allocate necessary internal buffers
\r
3083 unsigned long bufferBytes;
\r
3084 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3085 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3086 if ( stream_.userBuffer[mode] == NULL ) {
\r
3087 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3091 if ( stream_.doConvertBuffer[mode] ) {
\r
3093 bool makeBuffer = true;
\r
3094 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3095 if ( mode == INPUT ) {
\r
3096 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3097 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3098 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3102 if ( makeBuffer ) {
\r
3103 bufferBytes *= *bufferSize;
\r
3104 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3105 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3106 if ( stream_.deviceBuffer == NULL ) {
\r
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3113 stream_.sampleRate = sampleRate;
\r
3114 stream_.device[mode] = device;
\r
3115 stream_.state = STREAM_STOPPED;
\r
3116 asioCallbackInfo = &stream_.callbackInfo;
\r
3117 stream_.callbackInfo.object = (void *) this;
\r
3118 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3119 // We had already set up an output stream.
\r
3120 stream_.mode = DUPLEX;
\r
3122 stream_.mode = mode;
\r
3124 // Determine device latencies
\r
3125 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3126 if ( result != ASE_OK ) {
\r
3127 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3128 errorText_ = errorStream_.str();
\r
3129 error( RtAudioError::WARNING); // warn but don't fail
\r
3132 stream_.latency[0] = outputLatency;
\r
3133 stream_.latency[1] = inputLatency;
\r
3136 // Setup the buffer conversion information structure. We don't use
\r
3137 // buffers to do channel offsets, so we override that parameter
\r
3139 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3144 if ( buffersAllocated )
\r
3145 ASIODisposeBuffers();
\r
3146 drivers.removeCurrentDriver();
\r
3149 CloseHandle( handle->condition );
\r
3150 if ( handle->bufferInfos )
\r
3151 free( handle->bufferInfos );
\r
3153 stream_.apiHandle = 0;
\r
3156 for ( int i=0; i<2; i++ ) {
\r
3157 if ( stream_.userBuffer[i] ) {
\r
3158 free( stream_.userBuffer[i] );
\r
3159 stream_.userBuffer[i] = 0;
\r
3163 if ( stream_.deviceBuffer ) {
\r
3164 free( stream_.deviceBuffer );
\r
3165 stream_.deviceBuffer = 0;
\r
3171 void RtApiAsio :: closeStream()
\r
3173 if ( stream_.state == STREAM_CLOSED ) {
\r
3174 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3175 error( RtAudioError::WARNING );
\r
3179 if ( stream_.state == STREAM_RUNNING ) {
\r
3180 stream_.state = STREAM_STOPPED;
\r
3183 ASIODisposeBuffers();
\r
3184 drivers.removeCurrentDriver();
\r
3186 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3188 CloseHandle( handle->condition );
\r
3189 if ( handle->bufferInfos )
\r
3190 free( handle->bufferInfos );
\r
3192 stream_.apiHandle = 0;
\r
3195 for ( int i=0; i<2; i++ ) {
\r
3196 if ( stream_.userBuffer[i] ) {
\r
3197 free( stream_.userBuffer[i] );
\r
3198 stream_.userBuffer[i] = 0;
\r
3202 if ( stream_.deviceBuffer ) {
\r
3203 free( stream_.deviceBuffer );
\r
3204 stream_.deviceBuffer = 0;
\r
3207 stream_.mode = UNINITIALIZED;
\r
3208 stream_.state = STREAM_CLOSED;
\r
// File-local flag: set when asioStopStream() has been dispatched so the
// stop isn't issued twice.  static for internal linkage, matching the
// other globals in this section.
static bool stopThreadCalled = false;
\r
3213 void RtApiAsio :: startStream()
\r
3216 if ( stream_.state == STREAM_RUNNING ) {
\r
3217 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3218 error( RtAudioError::WARNING );
\r
3222 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3223 ASIOError result = ASIOStart();
\r
3224 if ( result != ASE_OK ) {
\r
3225 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3226 errorText_ = errorStream_.str();
\r
3230 handle->drainCounter = 0;
\r
3231 handle->internalDrain = false;
\r
3232 ResetEvent( handle->condition );
\r
3233 stream_.state = STREAM_RUNNING;
\r
3237 stopThreadCalled = false;
\r
3239 if ( result == ASE_OK ) return;
\r
3240 error( RtAudioError::SYSTEM_ERROR );
\r
3243 void RtApiAsio :: stopStream()
\r
3246 if ( stream_.state == STREAM_STOPPED ) {
\r
3247 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3248 error( RtAudioError::WARNING );
\r
3252 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3253 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3254 if ( handle->drainCounter == 0 ) {
\r
3255 handle->drainCounter = 2;
\r
3256 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3260 stream_.state = STREAM_STOPPED;
\r
3262 ASIOError result = ASIOStop();
\r
3263 if ( result != ASE_OK ) {
\r
3264 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3265 errorText_ = errorStream_.str();
\r
3268 if ( result == ASE_OK ) return;
\r
3269 error( RtAudioError::SYSTEM_ERROR );
\r
3272 void RtApiAsio :: abortStream()
\r
3275 if ( stream_.state == STREAM_STOPPED ) {
\r
3276 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3277 error( RtAudioError::WARNING );
\r
3281 // The following lines were commented-out because some behavior was
\r
3282 // noted where the device buffers need to be zeroed to avoid
\r
3283 // continuing sound, even when the device buffers are completely
\r
3284 // disposed. So now, calling abort is the same as calling stop.
\r
3285 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3286 // handle->drainCounter = 2;
\r
3290 // This function will be called by a spawned thread when the user
\r
3291 // callback function signals that the stream should be stopped or
\r
3292 // aborted. It is necessary to handle it this way because the
\r
3293 // callbackEvent() function must return before the ASIOStop()
\r
3294 // function will return.
\r
3295 static unsigned __stdcall asioStopStream( void *ptr )
\r
3297 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3298 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3300 object->stopStream();
\r
3301 _endthreadex( 0 );
\r
3305 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3307 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3308 if ( stream_.state == STREAM_CLOSED ) {
\r
3309 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3310 error( RtAudioError::WARNING );
\r
3314 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3317 // Check if we were draining the stream and signal if finished.
\r
3318 if ( handle->drainCounter > 3 ) {
\r
3320 stream_.state = STREAM_STOPPING;
\r
3321 if ( handle->internalDrain == false )
\r
3322 SetEvent( handle->condition );
\r
3323 else { // spawn a thread to stop the stream
\r
3324 unsigned threadId;
\r
3325 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3326 &stream_.callbackInfo, 0, &threadId );
\r
3331 // Invoke user callback to get fresh output data UNLESS we are
\r
3332 // draining stream.
\r
3333 if ( handle->drainCounter == 0 ) {
\r
3334 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3335 double streamTime = getStreamTime();
\r
3336 RtAudioStreamStatus status = 0;
\r
3337 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3338 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3341 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3342 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3345 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3346 stream_.bufferSize, streamTime, status, info->userData );
\r
3347 if ( cbReturnValue == 2 ) {
\r
3348 stream_.state = STREAM_STOPPING;
\r
3349 handle->drainCounter = 2;
\r
3350 unsigned threadId;
\r
3351 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3352 &stream_.callbackInfo, 0, &threadId );
\r
3355 else if ( cbReturnValue == 1 ) {
\r
3356 handle->drainCounter = 1;
\r
3357 handle->internalDrain = true;
\r
3361 unsigned int nChannels, bufferBytes, i, j;
\r
3362 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3363 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3365 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3367 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3369 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3370 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3371 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3375 else if ( stream_.doConvertBuffer[0] ) {
\r
3377 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3378 if ( stream_.doByteSwap[0] )
\r
3379 byteSwapBuffer( stream_.deviceBuffer,
\r
3380 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3381 stream_.deviceFormat[0] );
\r
3383 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3384 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3385 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3386 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3392 if ( stream_.doByteSwap[0] )
\r
3393 byteSwapBuffer( stream_.userBuffer[0],
\r
3394 stream_.bufferSize * stream_.nUserChannels[0],
\r
3395 stream_.userFormat );
\r
3397 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3398 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3399 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3400 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3405 if ( handle->drainCounter ) {
\r
3406 handle->drainCounter++;
\r
3411 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3413 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3415 if (stream_.doConvertBuffer[1]) {
\r
3417 // Always interleave ASIO input data.
\r
3418 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3419 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3420 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3421 handle->bufferInfos[i].buffers[bufferIndex],
\r
3425 if ( stream_.doByteSwap[1] )
\r
3426 byteSwapBuffer( stream_.deviceBuffer,
\r
3427 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3428 stream_.deviceFormat[1] );
\r
3429 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3433 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3434 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3435 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3436 handle->bufferInfos[i].buffers[bufferIndex],
\r
3441 if ( stream_.doByteSwap[1] )
\r
3442 byteSwapBuffer( stream_.userBuffer[1],
\r
3443 stream_.bufferSize * stream_.nUserChannels[1],
\r
3444 stream_.userFormat );
\r
3449 // The following call was suggested by Malte Clasen. While the API
\r
3450 // documentation indicates it should not be required, some device
\r
3451 // drivers apparently do not function correctly without it.
\r
3452 ASIOOutputReady();
\r
3454 RtApi::tickStreamTime();
\r
3458 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3460 // The ASIO documentation says that this usually only happens during
\r
3461 // external sync. Audio processing is not stopped by the driver,
\r
3462 // actual sample rate might not have even changed, maybe only the
\r
3463 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3466 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3468 object->stopStream();
\r
3470 catch ( RtAudioError &exception ) {
\r
3471 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3475 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3478 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3482 switch( selector ) {
\r
3483 case kAsioSelectorSupported:
\r
3484 if ( value == kAsioResetRequest
\r
3485 || value == kAsioEngineVersion
\r
3486 || value == kAsioResyncRequest
\r
3487 || value == kAsioLatenciesChanged
\r
3488 // The following three were added for ASIO 2.0, you don't
\r
3489 // necessarily have to support them.
\r
3490 || value == kAsioSupportsTimeInfo
\r
3491 || value == kAsioSupportsTimeCode
\r
3492 || value == kAsioSupportsInputMonitor)
\r
3495 case kAsioResetRequest:
\r
3496 // Defer the task and perform the reset of the driver during the
\r
3497 // next "safe" situation. You cannot reset the driver right now,
\r
3498 // as this code is called from the driver. Reset the driver is
\r
3499 // done by completely destruct is. I.e. ASIOStop(),
\r
3500 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3502 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3505 case kAsioResyncRequest:
\r
3506 // This informs the application that the driver encountered some
\r
3507 // non-fatal data loss. It is used for synchronization purposes
\r
3508 // of different media. Added mainly to work around the Win16Mutex
\r
3509 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3510 // which could lose data because the Mutex was held too long by
\r
3511 // another thread. However a driver can issue it in other
\r
3512 // situations, too.
\r
3513 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3517 case kAsioLatenciesChanged:
\r
3518 // This will inform the host application that the drivers were
\r
3519 // latencies changed. Beware, it this does not mean that the
\r
3520 // buffer sizes have changed! You might need to update internal
\r
3522 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3525 case kAsioEngineVersion:
\r
3526 // Return the supported ASIO version of the host application. If
\r
3527 // a host application does not implement this selector, ASIO 1.0
\r
3528 // is assumed by the driver.
\r
3531 case kAsioSupportsTimeInfo:
\r
3532 // Informs the driver whether the
\r
3533 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3534 // For compatibility with ASIO 1.0 drivers the host application
\r
3535 // should always support the "old" bufferSwitch method, too.
\r
3538 case kAsioSupportsTimeCode:
\r
3539 // Informs the driver whether application is interested in time
\r
3540 // code info. If an application does not need to know about time
\r
3541 // code, the driver has less work to do.
\r
3548 static const char* getAsioErrorString( ASIOError result )
\r
3553 const char*message;
\r
3556 static const Messages m[] =
\r
3558 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3559 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3560 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3561 { ASE_InvalidMode, "Invalid mode." },
\r
3562 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3563 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3564 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3567 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3568 if ( m[i].value == result ) return m[i].message;
\r
3570 return "Unknown error.";
\r
3573 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3577 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3579 #include "RtWasapi.inl"
\r
3580 #include <audioclient.h>
\r
3582 #include <functiondiscoverykeys.h>
\r
3583 #include <mmdeviceapi.h>
\r
3585 //=============================================================================
\r
3587 #define EXIT_ON_ERROR( hr, errorType, errorText )\
\r
3588 if( FAILED( hr ) )\
\r
3590 errorText_ = __FUNCTION__ ": " errorText;\
\r
3591 error( errorType );\
\r
3595 #define SAFE_RELEASE( objectPtr )\
\r
3598 objectPtr->Release();\
\r
3599 objectPtr = NULL;\
\r
3602 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3604 //-----------------------------------------------------------------------------
\r
3606 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3607 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3608 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3609 // provide intermediate storage for read / write synchronization.
\r
3610 class WasapiBuffer
\r
3614 : buffer_( NULL ),
\r
3623 // sets the length of the internal ring buffer
\r
3624 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3627 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3629 bufferSize_ = bufferSize;
\r
3634 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3635 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3637 if( !buffer || // incoming buffer is NULL
\r
3638 bufferSize == 0 || // incoming buffer has no data
\r
3639 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3644 unsigned int relOutIndex = outIndex_;
\r
3645 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3646 if( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ )
\r
3648 relOutIndex += bufferSize_;
\r
3651 // "in" index can end on the "out" index but cannot begin at it
\r
3652 if( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex )
\r
3654 return false; // not enough space between "in" index and "out" index
\r
3657 // copy buffer from external to internal
\r
3658 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3659 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3660 int fromInSize = bufferSize - fromZeroSize;
\r
3664 case RTAUDIO_SINT8:
\r
3665 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3666 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3668 case RTAUDIO_SINT16:
\r
3669 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3670 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3672 case RTAUDIO_SINT24:
\r
3673 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3674 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3676 case RTAUDIO_SINT32:
\r
3677 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3678 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3680 case RTAUDIO_FLOAT32:
\r
3681 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3682 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3684 case RTAUDIO_FLOAT64:
\r
3685 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3686 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3690 // update "in" index
\r
3691 inIndex_ += bufferSize;
\r
3692 inIndex_ %= bufferSize_;
\r
3697 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3698 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3700 if( !buffer || // incoming buffer is NULL
\r
3701 bufferSize == 0 || // incoming buffer has no data
\r
3702 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3707 unsigned int relInIndex = inIndex_;
\r
3708 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3709 if( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ )
\r
3711 relInIndex += bufferSize_;
\r
3714 // "out" index can begin at and end on the "in" index
\r
3715 if( outIndex_ < relInIndex && outIndexEnd > relInIndex )
\r
3717 return false; // not enough space between "out" index and "in" index
\r
3720 // copy buffer from internal to external
\r
3721 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3722 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3723 int fromOutSize = bufferSize - fromZeroSize;
\r
3727 case RTAUDIO_SINT8:
\r
3728 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3729 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3731 case RTAUDIO_SINT16:
\r
3732 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3733 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3735 case RTAUDIO_SINT24:
\r
3736 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3737 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3739 case RTAUDIO_SINT32:
\r
3740 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3741 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3743 case RTAUDIO_FLOAT32:
\r
3744 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3745 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3747 case RTAUDIO_FLOAT64:
\r
3748 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3749 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3753 // update "out" index
\r
3754 outIndex_ += bufferSize;
\r
3755 outIndex_ %= bufferSize_;
\r
3762 unsigned int bufferSize_;
\r
3763 unsigned int inIndex_;
\r
3764 unsigned int outIndex_;
\r
3767 //-----------------------------------------------------------------------------
\r
3769 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3770 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3771 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3772 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3773 // one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates
\r
3774 // that may cause artifacts via this conversion.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& inChannelCount,
\r
3778 const unsigned int& outChannelCount,
\r
3779 const unsigned int& inSampleRate,
\r
3780 const unsigned int& outSampleRate,
\r
3781 const unsigned int& inSampleCount,
\r
3782 unsigned int& outSampleCount,
\r
3783 const RtAudioFormat& format )
\r
3785 // calculate the new outSampleCount and relative sampleStep
\r
3786 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3787 float sampleStep = 1.0f / sampleRatio;
\r
3788 float inSampleFraction = 0.0f;
\r
3789 unsigned int commonChannelCount = min( inChannelCount, outChannelCount );
\r
3791 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3793 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3794 for( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3796 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3800 case RTAUDIO_SINT8:
\r
3801 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3803 case RTAUDIO_SINT16:
\r
3804 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3806 case RTAUDIO_SINT24:
\r
3807 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3809 case RTAUDIO_SINT32:
\r
3810 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3812 case RTAUDIO_FLOAT32:
\r
3813 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3815 case RTAUDIO_FLOAT64:
\r
3816 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3820 // jump to next in sample
\r
3821 inSampleFraction += sampleStep;
\r
3825 //-----------------------------------------------------------------------------
\r
3827 // A structure to hold various information related to the WASAPI implementation.
\r
3828 struct WasapiHandle
\r
3830 IAudioClient* captureAudioClient;
\r
3831 IAudioClient* renderAudioClient;
\r
3832 IAudioCaptureClient* captureClient;
\r
3833 IAudioRenderClient* renderClient;
\r
3834 HANDLE captureEvent;
\r
3835 HANDLE renderEvent;
\r
3838 : captureAudioClient( NULL ),
\r
3839 renderAudioClient( NULL ),
\r
3840 captureClient( NULL ),
\r
3841 renderClient( NULL ),
\r
3842 captureEvent( NULL ),
\r
3843 renderEvent( NULL ) {}
\r
3846 //=============================================================================
\r
3848 RtApiWasapi::RtApiWasapi()
\r
3849 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3851 // WASAPI can run either apartment or multi-threaded
\r
3852 HRESULT hr = CoInitialize( NULL );
\r
3854 if( !FAILED( hr ) )
\r
3855 coInitialized_ = true;
\r
3857 // instantiate device enumerator
\r
3858 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3859 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3860 ( void** ) &deviceEnumerator_ );
\r
3862 if( FAILED( hr ) ) {
\r
3863 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3864 error( RtAudioError::DRIVER_ERROR );
\r
3868 //-----------------------------------------------------------------------------
\r
3870 RtApiWasapi::~RtApiWasapi()
\r
3872 // if this object previously called CoInitialize()
\r
3873 if( coInitialized_ )
\r
3878 if( stream_.state != STREAM_CLOSED )
\r
3883 SAFE_RELEASE( deviceEnumerator_ );
\r
3886 //=============================================================================
\r
3888 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3890 unsigned int captureDeviceCount = 0;
\r
3891 unsigned int renderDeviceCount = 0;
\r
3893 IMMDeviceCollection* captureDevices = NULL;
\r
3894 IMMDeviceCollection* renderDevices = NULL;
\r
3896 // count capture devices
\r
3897 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3898 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
3900 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3901 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
3903 // count render devices
\r
3904 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3905 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
3907 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3908 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
3911 // release all references
\r
3912 SAFE_RELEASE( captureDevices );
\r
3913 SAFE_RELEASE( renderDevices );
\r
3915 return captureDeviceCount + renderDeviceCount;
\r
3918 //-----------------------------------------------------------------------------
\r
3920 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3922 RtAudio::DeviceInfo info;
\r
3923 unsigned int captureDeviceCount = 0;
\r
3924 unsigned int renderDeviceCount = 0;
\r
3925 std::wstring deviceName;
\r
3926 std::string defaultDeviceName;
\r
3927 bool isCaptureDevice = false;
\r
3929 PROPVARIANT deviceNameProp;
\r
3930 PROPVARIANT defaultDeviceNameProp;
\r
3932 IMMDeviceCollection* captureDevices = NULL;
\r
3933 IMMDeviceCollection* renderDevices = NULL;
\r
3934 IMMDevice* devicePtr = NULL;
\r
3935 IMMDevice* defaultDevicePtr = NULL;
\r
3936 IAudioClient* audioClient = NULL;
\r
3937 IPropertyStore* devicePropStore = NULL;
\r
3938 IPropertyStore* defaultDevicePropStore = NULL;
\r
3940 WAVEFORMATEX* deviceFormat = NULL;
\r
3941 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3944 info.probed = false;
\r
3946 // count capture devices
\r
3947 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3948 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
3950 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3951 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
3953 // count render devices
\r
3954 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3955 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
3957 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3958 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
3960 // validate device index
\r
3961 if ( device >= captureDeviceCount + renderDeviceCount )
\r
3962 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );
\r
3964 // determine whether index falls within capture or render devices
\r
3965 if ( device < captureDeviceCount ) {
\r
3966 hr = captureDevices->Item( device, &devicePtr );
\r
3967 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );
\r
3969 isCaptureDevice = true;
\r
3972 hr = renderDevices->Item( device - captureDeviceCount, &devicePtr );
\r
3973 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );
\r
3975 isCaptureDevice = false;
\r
3978 // get default device name
\r
3979 if ( isCaptureDevice ) {
\r
3980 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
3981 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default render device handle" );
\r
3984 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
3985 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default capture device handle" );
\r
3988 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
3989 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open default device property store" );
\r
3991 PropVariantInit( &defaultDeviceNameProp );
\r
3993 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
3994 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default device property: PKEY_Device_FriendlyName" );
\r
3996 deviceName = defaultDeviceNameProp.pwszVal;
\r
3997 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4000 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4001 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open device property store" );
\r
4003 PropVariantInit( &deviceNameProp );
\r
4005 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4006 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device property: PKEY_Device_FriendlyName" );
\r
4008 deviceName = deviceNameProp.pwszVal;
\r
4009 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4012 if ( isCaptureDevice ) {
\r
4013 info.isDefaultInput = info.name == defaultDeviceName;
\r
4014 info.isDefaultOutput = false;
\r
4017 info.isDefaultInput = false;
\r
4018 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4022 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4023 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4025 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4026 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4028 if ( isCaptureDevice ) {
\r
4029 info.inputChannels = deviceFormat->nChannels;
\r
4030 info.outputChannels = 0;
\r
4031 info.duplexChannels = 0;
\r
4034 info.inputChannels = 0;
\r
4035 info.outputChannels = deviceFormat->nChannels;
\r
4036 info.duplexChannels = 0;
\r
4040 info.sampleRates.clear();
\r
4042 // allow support for sample rates that are multiples of the base rate
\r
4043 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4044 if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) {
\r
4045 if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) {
\r
4046 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4050 if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) {
\r
4051 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4057 info.nativeFormats = 0;
\r
4059 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4060 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4061 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4063 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4064 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4066 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4067 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4070 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4071 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4072 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4074 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4075 info.nativeFormats |= RTAUDIO_SINT8;
\r
4077 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4078 info.nativeFormats |= RTAUDIO_SINT16;
\r
4080 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4081 info.nativeFormats |= RTAUDIO_SINT24;
\r
4083 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4084 info.nativeFormats |= RTAUDIO_SINT32;
\r
4089 info.probed = true;
\r
4092 // release all references
\r
4093 PropVariantClear( &deviceNameProp );
\r
4094 PropVariantClear( &defaultDeviceNameProp );
\r
4096 SAFE_RELEASE( captureDevices );
\r
4097 SAFE_RELEASE( renderDevices );
\r
4098 SAFE_RELEASE( devicePtr );
\r
4099 SAFE_RELEASE( defaultDevicePtr );
\r
4100 SAFE_RELEASE( audioClient );
\r
4101 SAFE_RELEASE( devicePropStore );
\r
4102 SAFE_RELEASE( defaultDevicePropStore );
\r
4104 CoTaskMemFree( deviceFormat );
\r
4105 CoTaskMemFree( closestMatchFormat );
\r
4110 //-----------------------------------------------------------------------------
\r
4112 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4114 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4115 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4123 //-----------------------------------------------------------------------------
\r
4125 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4127 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4128 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4136 //-----------------------------------------------------------------------------
\r
4138 void RtApiWasapi::closeStream( void )
\r
4140 if ( stream_.state == STREAM_CLOSED ) {
\r
4141 errorText_ = "RtApiWasapi::closeStream: No open stream to close";
\r
4142 error( RtAudioError::WARNING );
\r
4146 if ( stream_.state != STREAM_STOPPED )
\r
4149 // clean up stream memory
\r
4150 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4151 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4153 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4154 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4156 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4157 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4159 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4160 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4162 delete stream_.apiHandle;
\r
4163 stream_.apiHandle = NULL;
\r
4165 for ( int i = 0; i < 2; i++ ) {
\r
4166 if ( stream_.userBuffer[i] ) {
\r
4167 free( stream_.userBuffer[i] );
\r
4168 stream_.userBuffer[i] = 0;
\r
4172 if ( stream_.deviceBuffer ) {
\r
4173 free( stream_.deviceBuffer );
\r
4174 stream_.deviceBuffer = 0;
\r
4177 // update stream state
\r
4178 stream_.state = STREAM_CLOSED;
\r
4181 //-----------------------------------------------------------------------------
\r
4183 void RtApiWasapi::startStream( void )
\r
4187 if ( stream_.state == STREAM_RUNNING ) {
\r
4188 errorText_ = "RtApiWasapi::startStream: The stream is already running";
\r
4189 error( RtAudioError::WARNING );
\r
4193 // update stream state
\r
4194 stream_.state = STREAM_RUNNING;
\r
4196 // create WASAPI stream thread
\r
4197 stream_.callbackInfo.thread = ( unsigned int ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4199 if ( !stream_.callbackInfo.thread ) {
\r
4200 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread";
\r
4201 error( RtAudioError::THREAD_ERROR );
\r
4204 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4205 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4209 //-----------------------------------------------------------------------------
\r
4211 void RtApiWasapi::stopStream( void )
\r
4215 if ( stream_.state == STREAM_STOPPED ) {
\r
4216 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped";
\r
4217 error( RtAudioError::WARNING );
\r
4221 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4222 stream_.state = STREAM_STOPPING;
\r
4224 // wait until stream thread is stopped
\r
4225 while( stream_.state != STREAM_STOPPED ) {
\r
4229 // Wait for the last buffer to play before stopping.
\r
4230 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4232 // stop capture client if applicable
\r
4233 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4234 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4235 if ( FAILED( hr ) ) {
\r
4236 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";
\r
4237 error( RtAudioError::DRIVER_ERROR );
\r
4241 // stop render client if applicable
\r
4242 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4243 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4244 if ( FAILED( hr ) ) {
\r
4245 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";
\r
4246 error( RtAudioError::DRIVER_ERROR );
\r
4250 // close thread handle
\r
4251 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4252 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";
\r
4253 error( RtAudioError::THREAD_ERROR );
\r
4256 stream_.callbackInfo.thread = NULL;
\r
4259 //-----------------------------------------------------------------------------
\r
4261 void RtApiWasapi::abortStream( void )
\r
4265 if ( stream_.state == STREAM_STOPPED ) {
\r
4266 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped";
\r
4267 error( RtAudioError::WARNING );
\r
4271 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4272 stream_.state = STREAM_STOPPING;
\r
4274 // wait until stream thread is stopped
\r
4275 while ( stream_.state != STREAM_STOPPED ) {
\r
4279 // stop capture client if applicable
\r
4280 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4281 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4282 if ( FAILED( hr ) ) {
\r
4283 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";
\r
4284 error( RtAudioError::DRIVER_ERROR );
\r
4288 // stop render client if applicable
\r
4289 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4290 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4291 if ( FAILED( hr ) ) {
\r
4292 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";
\r
4293 error( RtAudioError::DRIVER_ERROR );
\r
4297 // close thread handle
\r
4298 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4299 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";
\r
4300 error( RtAudioError::THREAD_ERROR );
\r
4303 stream_.callbackInfo.thread = NULL;
\r
4306 //-----------------------------------------------------------------------------
\r
4308 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4309 unsigned int firstChannel, unsigned int sampleRate,
\r
4310 RtAudioFormat format, unsigned int* bufferSize,
\r
4311 RtAudio::StreamOptions* options )
\r
4313 bool methodResult = FAILURE;
\r
4314 unsigned int captureDeviceCount = 0;
\r
4315 unsigned int renderDeviceCount = 0;
\r
4317 IMMDeviceCollection* captureDevices = NULL;
\r
4318 IMMDeviceCollection* renderDevices = NULL;
\r
4319 IMMDevice* devicePtr = NULL;
\r
4320 WAVEFORMATEX* deviceFormat = NULL;
\r
4322 // create API Handle if not already created
\r
4323 if ( !stream_.apiHandle )
\r
4324 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4326 // count capture devices
\r
4327 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4328 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );
\r
4330 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4331 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );
\r
4333 // count render devices
\r
4334 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4335 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
\r
4337 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4338 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
\r
4340 // validate device index
\r
4341 if ( device >= captureDeviceCount + renderDeviceCount )
\r
4342 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );
\r
4344 // determine whether index falls within capture or render devices
\r
4345 if ( device < captureDeviceCount ) {
\r
4346 if ( mode != INPUT )
\r
4347 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Capture device selected as output device" );
\r
4349 // retrieve captureAudioClient from devicePtr
\r
4350 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4352 hr = captureDevices->Item( device, &devicePtr );
\r
4353 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );
\r
4355 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4356 NULL, ( void** ) &captureAudioClient );
\r
4357 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4359 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4360 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4362 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4363 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4366 if ( mode != OUTPUT )
\r
4367 EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Render device selected as input device" );
\r
4369 // retrieve renderAudioClient from devicePtr
\r
4370 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4372 hr = renderDevices->Item( device - captureDeviceCount, &devicePtr );
\r
4373 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );
\r
4375 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4376 NULL, ( void** ) &renderAudioClient );
\r
4377 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );
\r
4379 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4380 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4382 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4383 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4386 // fill stream data
\r
4387 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4388 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4389 stream_.mode = DUPLEX;
\r
4392 stream_.mode = mode;
\r
4395 stream_.device[mode] = device;
\r
4396 stream_.state = STREAM_STOPPED;
\r
4397 stream_.doByteSwap[mode] = false;
\r
4398 stream_.sampleRate = sampleRate;
\r
4399 stream_.bufferSize = *bufferSize;
\r
4400 stream_.nBuffers = 1;
\r
4401 stream_.nUserChannels[mode] = channels;
\r
4402 stream_.channelOffset[mode] = firstChannel;
\r
4403 stream_.userFormat = format;
\r
4404 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4406 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4407 stream_.userInterleaved = false;
\r
4409 stream_.userInterleaved = true;
\r
4410 stream_.deviceInterleaved[mode] = true;
\r
4412 // Set flags for buffer conversion.
\r
4413 stream_.doConvertBuffer[mode] = false;
\r
4414 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4415 stream_.doConvertBuffer[mode] = true;
\r
4416 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4417 stream_.nUserChannels[mode] > 1 )
\r
4418 stream_.doConvertBuffer[mode] = true;
\r
4420 if ( stream_.doConvertBuffer[mode] )
\r
4421 setConvertInfo( mode, 0 );
\r
4423 // Allocate necessary internal buffers
\r
4424 unsigned int bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4426 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4427 if ( !stream_.userBuffer[mode] )
\r
4428 EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating user buffer memory" );
\r
4430 if ( stream_.doConvertBuffer[mode] && !stream_.deviceBuffer ) {
\r
4431 unsigned int deviceBufferSize = max( stream_.nUserChannels[INPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ),
\r
4432 stream_.nUserChannels[OUTPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ) );
\r
4434 stream_.deviceBuffer = ( char* ) calloc( deviceBufferSize, 1 );
\r
4435 if ( !stream_.deviceBuffer )
\r
4436 EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating device buffer memory" );
\r
4439 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4440 stream_.callbackInfo.priority = 15;
\r
4442 stream_.callbackInfo.priority = 0;
\r
4444 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4445 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4447 methodResult = SUCCESS;
\r
4452 SAFE_RELEASE( captureDevices );
\r
4453 SAFE_RELEASE( renderDevices );
\r
4454 SAFE_RELEASE( devicePtr );
\r
4456 CoTaskMemFree( deviceFormat );
\r
4458 // if method failed, close the stream
\r
4459 if ( methodResult == FAILURE )
\r
4462 return methodResult;
\r
4465 //=============================================================================
\r
4467 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4470 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4475 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4478 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4483 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4486 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4491 //-----------------------------------------------------------------------------
\r
4493 void RtApiWasapi::wasapiThread()
\r
4495 // as this is a new thread, we must CoInitialize it
\r
4496 CoInitialize( NULL );
\r
4500 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4501 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4502 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4503 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4504 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4505 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4507 WAVEFORMATEX* captureFormat = NULL;
\r
4508 WAVEFORMATEX* renderFormat = NULL;
\r
4509 float captureSrRatio = 0.0f;
\r
4510 float renderSrRatio = 0.0f;
\r
4511 WasapiBuffer captureBuffer;
\r
4512 WasapiBuffer renderBuffer;
\r
4514 // Attempt to assign "Pro Audio" characteristic to thread
\r
4515 HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
\r
4517 DWORD taskIndex = 0;
\r
4518 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4519 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4520 FreeLibrary( AvrtDll );
\r
4523 // start capture stream if applicable
\r
4524 if ( captureAudioClient ) {
\r
4525 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4526 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4528 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4530 // initialize capture stream according to desire buffer size
\r
4531 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4532 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4534 if ( !captureClient ) {
\r
4535 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4536 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4537 desiredBufferPeriod,
\r
4538 desiredBufferPeriod,
\r
4541 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize capture audio client" );
\r
4543 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4544 ( void** ) &captureClient );
\r
4545 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture client handle" );
\r
4547 // configure captureEvent to trigger on every available capture buffer
\r
4548 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4549 if ( !captureEvent )
\r
4550 EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create capture event" );
\r
4552 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4553 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set capture event handle" );
\r
4555 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4556 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4559 unsigned int inBufferSize = 0;
\r
4560 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4561 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get capture buffer size" );
\r
4563 // scale outBufferSize according to stream->user sample rate ratio
\r
4564 // (outBufferSize must be a multiple of the input channel count)
\r
4565 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio );
\r
4566 if ( outBufferSize % stream_.nDeviceChannels[INPUT] )
\r
4567 outBufferSize += stream_.nDeviceChannels[INPUT] - ( outBufferSize % stream_.nDeviceChannels[INPUT] );
\r
4569 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4571 // set captureBuffer size
\r
4572 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4574 // reset the capture stream
\r
4575 hr = captureAudioClient->Reset();
\r
4576 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset capture stream" );
\r
4578 // start the capture stream
\r
4579 hr = captureAudioClient->Start();
\r
4580 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start capture stream" );
\r
4583 // start render stream if applicable
\r
4584 if ( renderAudioClient ) {
\r
4585 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4586 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
\r
4588 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4590 // initialize render stream according to desire buffer size
\r
4591 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4592 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4594 if ( !renderClient ) {
\r
4595 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4596 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4597 desiredBufferPeriod,
\r
4598 desiredBufferPeriod,
\r
4601 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize render audio client" );
\r
4603 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4604 ( void** ) &renderClient );
\r
4605 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render client handle" );
\r
4607 // configure renderEvent to trigger on every available render buffer
\r
4608 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4609 if ( !renderEvent )
\r
4610 EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create render event" );
\r
4612 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4613 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set render event handle" );
\r
4615 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4616 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4619 unsigned int outBufferSize = 0;
\r
4620 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4621 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get render buffer size" );
\r
4623 // scale inBufferSize according to user->stream sample rate ratio
\r
4624 // (inBufferSize must be a multiple of the output channel count)
\r
4625 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio );
\r
4626 if ( inBufferSize % stream_.nDeviceChannels[OUTPUT] ) {
\r
4627 inBufferSize += stream_.nDeviceChannels[OUTPUT] - ( inBufferSize % stream_.nDeviceChannels[OUTPUT] );
\r
4630 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4632 // set renderBuffer size
\r
4633 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4635 // reset the render stream
\r
4636 hr = renderAudioClient->Reset();
\r
4637 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset render stream" );
\r
4639 // start the render stream
\r
4640 hr = renderAudioClient->Start();
\r
4641 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start render stream" );
\r
4644 // declare local stream variables
\r
4645 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4647 BYTE* streamBuffer = NULL;
\r
4648 unsigned long captureFlags = 0;
\r
4650 unsigned int bufferFrameCount = 0;
\r
4651 unsigned int numFramesPadding = 0;
\r
4652 unsigned int convBufferSize = 0;
\r
4654 bool callbackPushed = false;
\r
4655 bool callbackPulled = false;
\r
4656 bool callbackStopped = false;
\r
4658 int callbackResult = 0;
\r
4660 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4661 char* convBuffer = NULL;
\r
4663 if ( stream_.mode == INPUT ) {
\r
4664 convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ) );
\r
4666 else if ( stream_.mode == OUTPUT ) {
\r
4667 convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) );
\r
4669 else if ( stream_.mode == DUPLEX ) {
\r
4670 convBuffer = ( char* ) malloc( max( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ),
\r
4671 ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) ) );
\r
4674 // stream process loop
\r
4675 while ( stream_.state != STREAM_STOPPING ) {
\r
4676 if ( !callbackPulled ) {
\r
4679 // 1. Pull callback buffer from inputBuffer
\r
4680 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4681 // Convert callback buffer to user format
\r
4683 if ( captureAudioClient ) {
\r
4684 // Pull callback buffer from inputBuffer
\r
4685 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4686 ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio ),
\r
4687 stream_.deviceFormat[INPUT] );
\r
4689 if ( callbackPulled ) {
\r
4690 // Convert callback buffer to user sample rate and channel count
\r
4691 convertBufferWasapi( stream_.deviceBuffer,
\r
4693 stream_.nDeviceChannels[INPUT],
\r
4694 stream_.nUserChannels[INPUT],
\r
4695 captureFormat->nSamplesPerSec,
\r
4696 stream_.sampleRate,
\r
4697 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4699 stream_.deviceFormat[INPUT] );
\r
4701 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4702 // Convert callback buffer to user format
\r
4703 convertBuffer( stream_.userBuffer[INPUT],
\r
4704 stream_.deviceBuffer,
\r
4705 stream_.convertInfo[INPUT] );
\r
4708 // no conversion, simple copy deviceBuffer to userBuffer
\r
4709 memcpy( stream_.userBuffer[INPUT],
\r
4710 stream_.deviceBuffer,
\r
4711 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4716 // if there is no capture stream, set callbackPulled flag
\r
4717 callbackPulled = true;
\r
4720 // Execute Callback
\r
4721 // ================
\r
4722 // 1. Execute user callback method
\r
4723 // 2. Handle return value from callback
\r
4725 // if callback has not requested the stream to stop
\r
4726 if ( callbackPulled && !callbackStopped ) {
\r
4727 // Execute user callback method
\r
4728 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4729 stream_.userBuffer[INPUT],
\r
4730 stream_.bufferSize,
\r
4732 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4733 stream_.callbackInfo.userData );
\r
4735 // Handle return value from callback
\r
4736 if ( callbackResult == 1 ) {
\r
4737 // instantiate a thread to stop this thread
\r
4738 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, NULL, NULL );
\r
4740 if ( !threadHandle ) {
\r
4741 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream stop thread" );
\r
4743 else if ( !CloseHandle( threadHandle ) ) {
\r
4744 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream stop thread handle" );
\r
4747 callbackStopped = true;
\r
4749 else if ( callbackResult == 2 ) {
\r
4750 // instantiate a thread to stop this thread
\r
4751 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, NULL, NULL );
\r
4753 if ( !threadHandle ) {
\r
4754 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream abort thread" );
\r
4756 else if ( !CloseHandle( threadHandle ) ) {
\r
4757 EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream abort thread handle" );
\r
4760 callbackStopped = true;
\r
4765 // Callback Output
\r
4766 // ===============
\r
4767 // 1. Convert callback buffer to stream format
\r
4768 // 2. Convert callback buffer to stream sample rate and channel count
\r
4769 // 3. Push callback buffer into outputBuffer
\r
4771 if ( renderAudioClient && callbackPulled ) {
\r
4772 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4773 // Convert callback buffer to stream format
\r
4774 convertBuffer( stream_.deviceBuffer,
\r
4775 stream_.userBuffer[OUTPUT],
\r
4776 stream_.convertInfo[OUTPUT] );
\r
4778 // Convert callback buffer to stream sample rate and channel count
\r
4779 convertBufferWasapi( convBuffer,
\r
4780 stream_.deviceBuffer,
\r
4781 stream_.nUserChannels[OUTPUT],
\r
4782 stream_.nDeviceChannels[OUTPUT],
\r
4783 stream_.sampleRate,
\r
4784 renderFormat->nSamplesPerSec,
\r
4785 stream_.bufferSize,
\r
4787 stream_.deviceFormat[OUTPUT] );
\r
4790 // Convert callback buffer to stream sample rate and channel count
\r
4791 convertBufferWasapi( convBuffer,
\r
4792 stream_.userBuffer[OUTPUT],
\r
4793 stream_.nUserChannels[OUTPUT],
\r
4794 stream_.nDeviceChannels[OUTPUT],
\r
4795 stream_.sampleRate,
\r
4796 renderFormat->nSamplesPerSec,
\r
4797 stream_.bufferSize,
\r
4799 stream_.deviceFormat[OUTPUT] );
\r
4802 // Push callback buffer into outputBuffer
\r
4803 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4804 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4805 stream_.deviceFormat[OUTPUT] );
\r
4810 // 1. Get capture buffer from stream
\r
4811 // 2. Push capture buffer into inputBuffer
\r
4812 // 3. If 2. was successful: Release capture buffer
\r
4814 if ( captureAudioClient ) {
\r
4815 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4816 if ( !callbackPulled ) {
\r
4817 WaitForSingleObject( captureEvent, INFINITE );
\r
4820 // Get capture buffer from stream
\r
4821 hr = captureClient->GetBuffer( &streamBuffer,
\r
4822 &bufferFrameCount,
\r
4823 &captureFlags, NULL, NULL );
\r
4824 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture buffer" );
\r
4826 if ( bufferFrameCount != 0 ) {
\r
4827 // Push capture buffer into inputBuffer
\r
4828 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4829 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4830 stream_.deviceFormat[INPUT] ) )
\r
4832 // Release capture buffer
\r
4833 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4834 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4838 // Inform WASAPI that capture was unsuccessful
\r
4839 hr = captureClient->ReleaseBuffer( 0 );
\r
4840 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4845 // Inform WASAPI that capture was unsuccessful
\r
4846 hr = captureClient->ReleaseBuffer( 0 );
\r
4847 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
\r
4853 // 1. Get render buffer from stream
\r
4854 // 2. Pull next buffer from outputBuffer
\r
4855 // 3. If 2. was successful: Fill render buffer with next buffer
\r
4856 // Release render buffer
\r
4858 if ( renderAudioClient ) {
\r
4859 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
4860 if ( callbackPulled && !callbackPushed ) {
\r
4861 WaitForSingleObject( renderEvent, INFINITE );
\r
4864 // Get render buffer from stream
\r
4865 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
4866 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer size" );
\r
4868 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
4869 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer padding" );
\r
4871 bufferFrameCount -= numFramesPadding;
\r
4873 if ( bufferFrameCount != 0 ) {
\r
4874 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
4875 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer" );
\r
4877 // Pull next buffer from outputBuffer
\r
4878 // Fill render buffer with next buffer
\r
4879 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
4880 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
4881 stream_.deviceFormat[OUTPUT] ) )
\r
4883 // Release render buffer
\r
4884 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
4885 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4889 // Inform WASAPI that render was unsuccessful
\r
4890 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
4891 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4896 // Inform WASAPI that render was unsuccessful
\r
4897 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
4898 EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
\r
4902 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
4903 if ( callbackPushed ) {
\r
4904 callbackPulled = false;
\r
4907 // tick stream time
\r
4908 RtApi::tickStreamTime();
\r
4913 CoTaskMemFree( captureFormat );
\r
4914 CoTaskMemFree( renderFormat );
\r
4916 delete convBuffer;
\r
4920 // update stream state
\r
4921 stream_.state = STREAM_STOPPED;
\r
4924 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
4928 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
4930 // Modified by Robin Davies, October 2005
\r
4931 // - Improvements to DirectX pointer chasing.
\r
4932 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
4933 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
4934 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
4935 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
4937 #include <dsound.h>
\r
4938 #include <assert.h>
\r
4939 #include <algorithm>
\r
4941 #if defined(__MINGW32__)
\r
4942 // missing from latest mingw winapi
\r
4943 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
4944 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
4945 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
4946 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
4949 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
4951 #ifdef _MSC_VER // if Microsoft Visual C++
\r
4952 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
4955 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
4957 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
4958 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
4959 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
4960 return pointer >= earlierPointer && pointer < laterPointer;
\r
4963 // A structure to hold various information related to the DirectSound
\r
4964 // API implementation.
\r
4966 unsigned int drainCounter; // Tracks callback counts when draining
\r
4967 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
4971 UINT bufferPointer[2];
\r
4972 DWORD dsBufferSize[2];
\r
4973 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
4977 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
4980 // Declarations for utility functions, callbacks, and structures
\r
4981 // specific to the DirectSound implementation.
\r
4982 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
4983 LPCTSTR description,
\r
4985 LPVOID lpContext );
\r
4987 static const char* getErrorString( int code );
\r
4989 static unsigned __stdcall callbackHandler( void *ptr );
\r
4998 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to the DirectSound enumeration callback: tells the callback
// whether the current pass enumerates capture or playback devices, and where
// to record what it finds.
struct DsProbeData {
  bool isInput;                          // true during the DirectSoundCapture pass
  std::vector<struct DsDevice>* dsDevices; // device list to populate
};
\r
5006 RtApiDs :: RtApiDs()
\r
5008 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5009 // accept whatever the mainline chose for a threading model.
\r
5010 coInitialized_ = false;
\r
5011 HRESULT hr = CoInitialize( NULL );
\r
5012 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5015 RtApiDs :: ~RtApiDs()
\r
5017 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5018 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5021 // The DirectSound default output is always the first device.
\r
5022 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5027 // The DirectSound default input is always the first input device,
\r
5028 // which is the first capture device enumerated.
\r
5029 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5034 unsigned int RtApiDs :: getDeviceCount( void )
\r
5036 // Set query flag for previously found devices to false, so that we
\r
5037 // can check for any devices that have disappeared.
\r
5038 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5039 dsDevices[i].found = false;
\r
5041 // Query DirectSound devices.
\r
5042 struct DsProbeData probeInfo;
\r
5043 probeInfo.isInput = false;
\r
5044 probeInfo.dsDevices = &dsDevices;
\r
5045 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5046 if ( FAILED( result ) ) {
\r
5047 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5048 errorText_ = errorStream_.str();
\r
5049 error( RtAudioError::WARNING );
\r
5052 // Query DirectSoundCapture devices.
\r
5053 probeInfo.isInput = true;
\r
5054 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5055 if ( FAILED( result ) ) {
\r
5056 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5057 errorText_ = errorStream_.str();
\r
5058 error( RtAudioError::WARNING );
\r
5061 // Clean out any devices that may have disappeared.
\r
5062 std::vector< int > indices;
\r
5063 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5064 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5065 unsigned int nErased = 0;
\r
5066 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5067 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5069 return static_cast<unsigned int>(dsDevices.size());
\r
5072 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5074 RtAudio::DeviceInfo info;
\r
5075 info.probed = false;
\r
5077 if ( dsDevices.size() == 0 ) {
\r
5078 // Force a query of all devices
\r
5080 if ( dsDevices.size() == 0 ) {
\r
5081 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5082 error( RtAudioError::INVALID_USE );
\r
5087 if ( device >= dsDevices.size() ) {
\r
5088 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5089 error( RtAudioError::INVALID_USE );
\r
5094 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5096 LPDIRECTSOUND output;
\r
5098 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5099 if ( FAILED( result ) ) {
\r
5100 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5101 errorText_ = errorStream_.str();
\r
5102 error( RtAudioError::WARNING );
\r
5106 outCaps.dwSize = sizeof( outCaps );
\r
5107 result = output->GetCaps( &outCaps );
\r
5108 if ( FAILED( result ) ) {
\r
5109 output->Release();
\r
5110 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5111 errorText_ = errorStream_.str();
\r
5112 error( RtAudioError::WARNING );
\r
5116 // Get output channel information.
\r
5117 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5119 // Get sample rate information.
\r
5120 info.sampleRates.clear();
\r
5121 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5122 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5123 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5124 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5127 // Get format information.
\r
5128 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5129 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5131 output->Release();
\r
5133 if ( getDefaultOutputDevice() == device )
\r
5134 info.isDefaultOutput = true;
\r
5136 if ( dsDevices[ device ].validId[1] == false ) {
\r
5137 info.name = dsDevices[ device ].name;
\r
5138 info.probed = true;
\r
5144 LPDIRECTSOUNDCAPTURE input;
\r
5145 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5146 if ( FAILED( result ) ) {
\r
5147 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5148 errorText_ = errorStream_.str();
\r
5149 error( RtAudioError::WARNING );
\r
5154 inCaps.dwSize = sizeof( inCaps );
\r
5155 result = input->GetCaps( &inCaps );
\r
5156 if ( FAILED( result ) ) {
\r
5158 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5159 errorText_ = errorStream_.str();
\r
5160 error( RtAudioError::WARNING );
\r
5164 // Get input channel information.
\r
5165 info.inputChannels = inCaps.dwChannels;
\r
5167 // Get sample rate and format information.
\r
5168 std::vector<unsigned int> rates;
\r
5169 if ( inCaps.dwChannels >= 2 ) {
\r
5170 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5171 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5172 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5173 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5174 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5175 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5176 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5177 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5179 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5180 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5181 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5182 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5183 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5185 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5186 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5187 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5188 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5189 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5192 else if ( inCaps.dwChannels == 1 ) {
\r
5193 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5194 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5195 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5196 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5197 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5198 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5199 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5200 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5202 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5203 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5204 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5205 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5206 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5208 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5209 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5210 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5211 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5212 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5215 else info.inputChannels = 0; // technically, this would be an error
\r
5219 if ( info.inputChannels == 0 ) return info;
\r
5221 // Copy the supported rates to the info structure but avoid duplication.
\r
5223 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5225 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5226 if ( rates[i] == info.sampleRates[j] ) {
\r
5231 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5233 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5235 // If device opens for both playback and capture, we determine the channels.
\r
5236 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5237 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5239 if ( device == 0 ) info.isDefaultInput = true;
\r
5241 // Copy name and return.
\r
5242 info.name = dsDevices[ device ].name;
\r
5243 info.probed = true;
\r
5247 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5248 unsigned int firstChannel, unsigned int sampleRate,
\r
5249 RtAudioFormat format, unsigned int *bufferSize,
\r
5250 RtAudio::StreamOptions *options )
\r
5252 if ( channels + firstChannel > 2 ) {
\r
5253 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5257 size_t nDevices = dsDevices.size();
\r
5258 if ( nDevices == 0 ) {
\r
5259 // This should not happen because a check is made before this function is called.
\r
5260 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5264 if ( device >= nDevices ) {
\r
5265 // This should not happen because a check is made before this function is called.
\r
5266 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5270 if ( mode == OUTPUT ) {
\r
5271 if ( dsDevices[ device ].validId[0] == false ) {
\r
5272 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5273 errorText_ = errorStream_.str();
\r
5277 else { // mode == INPUT
\r
5278 if ( dsDevices[ device ].validId[1] == false ) {
\r
5279 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5280 errorText_ = errorStream_.str();
\r
5285 // According to a note in PortAudio, using GetDesktopWindow()
\r
5286 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5287 // that occur when the application's window is not the foreground
\r
5288 // window. Also, if the application window closes before the
\r
5289 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5290 // problems when using GetDesktopWindow() but it seems fine now
\r
5291 // (January 2010). I'll leave it commented here.
\r
5292 // HWND hWnd = GetForegroundWindow();
\r
5293 HWND hWnd = GetDesktopWindow();
\r
5295 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5296 // two. This is a judgement call and a value of two is probably too
\r
5297 // low for capture, but it should work for playback.
\r
5299 if ( options ) nBuffers = options->numberOfBuffers;
\r
5300 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5301 if ( nBuffers < 2 ) nBuffers = 3;
\r
5303 // Check the lower range of the user-specified buffer size and set
\r
5304 // (arbitrarily) to a lower bound of 32.
\r
5305 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5307 // Create the wave format structure. The data format setting will
\r
5308 // be determined later.
\r
5309 WAVEFORMATEX waveFormat;
\r
5310 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5311 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5312 waveFormat.nChannels = channels + firstChannel;
\r
5313 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5315 // Determine the device buffer size. By default, we'll use the value
\r
5316 // defined above (32K), but we will grow it to make allowances for
\r
5317 // very large software buffer sizes.
\r
5318 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5319 DWORD dsPointerLeadTime = 0;
\r
5321 void *ohandle = 0, *bhandle = 0;
\r
5323 if ( mode == OUTPUT ) {
\r
5325 LPDIRECTSOUND output;
\r
5326 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5327 if ( FAILED( result ) ) {
\r
5328 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5329 errorText_ = errorStream_.str();
\r
5334 outCaps.dwSize = sizeof( outCaps );
\r
5335 result = output->GetCaps( &outCaps );
\r
5336 if ( FAILED( result ) ) {
\r
5337 output->Release();
\r
5338 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5339 errorText_ = errorStream_.str();
\r
5343 // Check channel information.
\r
5344 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5345 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5346 errorText_ = errorStream_.str();
\r
5350 // Check format information. Use 16-bit format unless not
\r
5351 // supported or user requests 8-bit.
\r
5352 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5353 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5354 waveFormat.wBitsPerSample = 16;
\r
5355 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5358 waveFormat.wBitsPerSample = 8;
\r
5359 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5361 stream_.userFormat = format;
\r
5363 // Update wave format structure and buffer information.
\r
5364 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5365 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5366 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5368 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5369 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5370 dsBufferSize *= 2;
\r
5372 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5373 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5374 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5375 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5376 if ( FAILED( result ) ) {
\r
5377 output->Release();
\r
5378 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5379 errorText_ = errorStream_.str();
\r
5383 // Even though we will write to the secondary buffer, we need to
\r
5384 // access the primary buffer to set the correct output format
\r
5385 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5386 // buffer description.
\r
5387 DSBUFFERDESC bufferDescription;
\r
5388 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5389 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5390 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5392 // Obtain the primary buffer
\r
5393 LPDIRECTSOUNDBUFFER buffer;
\r
5394 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5395 if ( FAILED( result ) ) {
\r
5396 output->Release();
\r
5397 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5398 errorText_ = errorStream_.str();
\r
5402 // Set the primary DS buffer sound format.
\r
5403 result = buffer->SetFormat( &waveFormat );
\r
5404 if ( FAILED( result ) ) {
\r
5405 output->Release();
\r
5406 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5407 errorText_ = errorStream_.str();
\r
5411 // Setup the secondary DS buffer description.
\r
5412 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5413 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5414 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5415 DSBCAPS_GLOBALFOCUS |
\r
5416 DSBCAPS_GETCURRENTPOSITION2 |
\r
5417 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5418 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5419 bufferDescription.lpwfxFormat = &waveFormat;
\r
5421 // Try to create the secondary DS buffer. If that doesn't work,
\r
5422 // try to use software mixing. Otherwise, there's a problem.
\r
5423 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5424 if ( FAILED( result ) ) {
\r
5425 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5426 DSBCAPS_GLOBALFOCUS |
\r
5427 DSBCAPS_GETCURRENTPOSITION2 |
\r
5428 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5429 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5430 if ( FAILED( result ) ) {
\r
5431 output->Release();
\r
5432 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5433 errorText_ = errorStream_.str();
\r
5438 // Get the buffer size ... might be different from what we specified.
\r
5440 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5441 result = buffer->GetCaps( &dsbcaps );
\r
5442 if ( FAILED( result ) ) {
\r
5443 output->Release();
\r
5444 buffer->Release();
\r
5445 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5446 errorText_ = errorStream_.str();
\r
5450 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5452 // Lock the DS buffer
\r
5455 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5456 if ( FAILED( result ) ) {
\r
5457 output->Release();
\r
5458 buffer->Release();
\r
5459 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5460 errorText_ = errorStream_.str();
\r
5464 // Zero the DS buffer
\r
5465 ZeroMemory( audioPtr, dataLen );
\r
5467 // Unlock the DS buffer
\r
5468 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5469 if ( FAILED( result ) ) {
\r
5470 output->Release();
\r
5471 buffer->Release();
\r
5472 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5473 errorText_ = errorStream_.str();
\r
5477 ohandle = (void *) output;
\r
5478 bhandle = (void *) buffer;
\r
5481 if ( mode == INPUT ) {
\r
5483 LPDIRECTSOUNDCAPTURE input;
\r
5484 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5485 if ( FAILED( result ) ) {
\r
5486 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5487 errorText_ = errorStream_.str();
\r
5492 inCaps.dwSize = sizeof( inCaps );
\r
5493 result = input->GetCaps( &inCaps );
\r
5494 if ( FAILED( result ) ) {
\r
5496 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5497 errorText_ = errorStream_.str();
\r
5501 // Check channel information.
\r
5502 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5503 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5507 // Check format information. Use 16-bit format unless user
\r
5508 // requests 8-bit.
\r
5509 DWORD deviceFormats;
\r
5510 if ( channels + firstChannel == 2 ) {
\r
5511 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5512 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5513 waveFormat.wBitsPerSample = 8;
\r
5514 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5516 else { // assume 16-bit is supported
\r
5517 waveFormat.wBitsPerSample = 16;
\r
5518 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5521 else { // channel == 1
\r
5522 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5523 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5524 waveFormat.wBitsPerSample = 8;
\r
5525 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5527 else { // assume 16-bit is supported
\r
5528 waveFormat.wBitsPerSample = 16;
\r
5529 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5532 stream_.userFormat = format;
\r
5534 // Update wave format structure and buffer information.
\r
5535 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5536 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5537 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5539 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5540 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5541 dsBufferSize *= 2;
\r
5543 // Setup the secondary DS buffer description.
\r
5544 DSCBUFFERDESC bufferDescription;
\r
5545 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5546 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5547 bufferDescription.dwFlags = 0;
\r
5548 bufferDescription.dwReserved = 0;
\r
5549 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5550 bufferDescription.lpwfxFormat = &waveFormat;
\r
5552 // Create the capture buffer.
\r
5553 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5554 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5555 if ( FAILED( result ) ) {
\r
5557 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5558 errorText_ = errorStream_.str();
\r
5562 // Get the buffer size ... might be different from what we specified.
\r
5563 DSCBCAPS dscbcaps;
\r
5564 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5565 result = buffer->GetCaps( &dscbcaps );
\r
5566 if ( FAILED( result ) ) {
\r
5568 buffer->Release();
\r
5569 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5570 errorText_ = errorStream_.str();
\r
5574 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5576 // NOTE: We could have a problem here if this is a duplex stream
\r
5577 // and the play and capture hardware buffer sizes are different
\r
5578 // (I'm actually not sure if that is a problem or not).
\r
5579 // Currently, we are not verifying that.
\r
5581 // Lock the capture buffer
\r
5584 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5585 if ( FAILED( result ) ) {
\r
5587 buffer->Release();
\r
5588 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5589 errorText_ = errorStream_.str();
\r
5593 // Zero the buffer
\r
5594 ZeroMemory( audioPtr, dataLen );
\r
5596 // Unlock the buffer
\r
5597 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5598 if ( FAILED( result ) ) {
\r
5600 buffer->Release();
\r
5601 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5602 errorText_ = errorStream_.str();
\r
5606 ohandle = (void *) input;
\r
5607 bhandle = (void *) buffer;
\r
5610 // Set various stream parameters
\r
5611 DsHandle *handle = 0;
\r
5612 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5613 stream_.nUserChannels[mode] = channels;
\r
5614 stream_.bufferSize = *bufferSize;
\r
5615 stream_.channelOffset[mode] = firstChannel;
\r
5616 stream_.deviceInterleaved[mode] = true;
\r
5617 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5618 else stream_.userInterleaved = true;
\r
5620 // Set flag for buffer conversion
\r
5621 stream_.doConvertBuffer[mode] = false;
\r
5622 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5623 stream_.doConvertBuffer[mode] = true;
\r
5624 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5625 stream_.doConvertBuffer[mode] = true;
\r
5626 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5627 stream_.nUserChannels[mode] > 1 )
\r
5628 stream_.doConvertBuffer[mode] = true;
\r
5630 // Allocate necessary internal buffers
\r
5631 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5632 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5633 if ( stream_.userBuffer[mode] == NULL ) {
\r
5634 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5638 if ( stream_.doConvertBuffer[mode] ) {
\r
5640 bool makeBuffer = true;
\r
5641 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5642 if ( mode == INPUT ) {
\r
5643 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5644 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5645 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5649 if ( makeBuffer ) {
\r
5650 bufferBytes *= *bufferSize;
\r
5651 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5652 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5653 if ( stream_.deviceBuffer == NULL ) {
\r
5654 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5660 // Allocate our DsHandle structures for the stream.
\r
5661 if ( stream_.apiHandle == 0 ) {
\r
5663 handle = new DsHandle;
\r
5665 catch ( std::bad_alloc& ) {
\r
5666 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5670 // Create a manual-reset event.
\r
5671 handle->condition = CreateEvent( NULL, // no security
\r
5672 TRUE, // manual-reset
\r
5673 FALSE, // non-signaled initially
\r
5674 NULL ); // unnamed
\r
5675 stream_.apiHandle = (void *) handle;
\r
5678 handle = (DsHandle *) stream_.apiHandle;
\r
5679 handle->id[mode] = ohandle;
\r
5680 handle->buffer[mode] = bhandle;
\r
5681 handle->dsBufferSize[mode] = dsBufferSize;
\r
5682 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5684 stream_.device[mode] = device;
\r
5685 stream_.state = STREAM_STOPPED;
\r
5686 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5687 // We had already set up an output stream.
\r
5688 stream_.mode = DUPLEX;
\r
5690 stream_.mode = mode;
\r
5691 stream_.nBuffers = nBuffers;
\r
5692 stream_.sampleRate = sampleRate;
\r
5694 // Setup the buffer conversion information structure.
\r
5695 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5697 // Setup the callback thread.
\r
5698 if ( stream_.callbackInfo.isRunning == false ) {
\r
5699 unsigned threadId;
\r
5700 stream_.callbackInfo.isRunning = true;
\r
5701 stream_.callbackInfo.object = (void *) this;
\r
5702 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5703 &stream_.callbackInfo, 0, &threadId );
\r
5704 if ( stream_.callbackInfo.thread == 0 ) {
\r
5705 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5709 // Boost DS thread priority
\r
5710 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5716 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5717 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5718 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5719 if ( buffer ) buffer->Release();
\r
5720 object->Release();
\r
5722 if ( handle->buffer[1] ) {
\r
5723 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5724 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5725 if ( buffer ) buffer->Release();
\r
5726 object->Release();
\r
5728 CloseHandle( handle->condition );
\r
5730 stream_.apiHandle = 0;
\r
5733 for ( int i=0; i<2; i++ ) {
\r
5734 if ( stream_.userBuffer[i] ) {
\r
5735 free( stream_.userBuffer[i] );
\r
5736 stream_.userBuffer[i] = 0;
\r
5740 if ( stream_.deviceBuffer ) {
\r
5741 free( stream_.deviceBuffer );
\r
5742 stream_.deviceBuffer = 0;
\r
5745 stream_.state = STREAM_CLOSED;
\r
5749 void RtApiDs :: closeStream()
// Tears down an open DirectSound stream: stops the callback thread,
// releases the playback/capture buffers and their device objects,
// destroys the drain-signal event, and frees the user/device buffers.
// NOTE(review): gaps in the embedded line numbering (e.g. 5750, 5754-5756,
// 5763, 5767-5768) indicate lines missing from this listing (opening
// brace, early return after the warning, null checks, closing braces) --
// confirm against the canonical RtAudio source before editing.

5751   if ( stream_.state == STREAM_CLOSED ) {

5752     errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5753     error( RtAudioError::WARNING );

// Stop the callback thread first so it cannot touch the DS buffers while
// they are being released below; isRunning = false makes the thread's
// loop exit, then we block until it finishes and close its handle.
5757   // Stop the callback thread.

5758   stream_.callbackInfo.isRunning = false;

5759   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5760   CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5762   DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release the output-side (index 0) secondary buffer and IDirectSound object.
5764     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5765       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5766       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5769       buffer->Release();

5771       object->Release();

// Release the capture-side (index 1) buffer and IDirectSoundCapture object.
5773     if ( handle->buffer[1] ) {

5774       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5775       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5778       buffer->Release();

5780       object->Release();

// Destroy the manual-reset event used to signal stream-drain completion.
5782     CloseHandle( handle->condition );

5784   stream_.apiHandle = 0;

// Free the per-mode user buffers (index 0 = output, 1 = input).
5787   for ( int i=0; i<2; i++ ) {

5788     if ( stream_.userBuffer[i] ) {

5789       free( stream_.userBuffer[i] );

5790       stream_.userBuffer[i] = 0;

// Free the shared device-format conversion buffer, if one was allocated.
5794   if ( stream_.deviceBuffer ) {

5795     free( stream_.deviceBuffer );

5796     stream_.deviceBuffer = 0;

// Mark the stream object as fully closed.
5799   stream_.mode = UNINITIALIZED;

5800   stream_.state = STREAM_CLOSED;
\r
5803 void RtApiDs :: startStream()
// Starts playback and/or capture on an opened stream: raises the Windows
// timer resolution, sets up duplex pre-roll bookkeeping, starts the
// DirectSound buffers looping, resets drain state, and marks the stream
// RUNNING.
// NOTE(review): gaps in the embedded numbering (e.g. 5804-5805, 5809-5811,
// 5835-5838, 5846-5849, 5854-5855) indicate missing lines (opening brace,
// early return after the warning, goto/unwind on failure, closing braces)
// -- confirm against the canonical RtAudio source before editing.

5806   if ( stream_.state == STREAM_RUNNING ) {

5807     errorText_ = "RtApiDs::startStream(): the stream is already running!";

5808     error( RtAudioError::WARNING );

5812   DsHandle *handle = (DsHandle *) stream_.apiHandle;

5814   // Increase scheduler frequency on lesser windows (a side-effect of

5815   // increasing timer accuracy). On greater windows (Win2K or later),

5816   // this is already in effect.

5817   timeBeginPeriod( 1 );

// Reset the synchronization state used by callbackEvent() on first roll.
5819   buffersRolling = false;

5820   duplexPrerollBytes = 0;

5822   if ( stream_.mode == DUPLEX ) {

5823     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

5824     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

5827   HRESULT result = 0;

// Start the output buffer playing in looping mode (buffer index 0).
5828   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

5830     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5831     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

5832     if ( FAILED( result ) ) {

5833       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

5834       errorText_ = errorStream_.str();

// Start the capture buffer in looping mode (buffer index 1).
5839   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

5841     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5842     result = buffer->Start( DSCBSTART_LOOPING );

5843     if ( FAILED( result ) ) {

5844       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

5845       errorText_ = errorStream_.str();

// Clear drain state and the drain-completion event, then go RUNNING.
5850   handle->drainCounter = 0;

5851   handle->internalDrain = false;

5852   ResetEvent( handle->condition );

5853   stream_.state = STREAM_RUNNING;

5856   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
5859 void RtApiDs :: stopStream()
// Stops a running stream.  For output, optionally waits for the stream to
// drain (signaled via handle->condition), then stops the DS buffer and
// zeroes it so stale audio is not replayed on the next start.  For input,
// stops the capture buffer and zeroes it likewise.  Finally restores the
// normal scheduler period.
// NOTE(review): gaps in the embedded numbering (e.g. 5860-5861, 5865-5867,
// 5869-5870, 5886-5888, 5895-5897) indicate missing lines -- the early
// return after the warning and, presumably, the declarations of the
// audioPtr/dataLen locals used by the Lock/Unlock calls below.  Confirm
// against the canonical RtAudio source before editing.

5862   if ( stream_.state == STREAM_STOPPED ) {

5863     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

5864     error( RtAudioError::WARNING );

5868   HRESULT result = 0;

5871   DsHandle *handle = (DsHandle *) stream_.apiHandle;

5872   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// If draining has not already been requested, request it (drainCounter = 2)
// and block here until the callback thread signals the condition event.
5873     if ( handle->drainCounter == 0 ) {

5874       handle->drainCounter = 2;

5875       WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

5878     stream_.state = STREAM_STOPPED;

5880     // Stop the buffer and clear memory

5881     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5882     result = buffer->Stop();

5883     if ( FAILED( result ) ) {

5884       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

5885       errorText_ = errorStream_.str();

5889     // Lock the buffer and clear it so that if we start to play again,

5890     // we won't have old data playing.

5891     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

5892     if ( FAILED( result ) ) {

5893       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

5894       errorText_ = errorStream_.str();

5898     // Zero the DS buffer

5899     ZeroMemory( audioPtr, dataLen );

5901     // Unlock the DS buffer

5902     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5903     if ( FAILED( result ) ) {

5904       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

5905       errorText_ = errorStream_.str();

5909     // If we start playing again, we must begin at beginning of buffer.

5910     handle->bufferPointer[0] = 0;

// Input side: stop the capture buffer and clear it the same way.
5913   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

5914     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5918     stream_.state = STREAM_STOPPED;

5920     result = buffer->Stop();

5921     if ( FAILED( result ) ) {

5922       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

5923       errorText_ = errorStream_.str();

5927     // Lock the buffer and clear it so that if we start to play again,

5928     // we won't have old data playing.

5929     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

5930     if ( FAILED( result ) ) {

5931       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

5932       errorText_ = errorStream_.str();

5936     // Zero the DS buffer

5937     ZeroMemory( audioPtr, dataLen );

5939     // Unlock the DS buffer

5940     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5941     if ( FAILED( result ) ) {

5942       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

5943       errorText_ = errorStream_.str();

5947     // If we start recording again, we must begin at beginning of buffer.

5948     handle->bufferPointer[1] = 0;

5952   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

5953   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
5956 void RtApiDs :: abortStream()
// Aborts a running stream as quickly as possible.  Unlike a plain stop,
// the output is not drained: drainCounter is set to 2 so the callback
// loop treats the stream as already draining (it writes zeros in that
// state -- see callbackEvent).
// NOTE(review): the tail of this function falls in a gap of the embedded
// numbering (after 5966); presumably it delegates the actual shutdown
// (e.g. to stopStream()) -- confirm against the canonical RtAudio source.

5959   if ( stream_.state == STREAM_STOPPED ) {

5960     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

5961     error( RtAudioError::WARNING );

5965   DsHandle *handle = (DsHandle *) stream_.apiHandle;

5966   handle->drainCounter = 2;
\r
5971 void RtApiDs :: callbackEvent()
\r
5973 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
5974 Sleep( 50 ); // sleep 50 milliseconds
\r
5978 if ( stream_.state == STREAM_CLOSED ) {
\r
5979 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
5980 error( RtAudioError::WARNING );
\r
5984 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
5985 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5987 // Check if we were draining the stream and signal is finished.
\r
5988 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
5990 stream_.state = STREAM_STOPPING;
\r
5991 if ( handle->internalDrain == false )
\r
5992 SetEvent( handle->condition );
\r
5998 // Invoke user callback to get fresh output data UNLESS we are
\r
5999 // draining stream.
\r
6000 if ( handle->drainCounter == 0 ) {
\r
6001 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6002 double streamTime = getStreamTime();
\r
6003 RtAudioStreamStatus status = 0;
\r
6004 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6005 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6006 handle->xrun[0] = false;
\r
6008 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6009 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6010 handle->xrun[1] = false;
\r
6012 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6013 stream_.bufferSize, streamTime, status, info->userData );
\r
6014 if ( cbReturnValue == 2 ) {
\r
6015 stream_.state = STREAM_STOPPING;
\r
6016 handle->drainCounter = 2;
\r
6020 else if ( cbReturnValue == 1 ) {
\r
6021 handle->drainCounter = 1;
\r
6022 handle->internalDrain = true;
\r
6027 DWORD currentWritePointer, safeWritePointer;
\r
6028 DWORD currentReadPointer, safeReadPointer;
\r
6029 UINT nextWritePointer;
\r
6031 LPVOID buffer1 = NULL;
\r
6032 LPVOID buffer2 = NULL;
\r
6033 DWORD bufferSize1 = 0;
\r
6034 DWORD bufferSize2 = 0;
\r
6039 if ( buffersRolling == false ) {
\r
6040 if ( stream_.mode == DUPLEX ) {
\r
6041 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6043 // It takes a while for the devices to get rolling. As a result,
\r
6044 // there's no guarantee that the capture and write device pointers
\r
6045 // will move in lockstep. Wait here for both devices to start
\r
6046 // rolling, and then set our buffer pointers accordingly.
\r
6047 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6048 // bytes later than the write buffer.
\r
6050 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6051 // take place between the two GetCurrentPosition calls... but I'm
\r
6052 // really not sure how to solve the problem. Temporarily boost to
\r
6053 // Realtime priority, maybe; but I'm not sure what priority the
\r
6054 // DirectSound service threads run at. We *should* be roughly
\r
6055 // within a ms or so of correct.
\r
6057 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6058 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6060 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6062 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6063 if ( FAILED( result ) ) {
\r
6064 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6065 errorText_ = errorStream_.str();
\r
6066 error( RtAudioError::SYSTEM_ERROR );
\r
6069 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6070 if ( FAILED( result ) ) {
\r
6071 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6072 errorText_ = errorStream_.str();
\r
6073 error( RtAudioError::SYSTEM_ERROR );
\r
6077 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6078 if ( FAILED( result ) ) {
\r
6079 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6080 errorText_ = errorStream_.str();
\r
6081 error( RtAudioError::SYSTEM_ERROR );
\r
6084 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6085 if ( FAILED( result ) ) {
\r
6086 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6087 errorText_ = errorStream_.str();
\r
6088 error( RtAudioError::SYSTEM_ERROR );
\r
6091 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6095 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6097 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6098 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6099 handle->bufferPointer[1] = safeReadPointer;
\r
6101 else if ( stream_.mode == OUTPUT ) {
\r
6103 // Set the proper nextWritePosition after initial startup.
\r
6104 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6105 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6106 if ( FAILED( result ) ) {
\r
6107 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6108 errorText_ = errorStream_.str();
\r
6109 error( RtAudioError::SYSTEM_ERROR );
\r
6112 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6113 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6116 buffersRolling = true;
\r
6119 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6121 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6123 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6124 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6125 bufferBytes *= formatBytes( stream_.userFormat );
\r
6126 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6129 // Setup parameters and do buffer conversion if necessary.
\r
6130 if ( stream_.doConvertBuffer[0] ) {
\r
6131 buffer = stream_.deviceBuffer;
\r
6132 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6133 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6134 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6137 buffer = stream_.userBuffer[0];
\r
6138 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6139 bufferBytes *= formatBytes( stream_.userFormat );
\r
6142 // No byte swapping necessary in DirectSound implementation.
\r
6144 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6145 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6147 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6148 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6150 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6151 nextWritePointer = handle->bufferPointer[0];
\r
6153 DWORD endWrite, leadPointer;
\r
6155 // Find out where the read and "safe write" pointers are.
\r
6156 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6157 if ( FAILED( result ) ) {
\r
6158 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6159 errorText_ = errorStream_.str();
\r
6160 error( RtAudioError::SYSTEM_ERROR );
\r
6164 // We will copy our output buffer into the region between
\r
6165 // safeWritePointer and leadPointer. If leadPointer is not
\r
6166 // beyond the next endWrite position, wait until it is.
\r
6167 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6168 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6169 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6170 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6171 endWrite = nextWritePointer + bufferBytes;
\r
6173 // Check whether the entire write region is behind the play pointer.
\r
6174 if ( leadPointer >= endWrite ) break;
\r
6176 // If we are here, then we must wait until the leadPointer advances
\r
6177 // beyond the end of our next write region. We use the
\r
6178 // Sleep() function to suspend operation until that happens.
\r
6179 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6180 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6181 if ( millis < 1.0 ) millis = 1.0;
\r
6182 Sleep( (DWORD) millis );
\r
6185 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6186 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6187 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6188 handle->xrun[0] = true;
\r
6189 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6190 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6191 handle->bufferPointer[0] = nextWritePointer;
\r
6192 endWrite = nextWritePointer + bufferBytes;
\r
6195 // Lock free space in the buffer
\r
6196 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6197 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6198 if ( FAILED( result ) ) {
\r
6199 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6200 errorText_ = errorStream_.str();
\r
6201 error( RtAudioError::SYSTEM_ERROR );
\r
6205 // Copy our buffer into the DS buffer
\r
6206 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6207 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6209 // Update our buffer offset and unlock sound buffer
\r
6210 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6211 if ( FAILED( result ) ) {
\r
6212 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6213 errorText_ = errorStream_.str();
\r
6214 error( RtAudioError::SYSTEM_ERROR );
\r
6217 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6218 handle->bufferPointer[0] = nextWritePointer;
\r
6220 if ( handle->drainCounter ) {
\r
6221 handle->drainCounter++;
\r
6226 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6228 // Setup parameters.
\r
6229 if ( stream_.doConvertBuffer[1] ) {
\r
6230 buffer = stream_.deviceBuffer;
\r
6231 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6232 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6235 buffer = stream_.userBuffer[1];
\r
6236 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6237 bufferBytes *= formatBytes( stream_.userFormat );
\r
6240 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6241 long nextReadPointer = handle->bufferPointer[1];
\r
6242 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6244 // Find out where the write and "safe read" pointers are.
\r
6245 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6246 if ( FAILED( result ) ) {
\r
6247 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6248 errorText_ = errorStream_.str();
\r
6249 error( RtAudioError::SYSTEM_ERROR );
\r
6253 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6254 DWORD endRead = nextReadPointer + bufferBytes;
\r
6256 // Handling depends on whether we are INPUT or DUPLEX.
\r
6257 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6258 // then a wait here will drag the write pointers into the forbidden zone.
\r
6260 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6261 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6262 // practical way to sync up the read and write pointers reliably, given the
\r
6263 // the very complex relationship between phase and increment of the read and write
\r
6266 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6267 // provide a pre-roll period of 0.5 seconds in which we return
\r
6268 // zeros from the read buffer while the pointers sync up.
\r
6270 if ( stream_.mode == DUPLEX ) {
\r
6271 if ( safeReadPointer < endRead ) {
\r
6272 if ( duplexPrerollBytes <= 0 ) {
\r
6273 // Pre-roll time over. Be more agressive.
\r
6274 int adjustment = endRead-safeReadPointer;
\r
6276 handle->xrun[1] = true;
\r
6278 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6279 // and perform fine adjustments later.
\r
6280 // - small adjustments: back off by twice as much.
\r
6281 if ( adjustment >= 2*bufferBytes )
\r
6282 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6284 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6286 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6290 // In pre=roll time. Just do it.
\r
6291 nextReadPointer = safeReadPointer - bufferBytes;
\r
6292 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6294 endRead = nextReadPointer + bufferBytes;
\r
6297 else { // mode == INPUT
\r
6298 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6299 // See comments for playback.
\r
6300 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6301 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6302 if ( millis < 1.0 ) millis = 1.0;
\r
6303 Sleep( (DWORD) millis );
\r
6305 // Wake up and find out where we are now.
\r
6306 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6307 if ( FAILED( result ) ) {
\r
6308 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6309 errorText_ = errorStream_.str();
\r
6310 error( RtAudioError::SYSTEM_ERROR );
\r
6314 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6318 // Lock free space in the buffer
\r
6319 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6320 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6321 if ( FAILED( result ) ) {
\r
6322 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6323 errorText_ = errorStream_.str();
\r
6324 error( RtAudioError::SYSTEM_ERROR );
\r
6328 if ( duplexPrerollBytes <= 0 ) {
\r
6329 // Copy our buffer into the DS buffer
\r
6330 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6331 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6334 memset( buffer, 0, bufferSize1 );
\r
6335 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6336 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6339 // Update our buffer offset and unlock sound buffer
\r
6340 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6341 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6342 if ( FAILED( result ) ) {
\r
6343 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6344 errorText_ = errorStream_.str();
\r
6345 error( RtAudioError::SYSTEM_ERROR );
\r
6348 handle->bufferPointer[1] = nextReadPointer;
\r
6350 // No byte swapping necessary in DirectSound implementation.
\r
6352 // If necessary, convert 8-bit data from unsigned to signed.
\r
6353 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6354 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6356 // Do buffer conversion if necessary.
\r
6357 if ( stream_.doConvertBuffer[1] )
\r
6358 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6362 RtApi::tickStreamTime();
\r
6365 // Definitions for utility functions and callbacks
\r
6366 // specific to the DirectSound implementation.
\r
6368 static unsigned __stdcall callbackHandler( void *ptr )
\r
6370 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6371 RtApiDs *object = (RtApiDs *) info->object;
\r
6372 bool* isRunning = &info->isRunning;
\r
6374 while ( *isRunning == true ) {
\r
6375 object->callbackEvent();
\r
6378 _endthreadex( 0 );
\r
6382 #include "tchar.h"
\r
6384 static std::string convertTChar( LPCTSTR name )
\r
6386 #if defined( UNICODE ) || defined( _UNICODE )
\r
6387 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6388 std::string s( length-1, '\0' );
\r
6389 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6391 std::string s( name );
\r
6397 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6398 LPCTSTR description,
\r
6399 LPCTSTR /*module*/,
\r
6400 LPVOID lpContext )
\r
6402 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6403 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6406 bool validDevice = false;
\r
6407 if ( probeInfo.isInput == true ) {
\r
6409 LPDIRECTSOUNDCAPTURE object;
\r
6411 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6412 if ( hr != DS_OK ) return TRUE;
\r
6414 caps.dwSize = sizeof(caps);
\r
6415 hr = object->GetCaps( &caps );
\r
6416 if ( hr == DS_OK ) {
\r
6417 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6418 validDevice = true;
\r
6420 object->Release();
\r
6424 LPDIRECTSOUND object;
\r
6425 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6426 if ( hr != DS_OK ) return TRUE;
\r
6428 caps.dwSize = sizeof(caps);
\r
6429 hr = object->GetCaps( &caps );
\r
6430 if ( hr == DS_OK ) {
\r
6431 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6432 validDevice = true;
\r
6434 object->Release();
\r
6437 // If good device, then save its name and guid.
\r
6438 std::string name = convertTChar( description );
\r
6439 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6440 if ( lpguid == NULL )
\r
6441 name = "Default Device";
\r
6442 if ( validDevice ) {
\r
6443 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6444 if ( dsDevices[i].name == name ) {
\r
6445 dsDevices[i].found = true;
\r
6446 if ( probeInfo.isInput ) {
\r
6447 dsDevices[i].id[1] = lpguid;
\r
6448 dsDevices[i].validId[1] = true;
\r
6451 dsDevices[i].id[0] = lpguid;
\r
6452 dsDevices[i].validId[0] = true;
\r
6459 device.name = name;
\r
6460 device.found = true;
\r
6461 if ( probeInfo.isInput ) {
\r
6462 device.id[1] = lpguid;
\r
6463 device.validId[1] = true;
\r
6466 device.id[0] = lpguid;
\r
6467 device.validId[0] = true;
\r
6469 dsDevices.push_back( device );
\r
6475 static const char* getErrorString( int code )
\r
6479 case DSERR_ALLOCATED:
\r
6480 return "Already allocated";
\r
6482 case DSERR_CONTROLUNAVAIL:
\r
6483 return "Control unavailable";
\r
6485 case DSERR_INVALIDPARAM:
\r
6486 return "Invalid parameter";
\r
6488 case DSERR_INVALIDCALL:
\r
6489 return "Invalid call";
\r
6491 case DSERR_GENERIC:
\r
6492 return "Generic error";
\r
6494 case DSERR_PRIOLEVELNEEDED:
\r
6495 return "Priority level needed";
\r
6497 case DSERR_OUTOFMEMORY:
\r
6498 return "Out of memory";
\r
6500 case DSERR_BADFORMAT:
\r
6501 return "The sample rate or the channel format is not supported";
\r
6503 case DSERR_UNSUPPORTED:
\r
6504 return "Not supported";
\r
6506 case DSERR_NODRIVER:
\r
6507 return "No driver";
\r
6509 case DSERR_ALREADYINITIALIZED:
\r
6510 return "Already initialized";
\r
6512 case DSERR_NOAGGREGATION:
\r
6513 return "No aggregation";
\r
6515 case DSERR_BUFFERLOST:
\r
6516 return "Buffer lost";
\r
6518 case DSERR_OTHERAPPHASPRIO:
\r
6519 return "Another application already has priority";
\r
6521 case DSERR_UNINITIALIZED:
\r
6522 return "Uninitialized";
\r
6525 return "DirectSound unknown error";
\r
6528 //******************** End of __WINDOWS_DS__ *********************//
\r
6532 #if defined(__LINUX_ALSA__)
\r
6534 #include <alsa/asoundlib.h>
\r
6535 #include <unistd.h>
\r
6537 // A structure to hold various information related to the ALSA API
\r
6538 // implementation.
\r
6539 struct AlsaHandle {
\r
6540 snd_pcm_t *handles[2];
\r
6541 bool synchronized;
\r
6543 pthread_cond_t runnable_cv;
\r
6547 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6550 static void *alsaCallbackHandler( void * ptr );
\r
6552 RtApiAlsa :: RtApiAlsa()
\r
6554 // Nothing to do here.
\r
6557 RtApiAlsa :: ~RtApiAlsa()
\r
6559 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6562 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6564 unsigned nDevices = 0;
\r
6565 int result, subdevice, card;
\r
6567 snd_ctl_t *handle;
\r
6569 // Count cards and devices
\r
6571 snd_card_next( &card );
\r
6572 while ( card >= 0 ) {
\r
6573 sprintf( name, "hw:%d", card );
\r
6574 result = snd_ctl_open( &handle, name, 0 );
\r
6575 if ( result < 0 ) {
\r
6576 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6577 errorText_ = errorStream_.str();
\r
6578 error( RtAudioError::WARNING );
\r
6583 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6584 if ( result < 0 ) {
\r
6585 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6586 errorText_ = errorStream_.str();
\r
6587 error( RtAudioError::WARNING );
\r
6590 if ( subdevice < 0 )
\r
6595 snd_ctl_close( handle );
\r
6596 snd_card_next( &card );
\r
6599 result = snd_ctl_open( &handle, "default", 0 );
\r
6600 if (result == 0) {
\r
6602 snd_ctl_close( handle );
\r
6608 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6610 RtAudio::DeviceInfo info;
\r
6611 info.probed = false;
\r
6613 unsigned nDevices = 0;
\r
6614 int result, subdevice, card;
\r
6616 snd_ctl_t *chandle;
\r
6618 // Count cards and devices
\r
6620 snd_card_next( &card );
\r
6621 while ( card >= 0 ) {
\r
6622 sprintf( name, "hw:%d", card );
\r
6623 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6624 if ( result < 0 ) {
\r
6625 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6626 errorText_ = errorStream_.str();
\r
6627 error( RtAudioError::WARNING );
\r
6632 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6633 if ( result < 0 ) {
\r
6634 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6635 errorText_ = errorStream_.str();
\r
6636 error( RtAudioError::WARNING );
\r
6639 if ( subdevice < 0 ) break;
\r
6640 if ( nDevices == device ) {
\r
6641 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6647 snd_ctl_close( chandle );
\r
6648 snd_card_next( &card );
\r
6651 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6652 if ( result == 0 ) {
\r
6653 if ( nDevices == device ) {
\r
6654 strcpy( name, "default" );
\r
6660 if ( nDevices == 0 ) {
\r
6661 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6662 error( RtAudioError::INVALID_USE );
\r
6666 if ( device >= nDevices ) {
\r
6667 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6668 error( RtAudioError::INVALID_USE );
\r
6674 // If a stream is already open, we cannot probe the stream devices.
\r
6675 // Thus, use the saved results.
\r
6676 if ( stream_.state != STREAM_CLOSED &&
\r
6677 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6678 snd_ctl_close( chandle );
\r
6679 if ( device >= devices_.size() ) {
\r
6680 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6681 error( RtAudioError::WARNING );
\r
6684 return devices_[ device ];
\r
6687 int openMode = SND_PCM_ASYNC;
\r
6688 snd_pcm_stream_t stream;
\r
6689 snd_pcm_info_t *pcminfo;
\r
6690 snd_pcm_info_alloca( &pcminfo );
\r
6691 snd_pcm_t *phandle;
\r
6692 snd_pcm_hw_params_t *params;
\r
6693 snd_pcm_hw_params_alloca( ¶ms );
\r
6695 // First try for playback unless default device (which has subdev -1)
\r
6696 stream = SND_PCM_STREAM_PLAYBACK;
\r
6697 snd_pcm_info_set_stream( pcminfo, stream );
\r
6698 if ( subdevice != -1 ) {
\r
6699 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6700 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6702 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6703 if ( result < 0 ) {
\r
6704 // Device probably doesn't support playback.
\r
6705 goto captureProbe;
\r
6709 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6710 if ( result < 0 ) {
\r
6711 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6712 errorText_ = errorStream_.str();
\r
6713 error( RtAudioError::WARNING );
\r
6714 goto captureProbe;
\r
6717 // The device is open ... fill the parameter structure.
\r
6718 result = snd_pcm_hw_params_any( phandle, params );
\r
6719 if ( result < 0 ) {
\r
6720 snd_pcm_close( phandle );
\r
6721 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6722 errorText_ = errorStream_.str();
\r
6723 error( RtAudioError::WARNING );
\r
6724 goto captureProbe;
\r
6727 // Get output channel information.
\r
6728 unsigned int value;
\r
6729 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6730 if ( result < 0 ) {
\r
6731 snd_pcm_close( phandle );
\r
6732 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6733 errorText_ = errorStream_.str();
\r
6734 error( RtAudioError::WARNING );
\r
6735 goto captureProbe;
\r
6737 info.outputChannels = value;
\r
6738 snd_pcm_close( phandle );
\r
6741 stream = SND_PCM_STREAM_CAPTURE;
\r
6742 snd_pcm_info_set_stream( pcminfo, stream );
\r
6744 // Now try for capture unless default device (with subdev = -1)
\r
6745 if ( subdevice != -1 ) {
\r
6746 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6747 snd_ctl_close( chandle );
\r
6748 if ( result < 0 ) {
\r
6749 // Device probably doesn't support capture.
\r
6750 if ( info.outputChannels == 0 ) return info;
\r
6751 goto probeParameters;
\r
6755 snd_ctl_close( chandle );
\r
6757 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6758 if ( result < 0 ) {
\r
6759 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6760 errorText_ = errorStream_.str();
\r
6761 error( RtAudioError::WARNING );
\r
6762 if ( info.outputChannels == 0 ) return info;
\r
6763 goto probeParameters;
\r
6766 // The device is open ... fill the parameter structure.
\r
6767 result = snd_pcm_hw_params_any( phandle, params );
\r
6768 if ( result < 0 ) {
\r
6769 snd_pcm_close( phandle );
\r
6770 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6771 errorText_ = errorStream_.str();
\r
6772 error( RtAudioError::WARNING );
\r
6773 if ( info.outputChannels == 0 ) return info;
\r
6774 goto probeParameters;
\r
6777 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6778 if ( result < 0 ) {
\r
6779 snd_pcm_close( phandle );
\r
6780 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6781 errorText_ = errorStream_.str();
\r
6782 error( RtAudioError::WARNING );
\r
6783 if ( info.outputChannels == 0 ) return info;
\r
6784 goto probeParameters;
\r
6786 info.inputChannels = value;
\r
6787 snd_pcm_close( phandle );
\r
6789 // If device opens for both playback and capture, we determine the channels.
\r
6790 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6791 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6793 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6794 if ( device == 0 && info.outputChannels > 0 )
\r
6795 info.isDefaultOutput = true;
\r
6796 if ( device == 0 && info.inputChannels > 0 )
\r
6797 info.isDefaultInput = true;
\r
6800 // At this point, we just need to figure out the supported data
\r
6801 // formats and sample rates. We'll proceed by opening the device in
\r
6802 // the direction with the maximum number of channels, or playback if
\r
6803 // they are equal. This might limit our sample rate options, but so
\r
6806 if ( info.outputChannels >= info.inputChannels )
\r
6807 stream = SND_PCM_STREAM_PLAYBACK;
\r
6809 stream = SND_PCM_STREAM_CAPTURE;
\r
6810 snd_pcm_info_set_stream( pcminfo, stream );
\r
6812 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6813 if ( result < 0 ) {
\r
6814 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6815 errorText_ = errorStream_.str();
\r
6816 error( RtAudioError::WARNING );
\r
6820 // The device is open ... fill the parameter structure.
\r
6821 result = snd_pcm_hw_params_any( phandle, params );
\r
6822 if ( result < 0 ) {
\r
6823 snd_pcm_close( phandle );
\r
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6825 errorText_ = errorStream_.str();
\r
6826 error( RtAudioError::WARNING );
\r
6830 // Test our discrete set of sample rate values.
\r
6831 info.sampleRates.clear();
\r
6832 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
6833 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
6834 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
6836 if ( info.sampleRates.size() == 0 ) {
\r
6837 snd_pcm_close( phandle );
\r
6838 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
6839 errorText_ = errorStream_.str();
\r
6840 error( RtAudioError::WARNING );
\r
6844 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
6845 snd_pcm_format_t format;
\r
6846 info.nativeFormats = 0;
\r
6847 format = SND_PCM_FORMAT_S8;
\r
6848 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6849 info.nativeFormats |= RTAUDIO_SINT8;
\r
6850 format = SND_PCM_FORMAT_S16;
\r
6851 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6852 info.nativeFormats |= RTAUDIO_SINT16;
\r
6853 format = SND_PCM_FORMAT_S24;
\r
6854 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6855 info.nativeFormats |= RTAUDIO_SINT24;
\r
6856 format = SND_PCM_FORMAT_S32;
\r
6857 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6858 info.nativeFormats |= RTAUDIO_SINT32;
\r
6859 format = SND_PCM_FORMAT_FLOAT;
\r
6860 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6861 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6862 format = SND_PCM_FORMAT_FLOAT64;
\r
6863 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
6864 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
6866 // Check that we have at least one supported format
\r
6867 if ( info.nativeFormats == 0 ) {
\r
6868 snd_pcm_close( phandle );
\r
6869 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
6870 errorText_ = errorStream_.str();
\r
6871 error( RtAudioError::WARNING );
\r
6875 // Get the device name
\r
6877 result = snd_card_get_name( card, &cardname );
\r
6878 if ( result >= 0 ) {
\r
6879 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
6884 // That's all ... close the device and return
\r
6885 snd_pcm_close( phandle );
\r
6886 info.probed = true;
\r
6890 void RtApiAlsa :: saveDeviceInfo( void )
\r
6894 unsigned int nDevices = getDeviceCount();
\r
6895 devices_.resize( nDevices );
\r
6896 for ( unsigned int i=0; i<nDevices; i++ )
\r
6897 devices_[i] = getDeviceInfo( i );
\r
6900 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6901 unsigned int firstChannel, unsigned int sampleRate,
\r
6902 RtAudioFormat format, unsigned int *bufferSize,
\r
6903 RtAudio::StreamOptions *options )
\r
6906 #if defined(__RTAUDIO_DEBUG__)
\r
6907 snd_output_t *out;
\r
6908 snd_output_stdio_attach(&out, stderr, 0);
\r
6911 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
6913 unsigned nDevices = 0;
\r
6914 int result, subdevice, card;
\r
6916 snd_ctl_t *chandle;
\r
6918 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
6919 snprintf(name, sizeof(name), "%s", "default");
\r
6921 // Count cards and devices
\r
6923 snd_card_next( &card );
\r
6924 while ( card >= 0 ) {
\r
6925 sprintf( name, "hw:%d", card );
\r
6926 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6927 if ( result < 0 ) {
\r
6928 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6929 errorText_ = errorStream_.str();
\r
6934 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6935 if ( result < 0 ) break;
\r
6936 if ( subdevice < 0 ) break;
\r
6937 if ( nDevices == device ) {
\r
6938 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6939 snd_ctl_close( chandle );
\r
6944 snd_ctl_close( chandle );
\r
6945 snd_card_next( &card );
\r
6948 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6949 if ( result == 0 ) {
\r
6950 if ( nDevices == device ) {
\r
6951 strcpy( name, "default" );
\r
6957 if ( nDevices == 0 ) {
\r
6958 // This should not happen because a check is made before this function is called.
\r
6959 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
6963 if ( device >= nDevices ) {
\r
6964 // This should not happen because a check is made before this function is called.
\r
6965 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
6972 // The getDeviceInfo() function will not work for a device that is
\r
6973 // already open. Thus, we'll probe the system before opening a
\r
6974 // stream and save the results for use by getDeviceInfo().
\r
6975 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
6976 this->saveDeviceInfo();
\r
6978 snd_pcm_stream_t stream;
\r
6979 if ( mode == OUTPUT )
\r
6980 stream = SND_PCM_STREAM_PLAYBACK;
\r
6982 stream = SND_PCM_STREAM_CAPTURE;
\r
6984 snd_pcm_t *phandle;
\r
6985 int openMode = SND_PCM_ASYNC;
\r
6986 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
6987 if ( result < 0 ) {
\r
6988 if ( mode == OUTPUT )
\r
6989 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
6991 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
6992 errorText_ = errorStream_.str();
\r
6996 // Fill the parameter structure.
\r
6997 snd_pcm_hw_params_t *hw_params;
\r
6998 snd_pcm_hw_params_alloca( &hw_params );
\r
6999 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7000 if ( result < 0 ) {
\r
7001 snd_pcm_close( phandle );
\r
7002 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7003 errorText_ = errorStream_.str();
\r
7007 #if defined(__RTAUDIO_DEBUG__)
\r
7008 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7009 snd_pcm_hw_params_dump( hw_params, out );
\r
7012 // Set access ... check user preference.
\r
7013 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7014 stream_.userInterleaved = false;
\r
7015 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7016 if ( result < 0 ) {
\r
7017 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7018 stream_.deviceInterleaved[mode] = true;
\r
7021 stream_.deviceInterleaved[mode] = false;
\r
7024 stream_.userInterleaved = true;
\r
7025 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7026 if ( result < 0 ) {
\r
7027 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7028 stream_.deviceInterleaved[mode] = false;
\r
7031 stream_.deviceInterleaved[mode] = true;
\r
7034 if ( result < 0 ) {
\r
7035 snd_pcm_close( phandle );
\r
7036 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7037 errorText_ = errorStream_.str();
\r
7041 // Determine how to set the device format.
\r
7042 stream_.userFormat = format;
\r
7043 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7045 if ( format == RTAUDIO_SINT8 )
\r
7046 deviceFormat = SND_PCM_FORMAT_S8;
\r
7047 else if ( format == RTAUDIO_SINT16 )
\r
7048 deviceFormat = SND_PCM_FORMAT_S16;
\r
7049 else if ( format == RTAUDIO_SINT24 )
\r
7050 deviceFormat = SND_PCM_FORMAT_S24;
\r
7051 else if ( format == RTAUDIO_SINT32 )
\r
7052 deviceFormat = SND_PCM_FORMAT_S32;
\r
7053 else if ( format == RTAUDIO_FLOAT32 )
\r
7054 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7055 else if ( format == RTAUDIO_FLOAT64 )
\r
7056 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7058 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7059 stream_.deviceFormat[mode] = format;
\r
7063 // The user requested format is not natively supported by the device.
\r
7064 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7065 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7066 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7070 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7071 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7072 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7076 deviceFormat = SND_PCM_FORMAT_S32;
\r
7077 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7078 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7082 deviceFormat = SND_PCM_FORMAT_S24;
\r
7083 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7084 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7088 deviceFormat = SND_PCM_FORMAT_S16;
\r
7089 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7090 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7094 deviceFormat = SND_PCM_FORMAT_S8;
\r
7095 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7096 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7100 // If we get here, no supported format was found.
\r
7101 snd_pcm_close( phandle );
\r
7102 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7103 errorText_ = errorStream_.str();
\r
7107 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7108 if ( result < 0 ) {
\r
7109 snd_pcm_close( phandle );
\r
7110 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7111 errorText_ = errorStream_.str();
\r
7115 // Determine whether byte-swaping is necessary.
\r
7116 stream_.doByteSwap[mode] = false;
\r
7117 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7118 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7119 if ( result == 0 )
\r
7120 stream_.doByteSwap[mode] = true;
\r
7121 else if (result < 0) {
\r
7122 snd_pcm_close( phandle );
\r
7123 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7124 errorText_ = errorStream_.str();
\r
7129 // Set the sample rate.
\r
7130 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7131 if ( result < 0 ) {
\r
7132 snd_pcm_close( phandle );
\r
7133 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7134 errorText_ = errorStream_.str();
\r
7138 // Determine the number of channels for this device. We support a possible
\r
7139 // minimum device channel number > than the value requested by the user.
\r
7140 stream_.nUserChannels[mode] = channels;
\r
7141 unsigned int value;
\r
7142 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7143 unsigned int deviceChannels = value;
\r
7144 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7145 snd_pcm_close( phandle );
\r
7146 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7147 errorText_ = errorStream_.str();
\r
7151 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7152 if ( result < 0 ) {
\r
7153 snd_pcm_close( phandle );
\r
7154 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7155 errorText_ = errorStream_.str();
\r
7158 deviceChannels = value;
\r
7159 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7160 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7162 // Set the device channels.
\r
7163 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7164 if ( result < 0 ) {
\r
7165 snd_pcm_close( phandle );
\r
7166 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7167 errorText_ = errorStream_.str();
\r
7171 // Set the buffer (or period) size.
\r
7173 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7174 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7175 if ( result < 0 ) {
\r
7176 snd_pcm_close( phandle );
\r
7177 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7178 errorText_ = errorStream_.str();
\r
7181 *bufferSize = periodSize;
\r
7183 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7184 unsigned int periods = 0;
\r
7185 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7186 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7187 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7188 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7189 if ( result < 0 ) {
\r
7190 snd_pcm_close( phandle );
\r
7191 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7192 errorText_ = errorStream_.str();
\r
7196 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7197 // MUST be the same in both directions!
\r
7198 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7199 snd_pcm_close( phandle );
\r
7200 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7201 errorText_ = errorStream_.str();
\r
7205 stream_.bufferSize = *bufferSize;
\r
7207 // Install the hardware configuration
\r
7208 result = snd_pcm_hw_params( phandle, hw_params );
\r
7209 if ( result < 0 ) {
\r
7210 snd_pcm_close( phandle );
\r
7211 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7212 errorText_ = errorStream_.str();
\r
7216 #if defined(__RTAUDIO_DEBUG__)
\r
7217 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7218 snd_pcm_hw_params_dump( hw_params, out );
\r
7221 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7222 snd_pcm_sw_params_t *sw_params = NULL;
\r
7223 snd_pcm_sw_params_alloca( &sw_params );
\r
7224 snd_pcm_sw_params_current( phandle, sw_params );
\r
7225 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7226 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7227 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7229 // The following two settings were suggested by Theo Veenker
\r
7230 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7231 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7233 // here are two options for a fix
\r
7234 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7235 snd_pcm_uframes_t val;
\r
7236 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7237 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7239 result = snd_pcm_sw_params( phandle, sw_params );
\r
7240 if ( result < 0 ) {
\r
7241 snd_pcm_close( phandle );
\r
7242 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7243 errorText_ = errorStream_.str();
\r
7247 #if defined(__RTAUDIO_DEBUG__)
\r
7248 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7249 snd_pcm_sw_params_dump( sw_params, out );
\r
7252 // Set flags for buffer conversion
\r
7253 stream_.doConvertBuffer[mode] = false;
\r
7254 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7255 stream_.doConvertBuffer[mode] = true;
\r
7256 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7257 stream_.doConvertBuffer[mode] = true;
\r
7258 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7259 stream_.nUserChannels[mode] > 1 )
\r
7260 stream_.doConvertBuffer[mode] = true;
\r
7262 // Allocate the ApiHandle if necessary and then save.
\r
7263 AlsaHandle *apiInfo = 0;
\r
7264 if ( stream_.apiHandle == 0 ) {
\r
7266 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7268 catch ( std::bad_alloc& ) {
\r
7269 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7273 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7274 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7278 stream_.apiHandle = (void *) apiInfo;
\r
7279 apiInfo->handles[0] = 0;
\r
7280 apiInfo->handles[1] = 0;
\r
7283 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7285 apiInfo->handles[mode] = phandle;
\r
7288 // Allocate necessary internal buffers.
\r
7289 unsigned long bufferBytes;
\r
7290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7292 if ( stream_.userBuffer[mode] == NULL ) {
\r
7293 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7297 if ( stream_.doConvertBuffer[mode] ) {
\r
7299 bool makeBuffer = true;
\r
7300 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7301 if ( mode == INPUT ) {
\r
7302 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7303 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7304 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7308 if ( makeBuffer ) {
\r
7309 bufferBytes *= *bufferSize;
\r
7310 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7311 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7312 if ( stream_.deviceBuffer == NULL ) {
\r
7313 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7319 stream_.sampleRate = sampleRate;
\r
7320 stream_.nBuffers = periods;
\r
7321 stream_.device[mode] = device;
\r
7322 stream_.state = STREAM_STOPPED;
\r
7324 // Setup the buffer conversion information structure.
\r
7325 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7327 // Setup thread if necessary.
\r
7328 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7329 // We had already set up an output stream.
\r
7330 stream_.mode = DUPLEX;
\r
7331 // Link the streams if possible.
\r
7332 apiInfo->synchronized = false;
\r
7333 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7334 apiInfo->synchronized = true;
\r
7336 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7337 error( RtAudioError::WARNING );
\r
7341 stream_.mode = mode;
\r
7343 // Setup callback thread.
\r
7344 stream_.callbackInfo.object = (void *) this;
\r
7346 // Set the thread attributes for joinable and realtime scheduling
\r
7347 // priority (optional). The higher priority will only take affect
\r
7348 // if the program is run as root or suid. Note, under Linux
\r
7349 // processes with CAP_SYS_NICE privilege, a user can change
\r
7350 // scheduling policy and priority (thus need not be root). See
\r
7351 // POSIX "capabilities".
\r
7352 pthread_attr_t attr;
\r
7353 pthread_attr_init( &attr );
\r
7354 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7356 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7357 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7358 // We previously attempted to increase the audio callback priority
\r
7359 // to SCHED_RR here via the attributes. However, while no errors
\r
7360 // were reported in doing so, it did not work. So, now this is
\r
7361 // done in the alsaCallbackHandler function.
\r
7362 stream_.callbackInfo.doRealtime = true;
\r
7363 int priority = options->priority;
\r
7364 int min = sched_get_priority_min( SCHED_RR );
\r
7365 int max = sched_get_priority_max( SCHED_RR );
\r
7366 if ( priority < min ) priority = min;
\r
7367 else if ( priority > max ) priority = max;
\r
7368 stream_.callbackInfo.priority = priority;
\r
7372 stream_.callbackInfo.isRunning = true;
\r
7373 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7374 pthread_attr_destroy( &attr );
\r
7376 stream_.callbackInfo.isRunning = false;
\r
7377 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7386 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7387 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7388 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7390 stream_.apiHandle = 0;
\r
7393 if ( phandle) snd_pcm_close( phandle );
\r
7395 for ( int i=0; i<2; i++ ) {
\r
7396 if ( stream_.userBuffer[i] ) {
\r
7397 free( stream_.userBuffer[i] );
\r
7398 stream_.userBuffer[i] = 0;
\r
7402 if ( stream_.deviceBuffer ) {
\r
7403 free( stream_.deviceBuffer );
\r
7404 stream_.deviceBuffer = 0;
\r
7407 stream_.state = STREAM_CLOSED;
\r
7411 void RtApiAlsa :: closeStream()
\r
7413 if ( stream_.state == STREAM_CLOSED ) {
\r
7414 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7415 error( RtAudioError::WARNING );
\r
7419 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7420 stream_.callbackInfo.isRunning = false;
\r
7421 MUTEX_LOCK( &stream_.mutex );
\r
7422 if ( stream_.state == STREAM_STOPPED ) {
\r
7423 apiInfo->runnable = true;
\r
7424 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7426 MUTEX_UNLOCK( &stream_.mutex );
\r
7427 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7429 if ( stream_.state == STREAM_RUNNING ) {
\r
7430 stream_.state = STREAM_STOPPED;
\r
7431 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7432 snd_pcm_drop( apiInfo->handles[0] );
\r
7433 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7434 snd_pcm_drop( apiInfo->handles[1] );
\r
7438 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7439 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7440 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7442 stream_.apiHandle = 0;
\r
7445 for ( int i=0; i<2; i++ ) {
\r
7446 if ( stream_.userBuffer[i] ) {
\r
7447 free( stream_.userBuffer[i] );
\r
7448 stream_.userBuffer[i] = 0;
\r
7452 if ( stream_.deviceBuffer ) {
\r
7453 free( stream_.deviceBuffer );
\r
7454 stream_.deviceBuffer = 0;
\r
7457 stream_.mode = UNINITIALIZED;
\r
7458 stream_.state = STREAM_CLOSED;
\r
7461 void RtApiAlsa :: startStream()
\r
7463 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7466 if ( stream_.state == STREAM_RUNNING ) {
\r
7467 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7468 error( RtAudioError::WARNING );
\r
7472 MUTEX_LOCK( &stream_.mutex );
\r
7475 snd_pcm_state_t state;
\r
7476 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7477 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7479 state = snd_pcm_state( handle[0] );
\r
7480 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7481 result = snd_pcm_prepare( handle[0] );
\r
7482 if ( result < 0 ) {
\r
7483 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7484 errorText_ = errorStream_.str();
\r
7490 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7491 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7492 state = snd_pcm_state( handle[1] );
\r
7493 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7494 result = snd_pcm_prepare( handle[1] );
\r
7495 if ( result < 0 ) {
\r
7496 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7497 errorText_ = errorStream_.str();
\r
7503 stream_.state = STREAM_RUNNING;
\r
7506 apiInfo->runnable = true;
\r
7507 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7508 MUTEX_UNLOCK( &stream_.mutex );
\r
7510 if ( result >= 0 ) return;
\r
7511 error( RtAudioError::SYSTEM_ERROR );
\r
7514 void RtApiAlsa :: stopStream()
\r
7517 if ( stream_.state == STREAM_STOPPED ) {
\r
7518 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7519 error( RtAudioError::WARNING );
\r
7523 stream_.state = STREAM_STOPPED;
\r
7524 MUTEX_LOCK( &stream_.mutex );
\r
7527 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7528 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7529 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7530 if ( apiInfo->synchronized )
\r
7531 result = snd_pcm_drop( handle[0] );
\r
7533 result = snd_pcm_drain( handle[0] );
\r
7534 if ( result < 0 ) {
\r
7535 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7536 errorText_ = errorStream_.str();
\r
7541 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7542 result = snd_pcm_drop( handle[1] );
\r
7543 if ( result < 0 ) {
\r
7544 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7545 errorText_ = errorStream_.str();
\r
7551 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7552 MUTEX_UNLOCK( &stream_.mutex );
\r
7554 if ( result >= 0 ) return;
\r
7555 error( RtAudioError::SYSTEM_ERROR );
\r
7558 void RtApiAlsa :: abortStream()
\r
7561 if ( stream_.state == STREAM_STOPPED ) {
\r
7562 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7563 error( RtAudioError::WARNING );
\r
7567 stream_.state = STREAM_STOPPED;
\r
7568 MUTEX_LOCK( &stream_.mutex );
\r
7571 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7572 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7573 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7574 result = snd_pcm_drop( handle[0] );
\r
7575 if ( result < 0 ) {
\r
7576 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7577 errorText_ = errorStream_.str();
\r
7582 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7583 result = snd_pcm_drop( handle[1] );
\r
7584 if ( result < 0 ) {
\r
7585 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7586 errorText_ = errorStream_.str();
\r
7592 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7593 MUTEX_UNLOCK( &stream_.mutex );
\r
7595 if ( result >= 0 ) return;
\r
7596 error( RtAudioError::SYSTEM_ERROR );
\r
7599 void RtApiAlsa :: callbackEvent()
\r
7601 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7602 if ( stream_.state == STREAM_STOPPED ) {
\r
7603 MUTEX_LOCK( &stream_.mutex );
\r
7604 while ( !apiInfo->runnable )
\r
7605 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7607 if ( stream_.state != STREAM_RUNNING ) {
\r
7608 MUTEX_UNLOCK( &stream_.mutex );
\r
7611 MUTEX_UNLOCK( &stream_.mutex );
\r
7614 if ( stream_.state == STREAM_CLOSED ) {
\r
7615 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7616 error( RtAudioError::WARNING );
\r
7620 int doStopStream = 0;
\r
7621 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7622 double streamTime = getStreamTime();
\r
7623 RtAudioStreamStatus status = 0;
\r
7624 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7625 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7626 apiInfo->xrun[0] = false;
\r
7628 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7629 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7630 apiInfo->xrun[1] = false;
\r
7632 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7633 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7635 if ( doStopStream == 2 ) {
\r
7640 MUTEX_LOCK( &stream_.mutex );
\r
7642 // The state might change while waiting on a mutex.
\r
7643 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7648 snd_pcm_t **handle;
\r
7649 snd_pcm_sframes_t frames;
\r
7650 RtAudioFormat format;
\r
7651 handle = (snd_pcm_t **) apiInfo->handles;
\r
7653 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7655 // Setup parameters.
\r
7656 if ( stream_.doConvertBuffer[1] ) {
\r
7657 buffer = stream_.deviceBuffer;
\r
7658 channels = stream_.nDeviceChannels[1];
\r
7659 format = stream_.deviceFormat[1];
\r
7662 buffer = stream_.userBuffer[1];
\r
7663 channels = stream_.nUserChannels[1];
\r
7664 format = stream_.userFormat;
\r
7667 // Read samples from device in interleaved/non-interleaved format.
\r
7668 if ( stream_.deviceInterleaved[1] )
\r
7669 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7671 void *bufs[channels];
\r
7672 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7673 for ( int i=0; i<channels; i++ )
\r
7674 bufs[i] = (void *) (buffer + (i * offset));
\r
7675 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7678 if ( result < (int) stream_.bufferSize ) {
\r
7679 // Either an error or overrun occured.
\r
7680 if ( result == -EPIPE ) {
\r
7681 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7682 if ( state == SND_PCM_STATE_XRUN ) {
\r
7683 apiInfo->xrun[1] = true;
\r
7684 result = snd_pcm_prepare( handle[1] );
\r
7685 if ( result < 0 ) {
\r
7686 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7687 errorText_ = errorStream_.str();
\r
7691 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7692 errorText_ = errorStream_.str();
\r
7696 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7697 errorText_ = errorStream_.str();
\r
7699 error( RtAudioError::WARNING );
\r
7703 // Do byte swapping if necessary.
\r
7704 if ( stream_.doByteSwap[1] )
\r
7705 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7707 // Do buffer conversion if necessary.
\r
7708 if ( stream_.doConvertBuffer[1] )
\r
7709 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7711 // Check stream latency
\r
7712 result = snd_pcm_delay( handle[1], &frames );
\r
7713 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7718 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7720 // Setup parameters and do buffer conversion if necessary.
\r
7721 if ( stream_.doConvertBuffer[0] ) {
\r
7722 buffer = stream_.deviceBuffer;
\r
7723 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7724 channels = stream_.nDeviceChannels[0];
\r
7725 format = stream_.deviceFormat[0];
\r
7728 buffer = stream_.userBuffer[0];
\r
7729 channels = stream_.nUserChannels[0];
\r
7730 format = stream_.userFormat;
\r
7733 // Do byte swapping if necessary.
\r
7734 if ( stream_.doByteSwap[0] )
\r
7735 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7737 // Write samples to device in interleaved/non-interleaved format.
\r
7738 if ( stream_.deviceInterleaved[0] )
\r
7739 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7741 void *bufs[channels];
\r
7742 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7743 for ( int i=0; i<channels; i++ )
\r
7744 bufs[i] = (void *) (buffer + (i * offset));
\r
7745 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7748 if ( result < (int) stream_.bufferSize ) {
\r
7749 // Either an error or underrun occured.
\r
7750 if ( result == -EPIPE ) {
\r
7751 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7752 if ( state == SND_PCM_STATE_XRUN ) {
\r
7753 apiInfo->xrun[0] = true;
\r
7754 result = snd_pcm_prepare( handle[0] );
\r
7755 if ( result < 0 ) {
\r
7756 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7757 errorText_ = errorStream_.str();
\r
7761 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7762 errorText_ = errorStream_.str();
\r
7766 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7767 errorText_ = errorStream_.str();
\r
7769 error( RtAudioError::WARNING );
\r
7773 // Check stream latency
\r
7774 result = snd_pcm_delay( handle[0], &frames );
\r
7775 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7779 MUTEX_UNLOCK( &stream_.mutex );
\r
7781 RtApi::tickStreamTime();
\r
7782 if ( doStopStream == 1 ) this->stopStream();
\r
7785 static void *alsaCallbackHandler( void *ptr )
\r
7787 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7788 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7789 bool *isRunning = &info->isRunning;
\r
7791 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7792 if ( &info->doRealtime ) {
\r
7793 pthread_t tID = pthread_self(); // ID of this thread
\r
7794 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7795 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7799 while ( *isRunning == true ) {
\r
7800 pthread_testcancel();
\r
7801 object->callbackEvent();
\r
7804 pthread_exit( NULL );
\r
7807 //******************** End of __LINUX_ALSA__ *********************//
\r
7810 #if defined(__LINUX_PULSE__)
\r
7812 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
7813 // and Tristan Matthews.
\r
7815 #include <pulse/error.h>
\r
7816 #include <pulse/simple.h>
\r
7819 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
7820 44100, 48000, 96000, 0};
\r
7822 struct rtaudio_pa_format_mapping_t {
\r
7823 RtAudioFormat rtaudio_format;
\r
7824 pa_sample_format_t pa_format;
\r
7827 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
7828 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
7829 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
7830 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
7831 {0, PA_SAMPLE_INVALID}};
\r
7833 struct PulseAudioHandle {
\r
7834 pa_simple *s_play;
\r
7837 pthread_cond_t runnable_cv;
\r
7839 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
7842 RtApiPulse::~RtApiPulse()
\r
7844 if ( stream_.state != STREAM_CLOSED )
\r
7848 unsigned int RtApiPulse::getDeviceCount( void )
\r
7853 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
7855 RtAudio::DeviceInfo info;
\r
7856 info.probed = true;
\r
7857 info.name = "PulseAudio";
\r
7858 info.outputChannels = 2;
\r
7859 info.inputChannels = 2;
\r
7860 info.duplexChannels = 2;
\r
7861 info.isDefaultOutput = true;
\r
7862 info.isDefaultInput = true;
\r
7864 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
7865 info.sampleRates.push_back( *sr );
\r
7867 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
7872 static void *pulseaudio_callback( void * user )
\r
7874 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
7875 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
7876 volatile bool *isRunning = &cbi->isRunning;
\r
7878 while ( *isRunning ) {
\r
7879 pthread_testcancel();
\r
7880 context->callbackEvent();
\r
7883 pthread_exit( NULL );
\r
7886 void RtApiPulse::closeStream( void )
\r
7888 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
7890 stream_.callbackInfo.isRunning = false;
\r
7892 MUTEX_LOCK( &stream_.mutex );
\r
7893 if ( stream_.state == STREAM_STOPPED ) {
\r
7894 pah->runnable = true;
\r
7895 pthread_cond_signal( &pah->runnable_cv );
\r
7897 MUTEX_UNLOCK( &stream_.mutex );
\r
7899 pthread_join( pah->thread, 0 );
\r
7900 if ( pah->s_play ) {
\r
7901 pa_simple_flush( pah->s_play, NULL );
\r
7902 pa_simple_free( pah->s_play );
\r
7905 pa_simple_free( pah->s_rec );
\r
7907 pthread_cond_destroy( &pah->runnable_cv );
\r
7909 stream_.apiHandle = 0;
\r
7912 if ( stream_.userBuffer[0] ) {
\r
7913 free( stream_.userBuffer[0] );
\r
7914 stream_.userBuffer[0] = 0;
\r
7916 if ( stream_.userBuffer[1] ) {
\r
7917 free( stream_.userBuffer[1] );
\r
7918 stream_.userBuffer[1] = 0;
\r
7921 stream_.state = STREAM_CLOSED;
\r
7922 stream_.mode = UNINITIALIZED;
\r
7925 void RtApiPulse::callbackEvent( void )
\r
7927 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
7929 if ( stream_.state == STREAM_STOPPED ) {
\r
7930 MUTEX_LOCK( &stream_.mutex );
\r
7931 while ( !pah->runnable )
\r
7932 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
7934 if ( stream_.state != STREAM_RUNNING ) {
\r
7935 MUTEX_UNLOCK( &stream_.mutex );
\r
7938 MUTEX_UNLOCK( &stream_.mutex );
\r
7941 if ( stream_.state == STREAM_CLOSED ) {
\r
7942 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
7943 "this shouldn't happen!";
\r
7944 error( RtAudioError::WARNING );
\r
7948 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7949 double streamTime = getStreamTime();
\r
7950 RtAudioStreamStatus status = 0;
\r
7951 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
7952 stream_.bufferSize, streamTime, status,
\r
7953 stream_.callbackInfo.userData );
\r
7955 if ( doStopStream == 2 ) {
\r
7960 MUTEX_LOCK( &stream_.mutex );
\r
7961 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
7962 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
7964 if ( stream_.state != STREAM_RUNNING )
\r
7969 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7970 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
7971 convertBuffer( stream_.deviceBuffer,
\r
7972 stream_.userBuffer[OUTPUT],
\r
7973 stream_.convertInfo[OUTPUT] );
\r
7974 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
7975 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
7977 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
7978 formatBytes( stream_.userFormat );
\r
7980 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
7981 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
7982 pa_strerror( pa_error ) << ".";
\r
7983 errorText_ = errorStream_.str();
\r
7984 error( RtAudioError::WARNING );
\r
7988 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
7989 if ( stream_.doConvertBuffer[INPUT] )
\r
7990 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
7991 formatBytes( stream_.deviceFormat[INPUT] );
\r
7993 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
7994 formatBytes( stream_.userFormat );
\r
7996 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
7997 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
7998 pa_strerror( pa_error ) << ".";
\r
7999 errorText_ = errorStream_.str();
\r
8000 error( RtAudioError::WARNING );
\r
8002 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8003 convertBuffer( stream_.userBuffer[INPUT],
\r
8004 stream_.deviceBuffer,
\r
8005 stream_.convertInfo[INPUT] );
\r
8010 MUTEX_UNLOCK( &stream_.mutex );
\r
8011 RtApi::tickStreamTime();
\r
8013 if ( doStopStream == 1 )
\r
8017 void RtApiPulse::startStream( void )
\r
8019 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8021 if ( stream_.state == STREAM_CLOSED ) {
\r
8022 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8023 error( RtAudioError::INVALID_USE );
\r
8026 if ( stream_.state == STREAM_RUNNING ) {
\r
8027 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8028 error( RtAudioError::WARNING );
\r
8032 MUTEX_LOCK( &stream_.mutex );
\r
8034 stream_.state = STREAM_RUNNING;
\r
8036 pah->runnable = true;
\r
8037 pthread_cond_signal( &pah->runnable_cv );
\r
8038 MUTEX_UNLOCK( &stream_.mutex );
\r
8041 void RtApiPulse::stopStream( void )
\r
8043 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8045 if ( stream_.state == STREAM_CLOSED ) {
\r
8046 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8047 error( RtAudioError::INVALID_USE );
\r
8050 if ( stream_.state == STREAM_STOPPED ) {
\r
8051 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8052 error( RtAudioError::WARNING );
\r
8056 stream_.state = STREAM_STOPPED;
\r
8057 MUTEX_LOCK( &stream_.mutex );
\r
8059 if ( pah && pah->s_play ) {
\r
8061 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8062 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8063 pa_strerror( pa_error ) << ".";
\r
8064 errorText_ = errorStream_.str();
\r
8065 MUTEX_UNLOCK( &stream_.mutex );
\r
8066 error( RtAudioError::SYSTEM_ERROR );
\r
8071 stream_.state = STREAM_STOPPED;
\r
8072 MUTEX_UNLOCK( &stream_.mutex );
\r
8075 void RtApiPulse::abortStream( void )
\r
8077 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8079 if ( stream_.state == STREAM_CLOSED ) {
\r
8080 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8081 error( RtAudioError::INVALID_USE );
\r
8084 if ( stream_.state == STREAM_STOPPED ) {
\r
8085 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8086 error( RtAudioError::WARNING );
\r
8090 stream_.state = STREAM_STOPPED;
\r
8091 MUTEX_LOCK( &stream_.mutex );
\r
8093 if ( pah && pah->s_play ) {
\r
8095 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8096 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8097 pa_strerror( pa_error ) << ".";
\r
8098 errorText_ = errorStream_.str();
\r
8099 MUTEX_UNLOCK( &stream_.mutex );
\r
8100 error( RtAudioError::SYSTEM_ERROR );
\r
8105 stream_.state = STREAM_STOPPED;
\r
8106 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open a PulseAudio connection (via the blocking "simple" API) for the
// requested mode and fill in the stream_ bookkeeping.  Returns false on
// failure.  Only device 0 (the default server device), 1 or 2 channels,
// and firstChannel == 0 are supported.
// NOTE(review): this extraction has dropped interior lines (closing
// braces, FAILURE returns, loop-break statements) throughout — verify
// each error path against the canonical RtAudio source before relying
// on this text.
8109 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8110 unsigned int channels, unsigned int firstChannel,
8111 unsigned int sampleRate, RtAudioFormat format,
8112 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8114 PulseAudioHandle *pah = 0;
8115 unsigned long bufferBytes = 0;
8116 pa_sample_spec ss;
// Parameter validation: default device, single direction, mono/stereo only.
8118 if ( device != 0 ) return false;
8119 if ( mode != INPUT && mode != OUTPUT ) return false;
8120 if ( channels != 1 && channels != 2 ) {
8121 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8124 ss.channels = channels;
8126 if ( firstChannel != 0 ) return false;
// Validate the requested rate against the static SUPPORTED_SAMPLERATES table.
8128 bool sr_found = false;
8129 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8130 if ( sampleRate == *sr ) {
8132 stream_.sampleRate = sampleRate;
8133 ss.rate = sampleRate;
8137 if ( !sr_found ) {
8138 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look for a native PulseAudio sample format matching the request;
// otherwise fall back to float32 with internal conversion.
8142 bool sf_found = 0;
8143 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8144 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8145 if ( format == sf->rtaudio_format ) {
8147 stream_.userFormat = sf->rtaudio_format;
8148 stream_.deviceFormat[mode] = stream_.userFormat;
8149 ss.format = sf->pa_format;
8153 if ( !sf_found ) { // Use internal data format conversion.
8154 stream_.userFormat = format;
8155 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8156 ss.format = PA_SAMPLE_FLOAT32LE;
8159 // Set other stream parameters.
8160 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8161 else stream_.userInterleaved = true;
8162 stream_.deviceInterleaved[mode] = true;
8163 stream_.nBuffers = 1;
8164 stream_.doByteSwap[mode] = false;
8165 stream_.nUserChannels[mode] = channels;
8166 stream_.nDeviceChannels[mode] = channels + firstChannel;
8167 stream_.channelOffset[mode] = 0;
8168 std::string streamName = "RtAudio";
8170 // Set flags for buffer conversion.
8171 stream_.doConvertBuffer[mode] = false;
8172 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8173 stream_.doConvertBuffer[mode] = true;
8174 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8175 stream_.doConvertBuffer[mode] = true;
8177 // Allocate necessary internal buffers.
8178 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8179 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8180 if ( stream_.userBuffer[mode] == NULL ) {
8181 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8184 stream_.bufferSize = *bufferSize;
// A conversion (device) buffer is only needed when user and device
// layouts differ; an existing output-side buffer is reused if it is
// already large enough.
8186 if ( stream_.doConvertBuffer[mode] ) {
8188 bool makeBuffer = true;
8189 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8190 if ( mode == INPUT ) {
8191 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8192 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8193 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8197 if ( makeBuffer ) {
8198 bufferBytes *= *bufferSize;
8199 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8200 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8201 if ( stream_.deviceBuffer == NULL ) {
8202 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8208 stream_.device[mode] = device;
8210 // Setup the buffer conversion information structure.
8211 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Allocate the API handle (shared by both directions) on first open.
8213 if ( !stream_.apiHandle ) {
8214 PulseAudioHandle *pah = new PulseAudioHandle;
8216 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8220 stream_.apiHandle = pah;
8221 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8222 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8226 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8229 if ( !options->streamName.empty() ) streamName = options->streamName;
// Connect to the PulseAudio server.  The record stream gets an explicit
// fragment size so input latency tracks the requested buffer size.
8232 pa_buffer_attr buffer_attr;
8233 buffer_attr.fragsize = bufferBytes;
8234 buffer_attr.maxlength = -1;
8236 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8237 if ( !pah->s_rec ) {
8238 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8243 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8244 if ( !pah->s_play ) {
8245 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the overall stream mode: the first open sets it; opening the
// opposite direction afterwards promotes the stream to DUPLEX.
8253 if ( stream_.mode == UNINITIALIZED )
8254 stream_.mode = mode;
8255 else if ( stream_.mode == mode )
8258 stream_.mode = DUPLEX;
// Spawn the callback thread once, on the first successful open.
8260 if ( !stream_.callbackInfo.isRunning ) {
8261 stream_.callbackInfo.object = this;
8262 stream_.callbackInfo.isRunning = true;
8263 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8264 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8269 stream_.state = STREAM_STOPPED;
// Error path: destroy the handle and free any buffers allocated above.
8273 if ( pah && stream_.callbackInfo.isRunning ) {
8274 pthread_cond_destroy( &pah->runnable_cv );
8276 stream_.apiHandle = 0;
8279 for ( int i=0; i<2; i++ ) {
8280 if ( stream_.userBuffer[i] ) {
8281 free( stream_.userBuffer[i] );
8282 stream_.userBuffer[i] = 0;
8286 if ( stream_.deviceBuffer ) {
8287 free( stream_.deviceBuffer );
8288 stream_.deviceBuffer = 0;
\r
8294 //******************** End of __LINUX_PULSE__ *********************//
\r
8297 #if defined(__LINUX_OSS__)
\r
8299 #include <unistd.h>
\r
8300 #include <sys/ioctl.h>
\r
8301 #include <unistd.h>
\r
8302 #include <fcntl.h>
\r
8303 #include <sys/soundcard.h>
\r
8304 #include <errno.h>
\r
8307 static void *ossCallbackHandler(void * ptr);
\r
8309 // A structure to hold various information related to the OSS API
\r
8310 // implementation.
\r
8311 struct OssHandle {
\r
8312 int id[2]; // device ids
\r
8315 pthread_cond_t runnable;
\r
8318 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8321 RtApiOss :: RtApiOss()
\r
8323 // Nothing to do here.
\r
8326 RtApiOss :: ~RtApiOss()
\r
8328 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8331 unsigned int RtApiOss :: getDeviceCount( void )
\r
8333 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8334 if ( mixerfd == -1 ) {
\r
8335 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8336 error( RtAudioError::WARNING );
\r
8340 oss_sysinfo sysinfo;
\r
8341 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8343 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8344 error( RtAudioError::WARNING );
\r
8349 return sysinfo.numaudios;
\r
// Probe one OSS device via /dev/mixer SNDCTL_* ioctls and return its
// capabilities (channel counts, native formats, supported sample
// rates).  info.probed remains false on any failure.
// NOTE(review): this extraction dropped interior lines (close(mixerfd)
// cleanup, early returns, closing braces) — verify error paths against
// the canonical RtAudio source.
8352 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8354 RtAudio::DeviceInfo info;
8355 info.probed = false;
// The mixer device is the entry point for OSS v4 system/device queries.
8357 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8358 if ( mixerfd == -1 ) {
8359 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8360 error( RtAudioError::WARNING );
8364 oss_sysinfo sysinfo;
8365 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8366 if ( result == -1 ) {
8368 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8369 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8373 unsigned nDevices = sysinfo.numaudios;
8374 if ( nDevices == 0 ) {
8376 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8377 error( RtAudioError::INVALID_USE );
8381 if ( device >= nDevices ) {
8383 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8384 error( RtAudioError::INVALID_USE );
// Fetch per-device capabilities.
8388 oss_audioinfo ainfo;
8389 ainfo.dev = device;
8390 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8392 if ( result == -1 ) {
8393 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8394 errorText_ = errorStream_.str();
8395 error( RtAudioError::WARNING );
// Channel capabilities; duplex count is the min of input and output.
8400 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8401 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8402 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8403 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8404 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8407 // Probe data formats ... do for input
8408 unsigned long mask = ainfo.iformats;
8409 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8410 info.nativeFormats |= RTAUDIO_SINT16;
8411 if ( mask & AFMT_S8 )
8412 info.nativeFormats |= RTAUDIO_SINT8;
8413 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8414 info.nativeFormats |= RTAUDIO_SINT32;
8415 if ( mask & AFMT_FLOAT )
8416 info.nativeFormats |= RTAUDIO_FLOAT32;
8417 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8418 info.nativeFormats |= RTAUDIO_SINT24;
8420 // Check that we have at least one supported format
8421 if ( info.nativeFormats == 0 ) {
8422 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8423 errorText_ = errorStream_.str();
8424 error( RtAudioError::WARNING );
8428 // Probe the supported sample rates.
8429 info.sampleRates.clear();
// If the device lists explicit rates, intersect with our table;
// otherwise fall back to the device's min/max rate range below.
8430 if ( ainfo.nrates ) {
8431 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8432 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8433 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8434 info.sampleRates.push_back( SAMPLE_RATES[k] );
8441 // Check min and max rate values;
8442 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8443 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
8444 info.sampleRates.push_back( SAMPLE_RATES[k] );
8448 if ( info.sampleRates.size() == 0 ) {
8449 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8450 errorText_ = errorStream_.str();
8451 error( RtAudioError::WARNING );
// Success: mark the probe complete and record the device name.
8454 info.probed = true;
8455 info.name = ainfo.name;
\r
// Open an OSS device for the requested mode: configure channels,
// sample format, fragment size and rate via SNDCTL_DSP_* ioctls, then
// allocate buffers, the OssHandle, and (on first open) the callback
// thread.  Returns false on failure.
// NOTE(review): this extraction dropped interior lines throughout
// (closing braces, "goto error" statements, the error: cleanup label,
// close(mixerfd)/close(fd) calls, local declarations such as "int
// flags = O_NONBLOCK;", "int fd;", "int mask;", "int buffers = 0;") —
// verify against the canonical RtAudio source before relying on this
// text.
8462 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8463 unsigned int firstChannel, unsigned int sampleRate,
8464 RtAudioFormat format, unsigned int *bufferSize,
8465 RtAudio::StreamOptions *options )
// Re-query system/device info via the mixer before opening.
8467 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8468 if ( mixerfd == -1 ) {
8469 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8473 oss_sysinfo sysinfo;
8474 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8475 if ( result == -1 ) {
8477 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8481 unsigned nDevices = sysinfo.numaudios;
8482 if ( nDevices == 0 ) {
8483 // This should not happen because a check is made before this function is called.
8485 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8489 if ( device >= nDevices ) {
8490 // This should not happen because a check is made before this function is called.
8492 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8496 oss_audioinfo ainfo;
8497 ainfo.dev = device;
8498 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8500 if ( result == -1 ) {
8501 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8502 errorText_ = errorStream_.str();
8506 // Check if device supports input or output
8507 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8508 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8509 if ( mode == OUTPUT )
8510 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8512 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8513 errorText_ = errorStream_.str();
// Choose open flags.  OSS requires closing and reopening the fd in
// read/write mode when the same device is used for duplex operation.
8518 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8519 if ( mode == OUTPUT )
8520 flags |= O_WRONLY;
8521 else { // mode == INPUT
8522 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8523 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8524 close( handle->id[0] );
8525 handle->id[0] = 0;
8526 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8527 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8528 errorText_ = errorStream_.str();
8531 // Check that the number previously set channels is the same.
8532 if ( stream_.nUserChannels[0] != channels ) {
8533 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8534 errorText_ = errorStream_.str();
8540 flags |= O_RDONLY;
8543 // Set exclusive access if specified.
8544 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8546 // Try to open the device.
8548 fd = open( ainfo.devnode, flags, 0 );
8550 if ( errno == EBUSY )
8551 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8553 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8554 errorText_ = errorStream_.str();
8558 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is always true (bitwise-or, not a
// comparison) — canonical RtAudio has the same quirk; the ioctl result
// is only checked when SETDUPLEX actually fails.
8560 if ( flags | O_RDWR ) {
8561 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8562 if ( result == -1) {
8563 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8564 errorText_ = errorStream_.str();
8570 // Check the device channel support.
8571 stream_.nUserChannels[mode] = channels;
8572 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8574 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8575 errorText_ = errorStream_.str();
8579 // Set the number of channels.
8580 int deviceChannels = channels + firstChannel;
8581 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8582 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8584 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8585 errorText_ = errorStream_.str();
8588 stream_.nDeviceChannels[mode] = deviceChannels;
8590 // Get the data format mask
8592 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8593 if ( result == -1 ) {
8595 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8596 errorText_ = errorStream_.str();
8600 // Determine how to set the device format.
// Native-endian formats are preferred; opposite-endian variants are
// accepted with byte swapping enabled.
8601 stream_.userFormat = format;
8602 int deviceFormat = -1;
8603 stream_.doByteSwap[mode] = false;
8604 if ( format == RTAUDIO_SINT8 ) {
8605 if ( mask & AFMT_S8 ) {
8606 deviceFormat = AFMT_S8;
8607 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8610 else if ( format == RTAUDIO_SINT16 ) {
8611 if ( mask & AFMT_S16_NE ) {
8612 deviceFormat = AFMT_S16_NE;
8613 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8615 else if ( mask & AFMT_S16_OE ) {
8616 deviceFormat = AFMT_S16_OE;
8617 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8618 stream_.doByteSwap[mode] = true;
8621 else if ( format == RTAUDIO_SINT24 ) {
8622 if ( mask & AFMT_S24_NE ) {
8623 deviceFormat = AFMT_S24_NE;
8624 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8626 else if ( mask & AFMT_S24_OE ) {
8627 deviceFormat = AFMT_S24_OE;
8628 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8629 stream_.doByteSwap[mode] = true;
8632 else if ( format == RTAUDIO_SINT32 ) {
8633 if ( mask & AFMT_S32_NE ) {
8634 deviceFormat = AFMT_S32_NE;
8635 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8637 else if ( mask & AFMT_S32_OE ) {
8638 deviceFormat = AFMT_S32_OE;
8639 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8640 stream_.doByteSwap[mode] = true;
// Requested format unavailable: pick the best native format and rely
// on RtAudio's internal conversion.
8644 if ( deviceFormat == -1 ) {
8645 // The user requested format is not natively supported by the device.
8646 if ( mask & AFMT_S16_NE ) {
8647 deviceFormat = AFMT_S16_NE;
8648 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8650 else if ( mask & AFMT_S32_NE ) {
8651 deviceFormat = AFMT_S32_NE;
8652 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8654 else if ( mask & AFMT_S24_NE ) {
8655 deviceFormat = AFMT_S24_NE;
8656 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8658 else if ( mask & AFMT_S16_OE ) {
8659 deviceFormat = AFMT_S16_OE;
8660 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8661 stream_.doByteSwap[mode] = true;
8663 else if ( mask & AFMT_S32_OE ) {
8664 deviceFormat = AFMT_S32_OE;
8665 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8666 stream_.doByteSwap[mode] = true;
8668 else if ( mask & AFMT_S24_OE ) {
8669 deviceFormat = AFMT_S24_OE;
8670 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8671 stream_.doByteSwap[mode] = true;
8673 else if ( mask & AFMT_S8) {
8674 deviceFormat = AFMT_S8;
8675 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8679 if ( stream_.deviceFormat[mode] == 0 ) {
8680 // This really shouldn't happen ...
8682 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8683 errorText_ = errorStream_.str();
8687 // Set the data format.
8688 int temp = deviceFormat;
8689 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8690 if ( result == -1 || deviceFormat != temp ) {
8692 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8693 errorText_ = errorStream_.str();
8697 // Attempt to set the buffer size. According to OSS, the minimum
8698 // number of buffers is two. The supposed minimum buffer size is 16
8699 // bytes, so that will be our lower bound. The argument to this
8700 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8701 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8702 // We'll check the actual value used near the end of the setup
8704 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8705 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8707 if ( options ) buffers = options->numberOfBuffers;
8708 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8709 if ( buffers < 2 ) buffers = 3;
8710 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8711 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8712 if ( result == -1 ) {
8714 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8715 errorText_ = errorStream_.str();
8718 stream_.nBuffers = buffers;
8720 // Save buffer size (in sample frames).
8721 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8722 stream_.bufferSize = *bufferSize;
8724 // Set the sample rate.
8725 int srate = sampleRate;
8726 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8727 if ( result == -1 ) {
8729 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8730 errorText_ = errorStream_.str();
8734 // Verify the sample rate setup worked.
// A tolerance of 100 Hz accepts near-miss hardware rates.
8735 if ( abs( srate - sampleRate ) > 100 ) {
8737 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8738 errorText_ = errorStream_.str();
8741 stream_.sampleRate = sampleRate;
8743 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8744 // We're doing duplex setup here.
8745 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8746 stream_.nDeviceChannels[0] = deviceChannels;
8749 // Set interleaving parameters.
8750 stream_.userInterleaved = true;
8751 stream_.deviceInterleaved[mode] = true;
8752 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8753 stream_.userInterleaved = false;
8755 // Set flags for buffer conversion
8756 stream_.doConvertBuffer[mode] = false;
8757 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8758 stream_.doConvertBuffer[mode] = true;
8759 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8760 stream_.doConvertBuffer[mode] = true;
8761 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8762 stream_.nUserChannels[mode] > 1 )
8763 stream_.doConvertBuffer[mode] = true;
8765 // Allocate the stream handles if necessary and then save.
8766 if ( stream_.apiHandle == 0 ) {
8768 handle = new OssHandle;
8770 catch ( std::bad_alloc& ) {
8771 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8775 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8776 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8780 stream_.apiHandle = (void *) handle;
8783 handle = (OssHandle *) stream_.apiHandle;
8785 handle->id[mode] = fd;
8787 // Allocate necessary internal buffers.
8788 unsigned long bufferBytes;
8789 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8790 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8791 if ( stream_.userBuffer[mode] == NULL ) {
8792 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// A conversion buffer is only needed when layouts differ; an existing
// output-side buffer is reused if already large enough.
8796 if ( stream_.doConvertBuffer[mode] ) {
8798 bool makeBuffer = true;
8799 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8800 if ( mode == INPUT ) {
8801 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8802 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8803 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8807 if ( makeBuffer ) {
8808 bufferBytes *= *bufferSize;
8809 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8810 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8811 if ( stream_.deviceBuffer == NULL ) {
8812 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
8818 stream_.device[mode] = device;
8819 stream_.state = STREAM_STOPPED;
8821 // Setup the buffer conversion information structure.
8822 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8824 // Setup thread if necessary.
8825 if ( stream_.mode == OUTPUT && mode == INPUT ) {
8826 // We had already set up an output stream.
8827 stream_.mode = DUPLEX;
8828 if ( stream_.device[0] == device ) handle->id[0] = fd;
8831 stream_.mode = mode;
8833 // Setup callback thread.
8834 stream_.callbackInfo.object = (void *) this;
8836 // Set the thread attributes for joinable and realtime scheduling
8837 // priority. The higher priority will only take affect if the
8838 // program is run as root or suid.
8839 pthread_attr_t attr;
8840 pthread_attr_init( &attr );
8841 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8842 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8843 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8844 struct sched_param param;
8845 int priority = options->priority;
8846 int min = sched_get_priority_min( SCHED_RR );
8847 int max = sched_get_priority_max( SCHED_RR );
8848 if ( priority < min ) priority = min;
8849 else if ( priority > max ) priority = max;
8850 param.sched_priority = priority;
// NOTE(review): "¶m" below is a mojibake of "&param" (HTML-entity
// corruption in this extraction) — restore "&param" when repairing.
8851 pthread_attr_setschedparam( &attr, ¶m );
8852 pthread_attr_setschedpolicy( &attr, SCHED_RR );
8855 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8857 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8860 stream_.callbackInfo.isRunning = true;
8861 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
8862 pthread_attr_destroy( &attr );
8864 stream_.callbackInfo.isRunning = false;
8865 errorText_ = "RtApiOss::error creating callback thread!";
// Error path: destroy the handle, close fds, free buffers.
8874 pthread_cond_destroy( &handle->runnable );
8875 if ( handle->id[0] ) close( handle->id[0] );
8876 if ( handle->id[1] ) close( handle->id[1] );
8878 stream_.apiHandle = 0;
8881 for ( int i=0; i<2; i++ ) {
8882 if ( stream_.userBuffer[i] ) {
8883 free( stream_.userBuffer[i] );
8884 stream_.userBuffer[i] = 0;
8888 if ( stream_.deviceBuffer ) {
8889 free( stream_.deviceBuffer );
8890 stream_.deviceBuffer = 0;
\r
// Close the stream: stop the callback thread, halt any running device,
// close fds, free buffers and reset stream bookkeeping.
// NOTE(review): this extraction dropped interior lines (closing braces,
// else-branches, the early return after the WARNING) — verify against
// the canonical RtAudio source.
8896 void RtApiOss :: closeStream()
8898 if ( stream_.state == STREAM_CLOSED ) {
8899 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
8900 error( RtAudioError::WARNING );
8904 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clearing isRunning tells the callback thread to exit; if it is
// parked on the condition variable (stream stopped), wake it first.
8905 stream_.callbackInfo.isRunning = false;
8906 MUTEX_LOCK( &stream_.mutex );
8907 if ( stream_.state == STREAM_STOPPED )
8908 pthread_cond_signal( &handle->runnable );
8909 MUTEX_UNLOCK( &stream_.mutex );
8910 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt the device(s) if the stream was still running.
8912 if ( stream_.state == STREAM_RUNNING ) {
8913 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8914 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
8916 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
8917 stream_.state = STREAM_STOPPED;
// Release the handle and both device fds.
8921 pthread_cond_destroy( &handle->runnable );
8922 if ( handle->id[0] ) close( handle->id[0] );
8923 if ( handle->id[1] ) close( handle->id[1] );
8925 stream_.apiHandle = 0;
8928 for ( int i=0; i<2; i++ ) {
8929 if ( stream_.userBuffer[i] ) {
8930 free( stream_.userBuffer[i] );
8931 stream_.userBuffer[i] = 0;
8935 if ( stream_.deviceBuffer ) {
8936 free( stream_.deviceBuffer );
8937 stream_.deviceBuffer = 0;
8940 stream_.mode = UNINITIALIZED;
8941 stream_.state = STREAM_CLOSED;
\r
8944 void RtApiOss :: startStream()
\r
8947 if ( stream_.state == STREAM_RUNNING ) {
\r
8948 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
8949 error( RtAudioError::WARNING );
\r
8953 MUTEX_LOCK( &stream_.mutex );
\r
8955 stream_.state = STREAM_RUNNING;
\r
8957 // No need to do anything else here ... OSS automatically starts
\r
8958 // when fed samples.
\r
8960 MUTEX_UNLOCK( &stream_.mutex );
\r
8962 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8963 pthread_cond_signal( &handle->runnable );
\r
// Stop the stream gracefully: flush the output with silence so queued
// audio plays out, then halt the device(s).
// NOTE(review): this extraction dropped interior lines (verifyStream(),
// local declarations of result/samples/buffer, "goto unlock" statements
// and the unlock: label) — verify against the canonical RtAudio source.
8966 void RtApiOss :: stopStream()
8969 if ( stream_.state == STREAM_STOPPED ) {
8970 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
8971 error( RtAudioError::WARNING );
8975 MUTEX_LOCK( &stream_.mutex );
8977 // The state might change while waiting on a mutex.
8978 if ( stream_.state == STREAM_STOPPED ) {
8979 MUTEX_UNLOCK( &stream_.mutex );
8984 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8985 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8987 // Flush the output with zeros a few times.
8990 RtAudioFormat format;
// Pick whichever buffer (device or user) actually feeds the device.
8992 if ( stream_.doConvertBuffer[0] ) {
8993 buffer = stream_.deviceBuffer;
8994 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
8995 format = stream_.deviceFormat[0];
8998 buffer = stream_.userBuffer[0];
8999 samples = stream_.bufferSize * stream_.nUserChannels[0];
9000 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so pending audio drains cleanly.
9003 memset( buffer, 0, samples * formatBytes(format) );
9004 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9005 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9006 if ( result == -1 ) {
9007 errorText_ = "RtApiOss::stopStream: audio write error.";
9008 error( RtAudioError::WARNING );
9012 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9013 if ( result == -1 ) {
9014 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9015 errorText_ = errorStream_.str();
9018 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd.
9021 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9022 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9023 if ( result == -1 ) {
9024 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9025 errorText_ = errorStream_.str();
9031 stream_.state = STREAM_STOPPED;
9032 MUTEX_UNLOCK( &stream_.mutex );
// Report SYSTEM_ERROR only if one of the ioctls above failed.
9034 if ( result != -1 ) return;
9035 error( RtAudioError::SYSTEM_ERROR );
\r
9038 void RtApiOss :: abortStream()
\r
9041 if ( stream_.state == STREAM_STOPPED ) {
\r
9042 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9043 error( RtAudioError::WARNING );
\r
9047 MUTEX_LOCK( &stream_.mutex );
\r
9049 // The state might change while waiting on a mutex.
\r
9050 if ( stream_.state == STREAM_STOPPED ) {
\r
9051 MUTEX_UNLOCK( &stream_.mutex );
\r
9056 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9057 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9058 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9059 if ( result == -1 ) {
\r
9060 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9061 errorText_ = errorStream_.str();
\r
9064 handle->triggered = false;
\r
9067 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9068 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9069 if ( result == -1 ) {
\r
9070 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9071 errorText_ = errorStream_.str();
\r
9077 stream_.state = STREAM_STOPPED;
\r
9078 MUTEX_UNLOCK( &stream_.mutex );
\r
9080 if ( result != -1 ) return;
\r
9081 error( RtAudioError::SYSTEM_ERROR );
\r
9084 void RtApiOss :: callbackEvent()
\r
9086 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9087 if ( stream_.state == STREAM_STOPPED ) {
\r
9088 MUTEX_LOCK( &stream_.mutex );
\r
9089 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9090 if ( stream_.state != STREAM_RUNNING ) {
\r
9091 MUTEX_UNLOCK( &stream_.mutex );
\r
9094 MUTEX_UNLOCK( &stream_.mutex );
\r
9097 if ( stream_.state == STREAM_CLOSED ) {
\r
9098 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9099 error( RtAudioError::WARNING );
\r
9103 // Invoke user callback to get fresh output data.
\r
9104 int doStopStream = 0;
\r
9105 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9106 double streamTime = getStreamTime();
\r
9107 RtAudioStreamStatus status = 0;
\r
9108 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9109 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9110 handle->xrun[0] = false;
\r
9112 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9113 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9114 handle->xrun[1] = false;
\r
9116 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9117 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9118 if ( doStopStream == 2 ) {
\r
9119 this->abortStream();
\r
9123 MUTEX_LOCK( &stream_.mutex );
\r
9125 // The state might change while waiting on a mutex.
\r
9126 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9131 RtAudioFormat format;
\r
9133 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9135 // Setup parameters and do buffer conversion if necessary.
\r
9136 if ( stream_.doConvertBuffer[0] ) {
\r
9137 buffer = stream_.deviceBuffer;
\r
9138 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9139 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9140 format = stream_.deviceFormat[0];
\r
9143 buffer = stream_.userBuffer[0];
\r
9144 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9145 format = stream_.userFormat;
\r
9148 // Do byte swapping if necessary.
\r
9149 if ( stream_.doByteSwap[0] )
\r
9150 byteSwapBuffer( buffer, samples, format );
\r
9152 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9154 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9155 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9156 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9157 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9158 handle->triggered = true;
\r
9161 // Write samples to device.
\r
9162 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9164 if ( result == -1 ) {
\r
9165 // We'll assume this is an underrun, though there isn't a
\r
9166 // specific means for determining that.
\r
9167 handle->xrun[0] = true;
\r
9168 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9169 error( RtAudioError::WARNING );
\r
9170 // Continue on to input section.
\r
9174 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9176 // Setup parameters.
\r
9177 if ( stream_.doConvertBuffer[1] ) {
\r
9178 buffer = stream_.deviceBuffer;
\r
9179 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9180 format = stream_.deviceFormat[1];
\r
9183 buffer = stream_.userBuffer[1];
\r
9184 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9185 format = stream_.userFormat;
\r
9188 // Read samples from device.
\r
9189 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9191 if ( result == -1 ) {
\r
9192 // We'll assume this is an overrun, though there isn't a
\r
9193 // specific means for determining that.
\r
9194 handle->xrun[1] = true;
\r
9195 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9196 error( RtAudioError::WARNING );
\r
9200 // Do byte swapping if necessary.
\r
9201 if ( stream_.doByteSwap[1] )
\r
9202 byteSwapBuffer( buffer, samples, format );
\r
9204 // Do buffer conversion if necessary.
\r
9205 if ( stream_.doConvertBuffer[1] )
\r
9206 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9210 MUTEX_UNLOCK( &stream_.mutex );
\r
9212 RtApi::tickStreamTime();
\r
9213 if ( doStopStream == 1 ) this->stopStream();
\r
9216 static void *ossCallbackHandler( void *ptr )
\r
9218 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9219 RtApiOss *object = (RtApiOss *) info->object;
\r
9220 bool *isRunning = &info->isRunning;
\r
9222 while ( *isRunning == true ) {
\r
9223 pthread_testcancel();
\r
9224 object->callbackEvent();
\r
9227 pthread_exit( NULL );
\r
9230 //******************** End of __LINUX_OSS__ *********************//
\r
9234 // *************************************************** //
\r
9236 // Protected common (OS-independent) RtAudio methods.
\r
9238 // *************************************************** //
\r
9240 // This method can be modified to control the behavior of error
\r
9241 // message printing.
\r
9242 void RtApi :: error( RtAudioError::Type type )
\r
9244 errorStream_.str(""); // clear the ostringstream
\r
9246 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9247 if ( errorCallback ) {
\r
9248 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9250 if ( firstErrorOccurred_ )
\r
9253 firstErrorOccurred_ = true;
\r
9254 const std::string errorMessage = errorText_;
\r
9256 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9257 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9261 errorCallback( type, errorMessage );
\r
9262 firstErrorOccurred_ = false;
\r
9266 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9267 std::cerr << '\n' << errorText_ << "\n\n";
\r
9268 else if ( type != RtAudioError::WARNING )
\r
9269 throw( RtAudioError( errorText_, type ) );
\r
9272 void RtApi :: verifyStream()
\r
9274 if ( stream_.state == STREAM_CLOSED ) {
\r
9275 errorText_ = "RtApi:: a stream is not open!";
\r
9276 error( RtAudioError::INVALID_USE );
\r
9280 void RtApi :: clearStreamInfo()
\r
9282 stream_.mode = UNINITIALIZED;
\r
9283 stream_.state = STREAM_CLOSED;
\r
9284 stream_.sampleRate = 0;
\r
9285 stream_.bufferSize = 0;
\r
9286 stream_.nBuffers = 0;
\r
9287 stream_.userFormat = 0;
\r
9288 stream_.userInterleaved = true;
\r
9289 stream_.streamTime = 0.0;
\r
9290 stream_.apiHandle = 0;
\r
9291 stream_.deviceBuffer = 0;
\r
9292 stream_.callbackInfo.callback = 0;
\r
9293 stream_.callbackInfo.userData = 0;
\r
9294 stream_.callbackInfo.isRunning = false;
\r
9295 stream_.callbackInfo.errorCallback = 0;
\r
9296 for ( int i=0; i<2; i++ ) {
\r
9297 stream_.device[i] = 11111;
\r
9298 stream_.doConvertBuffer[i] = false;
\r
9299 stream_.deviceInterleaved[i] = true;
\r
9300 stream_.doByteSwap[i] = false;
\r
9301 stream_.nUserChannels[i] = 0;
\r
9302 stream_.nDeviceChannels[i] = 0;
\r
9303 stream_.channelOffset[i] = 0;
\r
9304 stream_.deviceFormat[i] = 0;
\r
9305 stream_.latency[i] = 0;
\r
9306 stream_.userBuffer[i] = 0;
\r
9307 stream_.convertInfo[i].channels = 0;
\r
9308 stream_.convertInfo[i].inJump = 0;
\r
9309 stream_.convertInfo[i].outJump = 0;
\r
9310 stream_.convertInfo[i].inFormat = 0;
\r
9311 stream_.convertInfo[i].outFormat = 0;
\r
9312 stream_.convertInfo[i].inOffset.clear();
\r
9313 stream_.convertInfo[i].outOffset.clear();
\r
9317 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9319 if ( format == RTAUDIO_SINT16 )
\r
9321 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9323 else if ( format == RTAUDIO_FLOAT64 )
\r
9325 else if ( format == RTAUDIO_SINT24 )
\r
9327 else if ( format == RTAUDIO_SINT8 )
\r
9330 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9331 error( RtAudioError::WARNING );
\r
9336 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9338 if ( mode == INPUT ) { // convert device to user buffer
\r
9339 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9340 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9341 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9342 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9344 else { // convert user to device buffer
\r
9345 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9346 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9347 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9348 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9351 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9352 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9354 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9356 // Set up the interleave/deinterleave offsets.
\r
9357 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9358 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9359 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9360 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9361 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9362 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9363 stream_.convertInfo[mode].inJump = 1;
\r
9367 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9368 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9369 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9370 stream_.convertInfo[mode].outJump = 1;
\r
9374 else { // no (de)interleaving
\r
9375 if ( stream_.userInterleaved ) {
\r
9376 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9377 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9378 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9382 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9383 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9384 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9385 stream_.convertInfo[mode].inJump = 1;
\r
9386 stream_.convertInfo[mode].outJump = 1;
\r
9391 // Add channel offset.
\r
9392 if ( firstChannel > 0 ) {
\r
9393 if ( stream_.deviceInterleaved[mode] ) {
\r
9394 if ( mode == OUTPUT ) {
\r
9395 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9396 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9399 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9400 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9404 if ( mode == OUTPUT ) {
\r
9405 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9406 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9409 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9410 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9416 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9418 // This function does format conversion, input/output channel compensation, and
\r
9419 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9420 // the lower three bytes of a 32-bit integer.
\r
9422 // Clear our device buffer when in/out duplex device channels are different
\r
9423 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9424 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9425 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9428 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9430 Float64 *out = (Float64 *)outBuffer;
\r
9432 if (info.inFormat == RTAUDIO_SINT8) {
\r
9433 signed char *in = (signed char *)inBuffer;
\r
9434 scale = 1.0 / 127.5;
\r
9435 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9436 for (j=0; j<info.channels; j++) {
\r
9437 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9438 out[info.outOffset[j]] += 0.5;
\r
9439 out[info.outOffset[j]] *= scale;
\r
9441 in += info.inJump;
\r
9442 out += info.outJump;
\r
9445 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9446 Int16 *in = (Int16 *)inBuffer;
\r
9447 scale = 1.0 / 32767.5;
\r
9448 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9449 for (j=0; j<info.channels; j++) {
\r
9450 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9451 out[info.outOffset[j]] += 0.5;
\r
9452 out[info.outOffset[j]] *= scale;
\r
9454 in += info.inJump;
\r
9455 out += info.outJump;
\r
9458 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9459 Int24 *in = (Int24 *)inBuffer;
\r
9460 scale = 1.0 / 8388607.5;
\r
9461 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9462 for (j=0; j<info.channels; j++) {
\r
9463 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9464 out[info.outOffset[j]] += 0.5;
\r
9465 out[info.outOffset[j]] *= scale;
\r
9467 in += info.inJump;
\r
9468 out += info.outJump;
\r
9471 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9472 Int32 *in = (Int32 *)inBuffer;
\r
9473 scale = 1.0 / 2147483647.5;
\r
9474 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9475 for (j=0; j<info.channels; j++) {
\r
9476 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9477 out[info.outOffset[j]] += 0.5;
\r
9478 out[info.outOffset[j]] *= scale;
\r
9480 in += info.inJump;
\r
9481 out += info.outJump;
\r
9484 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9485 Float32 *in = (Float32 *)inBuffer;
\r
9486 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9487 for (j=0; j<info.channels; j++) {
\r
9488 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9490 in += info.inJump;
\r
9491 out += info.outJump;
\r
9494 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9495 // Channel compensation and/or (de)interleaving only.
\r
9496 Float64 *in = (Float64 *)inBuffer;
\r
9497 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9498 for (j=0; j<info.channels; j++) {
\r
9499 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9501 in += info.inJump;
\r
9502 out += info.outJump;
\r
9506 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9508 Float32 *out = (Float32 *)outBuffer;
\r
9510 if (info.inFormat == RTAUDIO_SINT8) {
\r
9511 signed char *in = (signed char *)inBuffer;
\r
9512 scale = (Float32) ( 1.0 / 127.5 );
\r
9513 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9514 for (j=0; j<info.channels; j++) {
\r
9515 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9516 out[info.outOffset[j]] += 0.5;
\r
9517 out[info.outOffset[j]] *= scale;
\r
9519 in += info.inJump;
\r
9520 out += info.outJump;
\r
9523 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9524 Int16 *in = (Int16 *)inBuffer;
\r
9525 scale = (Float32) ( 1.0 / 32767.5 );
\r
9526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9527 for (j=0; j<info.channels; j++) {
\r
9528 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9529 out[info.outOffset[j]] += 0.5;
\r
9530 out[info.outOffset[j]] *= scale;
\r
9532 in += info.inJump;
\r
9533 out += info.outJump;
\r
9536 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9537 Int24 *in = (Int24 *)inBuffer;
\r
9538 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9539 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9540 for (j=0; j<info.channels; j++) {
\r
9541 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9542 out[info.outOffset[j]] += 0.5;
\r
9543 out[info.outOffset[j]] *= scale;
\r
9545 in += info.inJump;
\r
9546 out += info.outJump;
\r
9549 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9550 Int32 *in = (Int32 *)inBuffer;
\r
9551 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9552 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9553 for (j=0; j<info.channels; j++) {
\r
9554 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9555 out[info.outOffset[j]] += 0.5;
\r
9556 out[info.outOffset[j]] *= scale;
\r
9558 in += info.inJump;
\r
9559 out += info.outJump;
\r
9562 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9563 // Channel compensation and/or (de)interleaving only.
\r
9564 Float32 *in = (Float32 *)inBuffer;
\r
9565 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9566 for (j=0; j<info.channels; j++) {
\r
9567 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9569 in += info.inJump;
\r
9570 out += info.outJump;
\r
9573 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9574 Float64 *in = (Float64 *)inBuffer;
\r
9575 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9576 for (j=0; j<info.channels; j++) {
\r
9577 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9579 in += info.inJump;
\r
9580 out += info.outJump;
\r
9584 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9585 Int32 *out = (Int32 *)outBuffer;
\r
9586 if (info.inFormat == RTAUDIO_SINT8) {
\r
9587 signed char *in = (signed char *)inBuffer;
\r
9588 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9589 for (j=0; j<info.channels; j++) {
\r
9590 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9591 out[info.outOffset[j]] <<= 24;
\r
9593 in += info.inJump;
\r
9594 out += info.outJump;
\r
9597 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9598 Int16 *in = (Int16 *)inBuffer;
\r
9599 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9600 for (j=0; j<info.channels; j++) {
\r
9601 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9602 out[info.outOffset[j]] <<= 16;
\r
9604 in += info.inJump;
\r
9605 out += info.outJump;
\r
9608 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9609 Int24 *in = (Int24 *)inBuffer;
\r
9610 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9611 for (j=0; j<info.channels; j++) {
\r
9612 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9613 out[info.outOffset[j]] <<= 8;
\r
9615 in += info.inJump;
\r
9616 out += info.outJump;
\r
9619 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9620 // Channel compensation and/or (de)interleaving only.
\r
9621 Int32 *in = (Int32 *)inBuffer;
\r
9622 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9623 for (j=0; j<info.channels; j++) {
\r
9624 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9626 in += info.inJump;
\r
9627 out += info.outJump;
\r
9630 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9631 Float32 *in = (Float32 *)inBuffer;
\r
9632 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9633 for (j=0; j<info.channels; j++) {
\r
9634 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9636 in += info.inJump;
\r
9637 out += info.outJump;
\r
9640 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9641 Float64 *in = (Float64 *)inBuffer;
\r
9642 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9643 for (j=0; j<info.channels; j++) {
\r
9644 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9646 in += info.inJump;
\r
9647 out += info.outJump;
\r
9651 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9652 Int24 *out = (Int24 *)outBuffer;
\r
9653 if (info.inFormat == RTAUDIO_SINT8) {
\r
9654 signed char *in = (signed char *)inBuffer;
\r
9655 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9656 for (j=0; j<info.channels; j++) {
\r
9657 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9658 //out[info.outOffset[j]] <<= 16;
\r
9660 in += info.inJump;
\r
9661 out += info.outJump;
\r
9664 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9665 Int16 *in = (Int16 *)inBuffer;
\r
9666 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9667 for (j=0; j<info.channels; j++) {
\r
9668 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9669 //out[info.outOffset[j]] <<= 8;
\r
9671 in += info.inJump;
\r
9672 out += info.outJump;
\r
9675 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9676 // Channel compensation and/or (de)interleaving only.
\r
9677 Int24 *in = (Int24 *)inBuffer;
\r
9678 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9679 for (j=0; j<info.channels; j++) {
\r
9680 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9682 in += info.inJump;
\r
9683 out += info.outJump;
\r
9686 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9687 Int32 *in = (Int32 *)inBuffer;
\r
9688 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9689 for (j=0; j<info.channels; j++) {
\r
9690 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9691 //out[info.outOffset[j]] >>= 8;
\r
9693 in += info.inJump;
\r
9694 out += info.outJump;
\r
9697 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9698 Float32 *in = (Float32 *)inBuffer;
\r
9699 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9700 for (j=0; j<info.channels; j++) {
\r
9701 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9703 in += info.inJump;
\r
9704 out += info.outJump;
\r
9707 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9708 Float64 *in = (Float64 *)inBuffer;
\r
9709 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9710 for (j=0; j<info.channels; j++) {
\r
9711 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9713 in += info.inJump;
\r
9714 out += info.outJump;
\r
9718 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9719 Int16 *out = (Int16 *)outBuffer;
\r
9720 if (info.inFormat == RTAUDIO_SINT8) {
\r
9721 signed char *in = (signed char *)inBuffer;
\r
9722 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9723 for (j=0; j<info.channels; j++) {
\r
9724 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9725 out[info.outOffset[j]] <<= 8;
\r
9727 in += info.inJump;
\r
9728 out += info.outJump;
\r
9731 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9732 // Channel compensation and/or (de)interleaving only.
\r
9733 Int16 *in = (Int16 *)inBuffer;
\r
9734 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9735 for (j=0; j<info.channels; j++) {
\r
9736 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9738 in += info.inJump;
\r
9739 out += info.outJump;
\r
9742 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9743 Int24 *in = (Int24 *)inBuffer;
\r
9744 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9745 for (j=0; j<info.channels; j++) {
\r
9746 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9748 in += info.inJump;
\r
9749 out += info.outJump;
\r
9752 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9753 Int32 *in = (Int32 *)inBuffer;
\r
9754 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9755 for (j=0; j<info.channels; j++) {
\r
9756 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9758 in += info.inJump;
\r
9759 out += info.outJump;
\r
9762 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9763 Float32 *in = (Float32 *)inBuffer;
\r
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9765 for (j=0; j<info.channels; j++) {
\r
9766 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9768 in += info.inJump;
\r
9769 out += info.outJump;
\r
9772 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9773 Float64 *in = (Float64 *)inBuffer;
\r
9774 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9775 for (j=0; j<info.channels; j++) {
\r
9776 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9778 in += info.inJump;
\r
9779 out += info.outJump;
\r
9783 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9784 signed char *out = (signed char *)outBuffer;
\r
9785 if (info.inFormat == RTAUDIO_SINT8) {
\r
9786 // Channel compensation and/or (de)interleaving only.
\r
9787 signed char *in = (signed char *)inBuffer;
\r
9788 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9789 for (j=0; j<info.channels; j++) {
\r
9790 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9792 in += info.inJump;
\r
9793 out += info.outJump;
\r
9796 if (info.inFormat == RTAUDIO_SINT16) {
\r
9797 Int16 *in = (Int16 *)inBuffer;
\r
9798 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9799 for (j=0; j<info.channels; j++) {
\r
9800 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9802 in += info.inJump;
\r
9803 out += info.outJump;
\r
9806 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9807 Int24 *in = (Int24 *)inBuffer;
\r
9808 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9809 for (j=0; j<info.channels; j++) {
\r
9810 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
9812 in += info.inJump;
\r
9813 out += info.outJump;
\r
9816 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9817 Int32 *in = (Int32 *)inBuffer;
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9827 Float32 *in = (Float32 *)inBuffer;
\r
9828 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9829 for (j=0; j<info.channels; j++) {
\r
9830 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
9832 in += info.inJump;
\r
9833 out += info.outJump;
\r
9836 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9837 Float64 *in = (Float64 *)inBuffer;
\r
9838 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9839 for (j=0; j<info.channels; j++) {
\r
9840 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
9842 in += info.inJump;
\r
9843 out += info.outJump;
\r
9849 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
9850 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
9851 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
9853 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
9855 register char val;
\r
9856 register char *ptr;
\r
9859 if ( format == RTAUDIO_SINT16 ) {
\r
9860 for ( unsigned int i=0; i<samples; i++ ) {
\r
9861 // Swap 1st and 2nd bytes.
\r
9863 *(ptr) = *(ptr+1);
\r
9866 // Increment 2 bytes.
\r
9870 else if ( format == RTAUDIO_SINT32 ||
\r
9871 format == RTAUDIO_FLOAT32 ) {
\r
9872 for ( unsigned int i=0; i<samples; i++ ) {
\r
9873 // Swap 1st and 4th bytes.
\r
9875 *(ptr) = *(ptr+3);
\r
9878 // Swap 2nd and 3rd bytes.
\r
9881 *(ptr) = *(ptr+1);
\r
9884 // Increment 3 more bytes.
\r
9888 else if ( format == RTAUDIO_SINT24 ) {
\r
9889 for ( unsigned int i=0; i<samples; i++ ) {
\r
9890 // Swap 1st and 3rd bytes.
\r
9892 *(ptr) = *(ptr+2);
\r
9895 // Increment 2 more bytes.
\r
9899 else if ( format == RTAUDIO_FLOAT64 ) {
\r
9900 for ( unsigned int i=0; i<samples; i++ ) {
\r
9901 // Swap 1st and 8th bytes
\r
9903 *(ptr) = *(ptr+7);
\r
9906 // Swap 2nd and 7th bytes
\r
9909 *(ptr) = *(ptr+5);
\r
9912 // Swap 3rd and 6th bytes
\r
9915 *(ptr) = *(ptr+3);
\r
9918 // Swap 4th and 5th bytes
\r
9921 *(ptr) = *(ptr+1);
\r
9924 // Increment 5 more bytes.
\r
9930 // Indentation settings for Vim and Emacs
\r
9932 // Local Variables:
\r
9933 // c-basic-offset: 2
\r
9934 // indent-tabs-mode: nil
\r
9937 // vim: et sts=2 sw=2
\r