1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
#include "RtAudio.h"

#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
// Convert a null-terminated narrow C string to a std::string.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
69 static std::string convertCharPointerToStdString(const wchar_t *text)
\r
71 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
\r
72 std::string s( length-1, '\0' );
\r
73 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
\r
77 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
79 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
80 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
81 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
82 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
84 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
85 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
88 // *************************************************** //
\r
90 // RtAudio definitions.
\r
92 // *************************************************** //
\r
94 std::string RtAudio :: getVersion( void ) throw()
\r
96 return RTAUDIO_VERSION;
\r
99 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
103 // The order here will control the order of RtAudio's API search in
\r
104 // the constructor.
\r
105 #if defined(__UNIX_JACK__)
\r
106 apis.push_back( UNIX_JACK );
\r
108 #if defined(__LINUX_ALSA__)
\r
109 apis.push_back( LINUX_ALSA );
\r
111 #if defined(__LINUX_PULSE__)
\r
112 apis.push_back( LINUX_PULSE );
\r
114 #if defined(__LINUX_OSS__)
\r
115 apis.push_back( LINUX_OSS );
\r
117 #if defined(__WINDOWS_ASIO__)
\r
118 apis.push_back( WINDOWS_ASIO );
\r
120 #if defined(__WINDOWS_WASAPI__)
\r
121 apis.push_back( WINDOWS_WASAPI );
\r
123 #if defined(__WINDOWS_DS__)
\r
124 apis.push_back( WINDOWS_DS );
\r
126 #if defined(__MACOSX_CORE__)
\r
127 apis.push_back( MACOSX_CORE );
\r
129 #if defined(__RTAUDIO_DUMMY__)
\r
130 apis.push_back( RTAUDIO_DUMMY );
\r
134 void RtAudio :: openRtApi( RtAudio::Api api )
\r
140 #if defined(__UNIX_JACK__)
\r
141 if ( api == UNIX_JACK )
\r
142 rtapi_ = new RtApiJack();
\r
144 #if defined(__LINUX_ALSA__)
\r
145 if ( api == LINUX_ALSA )
\r
146 rtapi_ = new RtApiAlsa();
\r
148 #if defined(__LINUX_PULSE__)
\r
149 if ( api == LINUX_PULSE )
\r
150 rtapi_ = new RtApiPulse();
\r
152 #if defined(__LINUX_OSS__)
\r
153 if ( api == LINUX_OSS )
\r
154 rtapi_ = new RtApiOss();
\r
156 #if defined(__WINDOWS_ASIO__)
\r
157 if ( api == WINDOWS_ASIO )
\r
158 rtapi_ = new RtApiAsio();
\r
160 #if defined(__WINDOWS_WASAPI__)
\r
161 if ( api == WINDOWS_WASAPI )
\r
162 rtapi_ = new RtApiWasapi();
\r
164 #if defined(__WINDOWS_DS__)
\r
165 if ( api == WINDOWS_DS )
\r
166 rtapi_ = new RtApiDs();
\r
168 #if defined(__MACOSX_CORE__)
\r
169 if ( api == MACOSX_CORE )
\r
170 rtapi_ = new RtApiCore();
\r
172 #if defined(__RTAUDIO_DUMMY__)
\r
173 if ( api == RTAUDIO_DUMMY )
\r
174 rtapi_ = new RtApiDummy();
\r
178 RtAudio :: RtAudio( RtAudio::Api api )
\r
182 if ( api != UNSPECIFIED ) {
\r
183 // Attempt to open the specified API.
\r
185 if ( rtapi_ ) return;
\r
187 // No compiled support for specified API value. Issue a debug
\r
188 // warning and continue as if no API was specified.
\r
189 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
192 // Iterate through the compiled APIs and return as soon as we find
\r
193 // one with at least one device or we reach the end of the list.
\r
194 std::vector< RtAudio::Api > apis;
\r
195 getCompiledApi( apis );
\r
196 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
197 openRtApi( apis[i] );
\r
198 if ( rtapi_->getDeviceCount() ) break;
\r
201 if ( rtapi_ ) return;
\r
203 // It should not be possible to get here because the preprocessor
\r
204 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
205 // API-specific definitions are passed to the compiler. But just in
\r
206 // case something weird happens, we'll thow an error.
\r
207 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
208 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
211 RtAudio :: ~RtAudio() throw()
\r
217 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
218 RtAudio::StreamParameters *inputParameters,
\r
219 RtAudioFormat format, unsigned int sampleRate,
\r
220 unsigned int *bufferFrames,
\r
221 RtAudioCallback callback, void *userData,
\r
222 RtAudio::StreamOptions *options,
\r
223 RtAudioErrorCallback errorCallback )
\r
225 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
226 sampleRate, bufferFrames, callback,
\r
227 userData, options, errorCallback );
\r
230 // *************************************************** //
\r
232 // Public RtApi definitions (see end of file for
\r
233 // private or protected utility functions).
\r
235 // *************************************************** //
\r
239 stream_.state = STREAM_CLOSED;
\r
240 stream_.mode = UNINITIALIZED;
\r
241 stream_.apiHandle = 0;
\r
242 stream_.userBuffer[0] = 0;
\r
243 stream_.userBuffer[1] = 0;
\r
244 MUTEX_INITIALIZE( &stream_.mutex );
\r
245 showWarnings_ = true;
\r
246 firstErrorOccurred_ = false;
\r
251 MUTEX_DESTROY( &stream_.mutex );
\r
254 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
255 RtAudio::StreamParameters *iParams,
\r
256 RtAudioFormat format, unsigned int sampleRate,
\r
257 unsigned int *bufferFrames,
\r
258 RtAudioCallback callback, void *userData,
\r
259 RtAudio::StreamOptions *options,
\r
260 RtAudioErrorCallback errorCallback )
\r
262 if ( stream_.state != STREAM_CLOSED ) {
\r
263 errorText_ = "RtApi::openStream: a stream is already open!";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 // Clear stream information potentially left from a previously open stream.
\r
271 if ( oParams && oParams->nChannels < 1 ) {
\r
272 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
273 error( RtAudioError::INVALID_USE );
\r
277 if ( iParams && iParams->nChannels < 1 ) {
\r
278 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
279 error( RtAudioError::INVALID_USE );
\r
283 if ( oParams == NULL && iParams == NULL ) {
\r
284 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
285 error( RtAudioError::INVALID_USE );
\r
289 if ( formatBytes(format) == 0 ) {
\r
290 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
291 error( RtAudioError::INVALID_USE );
\r
295 unsigned int nDevices = getDeviceCount();
\r
296 unsigned int oChannels = 0;
\r
298 oChannels = oParams->nChannels;
\r
299 if ( oParams->deviceId >= nDevices ) {
\r
300 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
301 error( RtAudioError::INVALID_USE );
\r
306 unsigned int iChannels = 0;
\r
308 iChannels = iParams->nChannels;
\r
309 if ( iParams->deviceId >= nDevices ) {
\r
310 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
311 error( RtAudioError::INVALID_USE );
\r
318 if ( oChannels > 0 ) {
\r
320 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
321 sampleRate, format, bufferFrames, options );
\r
322 if ( result == false ) {
\r
323 error( RtAudioError::SYSTEM_ERROR );
\r
328 if ( iChannels > 0 ) {
\r
330 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
331 sampleRate, format, bufferFrames, options );
\r
332 if ( result == false ) {
\r
333 if ( oChannels > 0 ) closeStream();
\r
334 error( RtAudioError::SYSTEM_ERROR );
\r
339 stream_.callbackInfo.callback = (void *) callback;
\r
340 stream_.callbackInfo.userData = userData;
\r
341 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
343 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
344 stream_.state = STREAM_STOPPED;
\r
347 unsigned int RtApi :: getDefaultInputDevice( void )
\r
349 // Should be implemented in subclasses if possible.
\r
353 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
355 // Should be implemented in subclasses if possible.
\r
359 void RtApi :: closeStream( void )
\r
361 // MUST be implemented in subclasses!
\r
365 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
366 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
367 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
368 RtAudio::StreamOptions * /*options*/ )
\r
370 // MUST be implemented in subclasses!
\r
374 void RtApi :: tickStreamTime( void )
\r
376 // Subclasses that do not provide their own implementation of
\r
377 // getStreamTime should call this function once per buffer I/O to
\r
378 // provide basic stream time support.
\r
380 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
382 #if defined( HAVE_GETTIMEOFDAY )
\r
383 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
387 long RtApi :: getStreamLatency( void )
\r
391 long totalLatency = 0;
\r
392 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
393 totalLatency = stream_.latency[0];
\r
394 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
395 totalLatency += stream_.latency[1];
\r
397 return totalLatency;
\r
400 double RtApi :: getStreamTime( void )
\r
404 #if defined( HAVE_GETTIMEOFDAY )
\r
405 // Return a very accurate estimate of the stream time by
\r
406 // adding in the elapsed time since the last tick.
\r
407 struct timeval then;
\r
408 struct timeval now;
\r
410 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
411 return stream_.streamTime;
\r
413 gettimeofday( &now, NULL );
\r
414 then = stream_.lastTickTimestamp;
\r
415 return stream_.streamTime +
\r
416 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
417 (then.tv_sec + 0.000001 * then.tv_usec));
\r
419 return stream_.streamTime;
\r
423 void RtApi :: setStreamTime( double time )
\r
428 stream_.streamTime = time;
\r
431 unsigned int RtApi :: getStreamSampleRate( void )
\r
435 return stream_.sampleRate;
\r
439 // *************************************************** //
\r
441 // OS/API-specific methods.
\r
443 // *************************************************** //
\r
445 #if defined(__MACOSX_CORE__)
\r
447 // The OS X CoreAudio API is designed to use a separate callback
\r
448 // procedure for each of its audio devices. A single RtAudio duplex
\r
449 // stream using two different devices is supported here, though it
\r
450 // cannot be guaranteed to always behave correctly because we cannot
\r
451 // synchronize these two callbacks.
\r
453 // A property listener is installed for over/underrun information.
\r
454 // However, no functionality is currently provided to allow property
\r
455 // listeners to trigger user handlers because it is unclear what could
\r
456 // be done if a critical stream parameter (buffer size, sample rate,
\r
457 // device disconnect) notification arrived. The listeners entail
\r
458 // quite a bit of extra code and most likely, a user program wouldn't
\r
459 // be prepared for the result anyway. However, we do provide a flag
\r
460 // to the client callback function to inform of an over/underrun.
\r
462 // A structure to hold various information related to the CoreAudio API
\r
464 struct CoreHandle {
\r
465 AudioDeviceID id[2]; // device ids
\r
466 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
467 AudioDeviceIOProcID procId[2];
\r
469 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
470 UInt32 nStreams[2]; // number of streams to use
\r
472 char *deviceBuffer;
\r
473 pthread_cond_t condition;
\r
474 int drainCounter; // Tracks callback counts when draining
\r
475 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
478 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
481 RtApiCore:: RtApiCore()
\r
483 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
484 // This is a largely undocumented but absolutely necessary
\r
485 // requirement starting with OS-X 10.6. If not called, queries and
\r
486 // updates to various audio device properties are not handled
\r
488 CFRunLoopRef theRunLoop = NULL;
\r
489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
490 kAudioObjectPropertyScopeGlobal,
\r
491 kAudioObjectPropertyElementMaster };
\r
492 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
493 if ( result != noErr ) {
\r
494 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
495 error( RtAudioError::WARNING );
\r
500 RtApiCore :: ~RtApiCore()
\r
502 // The subclass destructor gets called before the base class
\r
503 // destructor, so close an existing stream before deallocating
\r
504 // apiDeviceId memory.
\r
505 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
508 unsigned int RtApiCore :: getDeviceCount( void )
\r
510 // Find out how many audio devices there are, if any.
\r
512 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
513 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
514 if ( result != noErr ) {
\r
515 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
516 error( RtAudioError::WARNING );
\r
520 return dataSize / sizeof( AudioDeviceID );
\r
523 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices <= 1 ) return 0;
\r
529 UInt32 dataSize = sizeof( AudioDeviceID );
\r
530 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
531 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
532 if ( result != noErr ) {
\r
533 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
534 error( RtAudioError::WARNING );
\r
538 dataSize *= nDevices;
\r
539 AudioDeviceID deviceList[ nDevices ];
\r
540 property.mSelector = kAudioHardwarePropertyDevices;
\r
541 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
542 if ( result != noErr ) {
\r
543 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
544 error( RtAudioError::WARNING );
\r
548 for ( unsigned int i=0; i<nDevices; i++ )
\r
549 if ( id == deviceList[i] ) return i;
\r
551 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
552 error( RtAudioError::WARNING );
\r
556 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
558 unsigned int nDevices = getDeviceCount();
\r
559 if ( nDevices <= 1 ) return 0;
\r
562 UInt32 dataSize = sizeof( AudioDeviceID );
\r
563 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
564 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
565 if ( result != noErr ) {
\r
566 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
567 error( RtAudioError::WARNING );
\r
571 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
572 AudioDeviceID deviceList[ nDevices ];
\r
573 property.mSelector = kAudioHardwarePropertyDevices;
\r
574 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
575 if ( result != noErr ) {
\r
576 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
577 error( RtAudioError::WARNING );
\r
581 for ( unsigned int i=0; i<nDevices; i++ )
\r
582 if ( id == deviceList[i] ) return i;
\r
584 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
585 error( RtAudioError::WARNING );
\r
589 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
591 RtAudio::DeviceInfo info;
\r
592 info.probed = false;
\r
595 unsigned int nDevices = getDeviceCount();
\r
596 if ( nDevices == 0 ) {
\r
597 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
598 error( RtAudioError::INVALID_USE );
\r
602 if ( device >= nDevices ) {
\r
603 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
604 error( RtAudioError::INVALID_USE );
\r
608 AudioDeviceID deviceList[ nDevices ];
\r
609 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
610 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
611 kAudioObjectPropertyScopeGlobal,
\r
612 kAudioObjectPropertyElementMaster };
\r
613 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
614 0, NULL, &dataSize, (void *) &deviceList );
\r
615 if ( result != noErr ) {
\r
616 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
617 error( RtAudioError::WARNING );
\r
621 AudioDeviceID id = deviceList[ device ];
\r
623 // Get the device name.
\r
625 CFStringRef cfname;
\r
626 dataSize = sizeof( CFStringRef );
\r
627 property.mSelector = kAudioObjectPropertyManufacturer;
\r
628 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
629 if ( result != noErr ) {
\r
630 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
631 errorText_ = errorStream_.str();
\r
632 error( RtAudioError::WARNING );
\r
636 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
637 int length = CFStringGetLength(cfname);
\r
638 char *mname = (char *)malloc(length * 3 + 1);
\r
639 #if defined( UNICODE ) || defined( _UNICODE )
\r
640 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
642 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
644 info.name.append( (const char *)mname, strlen(mname) );
\r
645 info.name.append( ": " );
\r
646 CFRelease( cfname );
\r
649 property.mSelector = kAudioObjectPropertyName;
\r
650 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
651 if ( result != noErr ) {
\r
652 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtAudioError::WARNING );
\r
658 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
659 length = CFStringGetLength(cfname);
\r
660 char *name = (char *)malloc(length * 3 + 1);
\r
661 #if defined( UNICODE ) || defined( _UNICODE )
\r
662 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
664 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
666 info.name.append( (const char *)name, strlen(name) );
\r
667 CFRelease( cfname );
\r
670 // Get the output stream "configuration".
\r
671 AudioBufferList *bufferList = nil;
\r
672 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
673 property.mScope = kAudioDevicePropertyScopeOutput;
\r
674 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
676 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
677 if ( result != noErr || dataSize == 0 ) {
\r
678 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
679 errorText_ = errorStream_.str();
\r
680 error( RtAudioError::WARNING );
\r
684 // Allocate the AudioBufferList.
\r
685 bufferList = (AudioBufferList *) malloc( dataSize );
\r
686 if ( bufferList == NULL ) {
\r
687 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
688 error( RtAudioError::WARNING );
\r
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
693 if ( result != noErr || dataSize == 0 ) {
\r
694 free( bufferList );
\r
695 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
696 errorText_ = errorStream_.str();
\r
697 error( RtAudioError::WARNING );
\r
701 // Get output channel information.
\r
702 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
703 for ( i=0; i<nStreams; i++ )
\r
704 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
705 free( bufferList );
\r
707 // Get the input stream "configuration".
\r
708 property.mScope = kAudioDevicePropertyScopeInput;
\r
709 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
710 if ( result != noErr || dataSize == 0 ) {
\r
711 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
712 errorText_ = errorStream_.str();
\r
713 error( RtAudioError::WARNING );
\r
717 // Allocate the AudioBufferList.
\r
718 bufferList = (AudioBufferList *) malloc( dataSize );
\r
719 if ( bufferList == NULL ) {
\r
720 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
721 error( RtAudioError::WARNING );
\r
725 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
726 if (result != noErr || dataSize == 0) {
\r
727 free( bufferList );
\r
728 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
729 errorText_ = errorStream_.str();
\r
730 error( RtAudioError::WARNING );
\r
734 // Get input channel information.
\r
735 nStreams = bufferList->mNumberBuffers;
\r
736 for ( i=0; i<nStreams; i++ )
\r
737 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
738 free( bufferList );
\r
740 // If device opens for both playback and capture, we determine the channels.
\r
741 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
742 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
744 // Probe the device sample rates.
\r
745 bool isInput = false;
\r
746 if ( info.outputChannels == 0 ) isInput = true;
\r
748 // Determine the supported sample rates.
\r
749 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
750 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
751 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
752 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
753 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
754 errorText_ = errorStream_.str();
\r
755 error( RtAudioError::WARNING );
\r
759 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
760 AudioValueRange rangeList[ nRanges ];
\r
761 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
762 if ( result != kAudioHardwareNoError ) {
\r
763 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
764 errorText_ = errorStream_.str();
\r
765 error( RtAudioError::WARNING );
\r
769 // The sample rate reporting mechanism is a bit of a mystery. It
\r
770 // seems that it can either return individual rates or a range of
\r
771 // rates. I assume that if the min / max range values are the same,
\r
772 // then that represents a single supported rate and if the min / max
\r
773 // range values are different, the device supports an arbitrary
\r
774 // range of values (though there might be multiple ranges, so we'll
\r
775 // use the most conservative range).
\r
776 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
777 bool haveValueRange = false;
\r
778 info.sampleRates.clear();
\r
779 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
780 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
781 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
783 haveValueRange = true;
\r
784 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
785 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
789 if ( haveValueRange ) {
\r
790 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
791 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
792 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
796 // Sort and remove any redundant values
\r
797 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
798 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
800 if ( info.sampleRates.size() == 0 ) {
\r
801 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
802 errorText_ = errorStream_.str();
\r
803 error( RtAudioError::WARNING );
\r
807 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
808 // Thus, any other "physical" formats supported by the device are of
\r
809 // no interest to the client.
\r
810 info.nativeFormats = RTAUDIO_FLOAT32;
\r
812 if ( info.outputChannels > 0 )
\r
813 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
814 if ( info.inputChannels > 0 )
\r
815 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
817 info.probed = true;
\r
821 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
822 const AudioTimeStamp* /*inNow*/,
\r
823 const AudioBufferList* inInputData,
\r
824 const AudioTimeStamp* /*inInputTime*/,
\r
825 AudioBufferList* outOutputData,
\r
826 const AudioTimeStamp* /*inOutputTime*/,
\r
827 void* infoPointer )
\r
829 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
831 RtApiCore *object = (RtApiCore *) info->object;
\r
832 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
833 return kAudioHardwareUnspecifiedError;
\r
835 return kAudioHardwareNoError;
\r
838 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
840 const AudioObjectPropertyAddress properties[],
\r
841 void* handlePointer )
\r
843 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
844 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
845 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
846 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
847 handle->xrun[1] = true;
\r
849 handle->xrun[0] = true;
\r
853 return kAudioHardwareNoError;
\r
856 static OSStatus rateListener( AudioObjectID inDevice,
\r
857 UInt32 /*nAddresses*/,
\r
858 const AudioObjectPropertyAddress /*properties*/[],
\r
859 void* ratePointer )
\r
861 Float64 *rate = (Float64 *) ratePointer;
\r
862 UInt32 dataSize = sizeof( Float64 );
\r
863 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
864 kAudioObjectPropertyScopeGlobal,
\r
865 kAudioObjectPropertyElementMaster };
\r
866 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
867 return kAudioHardwareNoError;
\r
// Probe and open a CoreAudio device for one direction (INPUT or OUTPUT) of a stream.
// Steps: validate the device index, fetch the device's stream configuration, map the
// requested (channels, firstChannel) onto one or more CoreAudio streams, negotiate
// buffer size / sample rate / virtual and physical formats, query latency, set up
// conversion flags and internal buffers, then install the I/O callback and an
// overload (xrun) listener.  On failure, errorText_ is set and the function reports
// the error (the failure-return statements are elided in this view — confirm against
// the canonical source).
870 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

871 unsigned int firstChannel, unsigned int sampleRate,

872 RtAudioFormat format, unsigned int *bufferSize,

873 RtAudio::StreamOptions *options )

876 unsigned int nDevices = getDeviceCount();

877 if ( nDevices == 0 ) {

878 // This should not happen because a check is made before this function is called.

879 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

883 if ( device >= nDevices ) {

884 // This should not happen because a check is made before this function is called.

885 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// re-querying the system device list.
889 AudioDeviceID deviceList[ nDevices ];

890 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;

891 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

892 kAudioObjectPropertyScopeGlobal,

893 kAudioObjectPropertyElementMaster };

894 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,

895 0, NULL, &dataSize, (void *) &deviceList );

896 if ( result != noErr ) {

897 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

901 AudioDeviceID id = deviceList[ device ];

903 // Setup for stream mode.

904 bool isInput = false;

905 if ( mode == INPUT ) {

907 property.mScope = kAudioDevicePropertyScopeInput;

910 property.mScope = kAudioDevicePropertyScopeOutput;

912 // Get the stream "configuration".

913 AudioBufferList *bufferList = nil;

915 property.mSelector = kAudioDevicePropertyStreamConfiguration;

916 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );

917 if ( result != noErr || dataSize == 0 ) {

918 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";

919 errorText_ = errorStream_.str();

923 // Allocate the AudioBufferList.

924 bufferList = (AudioBufferList *) malloc( dataSize );

925 if ( bufferList == NULL ) {

926 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

930 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );

931 if (result != noErr || dataSize == 0) {

932 free( bufferList );

933 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";

934 errorText_ = errorStream_.str();

938 // Search for one or more streams that contain the desired number of

939 // channels. CoreAudio devices can have an arbitrary number of

940 // streams and each stream can have an arbitrary number of channels.

941 // For each stream, a single buffer of interleaved samples is

942 // provided. RtAudio prefers the use of one stream of interleaved

943 // data or multiple consecutive single-channel streams. However, we

944 // now support multiple consecutive multi-channel streams of

945 // interleaved data as well.

946 UInt32 iStream, offsetCounter = firstChannel;

947 UInt32 nStreams = bufferList->mNumberBuffers;

948 bool monoMode = false;

949 bool foundStream = false;

951 // First check that the device supports the requested number of

// (channels: sum of channel counts across all of the device's streams.)
953 UInt32 deviceChannels = 0;

954 for ( iStream=0; iStream<nStreams; iStream++ )

955 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

957 if ( deviceChannels < ( channels + firstChannel ) ) {

958 free( bufferList );

959 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";

960 errorText_ = errorStream_.str();

964 // Look for a single stream meeting our needs.

965 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;

966 for ( iStream=0; iStream<nStreams; iStream++ ) {

967 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

968 if ( streamChannels >= channels + offsetCounter ) {

969 firstStream = iStream;

970 channelOffset = offsetCounter;

971 foundStream = true;

// offsetCounter tracks how many of the requested firstChannel offsets
// remain after skipping whole streams that precede the target channel.
974 if ( streamChannels > offsetCounter ) break;

975 offsetCounter -= streamChannels;

978 // If we didn't find a single stream above, then we should be able

979 // to meet the channel specification with multiple streams.

980 if ( foundStream == false ) {

982 offsetCounter = firstChannel;

983 for ( iStream=0; iStream<nStreams; iStream++ ) {

984 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

985 if ( streamChannels > offsetCounter ) break;

986 offsetCounter -= streamChannels;

989 firstStream = iStream;

990 channelOffset = offsetCounter;

991 Int32 channelCounter = channels + offsetCounter - streamChannels;

// monoMode stays true only while every consumed stream is single-channel;
// a multi-channel stream anywhere in the span clears it.
993 if ( streamChannels > 1 ) monoMode = false;

994 while ( channelCounter > 0 ) {

995 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;

996 if ( streamChannels > 1 ) monoMode = false;

997 channelCounter -= streamChannels;

1002 free( bufferList );

1004 // Determine the buffer size.

1005 AudioValueRange bufferRange;

1006 dataSize = sizeof( AudioValueRange );

1007 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;

1008 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

1010 if ( result != noErr ) {

1011 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";

1012 errorText_ = errorStream_.str();

// Clamp the caller's requested buffer size into the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1016 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1017 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;

1018 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1020 // Set the buffer size. For multiple streams, I'm assuming we only

1021 // need to make this setting for the master channel.

1022 UInt32 theSize = (UInt32) *bufferSize;

1023 dataSize = sizeof( UInt32 );

1024 property.mSelector = kAudioDevicePropertyBufferFrameSize;

1025 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

1027 if ( result != noErr ) {

1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";

1029 errorText_ = errorStream_.str();

1033 // If attempting to setup a duplex stream, the bufferSize parameter

1034 // MUST be the same in both directions!

1035 *bufferSize = theSize;

1036 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {

1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";

1038 errorText_ = errorStream_.str();

1042 stream_.bufferSize = *bufferSize;

1043 stream_.nBuffers = 1;

1045 // Try to set "hog" mode ... it's not clear to me this is working.

// kAudioDevicePropertyHogMode grants this process exclusive device access;
// only claim it if another pid doesn't already own it.
1046 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {

1048 dataSize = sizeof( hog_pid );

1049 property.mSelector = kAudioDevicePropertyHogMode;

1050 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );

1051 if ( result != noErr ) {

1052 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";

1053 errorText_ = errorStream_.str();

1057 if ( hog_pid != getpid() ) {

1058 hog_pid = getpid();

1059 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );

1060 if ( result != noErr ) {

1061 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";

1062 errorText_ = errorStream_.str();

1068 // Check and if necessary, change the sample rate for the device.

1069 Float64 nominalRate;

1070 dataSize = sizeof( Float64 );

1071 property.mSelector = kAudioDevicePropertyNominalSampleRate;

1072 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );

1073 if ( result != noErr ) {

1074 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";

1075 errorText_ = errorStream_.str();

1079 // Only change the sample rate if off by more than 1 Hz.

1080 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

1082 // Set a property listener for the sample rate change

// rateListener writes the device-reported rate into reportedRate so we can
// poll below until the asynchronous rate change actually takes effect.
1083 Float64 reportedRate = 0.0;

1084 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

1085 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1086 if ( result != noErr ) {

1087 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";

1088 errorText_ = errorStream_.str();

1092 nominalRate = (Float64) sampleRate;

1093 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );

1094 if ( result != noErr ) {

1095 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";

1097 errorText_ = errorStream_.str();

1101 // Now wait until the reported nominal rate is what we just set.

// Poll up to ~5 seconds in 5 ms steps (the sleep call between iterations is
// elided in this view — presumably usleep(5000); confirm against upstream).
1102 UInt32 microCounter = 0;

1103 while ( reportedRate != nominalRate ) {

1104 microCounter += 5000;

1105 if ( microCounter > 5000000 ) break;

1109 // Remove the property listener.

1110 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1112 if ( microCounter > 5000000 ) {

1113 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";

1114 errorText_ = errorStream_.str();

1119 // Now set the stream format for all streams. Also, check the

1120 // physical format of the device and change that if necessary.

1121 AudioStreamBasicDescription description;

1122 dataSize = sizeof( AudioStreamBasicDescription );

1123 property.mSelector = kAudioStreamPropertyVirtualFormat;

1124 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1125 if ( result != noErr ) {

1126 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";

1127 errorText_ = errorStream_.str();

1131 // Set the sample rate and data format id. However, only make the

1132 // change if the sample rate is not within 1.0 of the desired

1133 // rate and the format is not linear pcm.

1134 bool updateFormat = false;

1135 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {

1136 description.mSampleRate = (Float64) sampleRate;

1137 updateFormat = true;

1140 if ( description.mFormatID != kAudioFormatLinearPCM ) {

1141 description.mFormatID = kAudioFormatLinearPCM;

1142 updateFormat = true;

1145 if ( updateFormat ) {

1146 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );

1147 if ( result != noErr ) {

1148 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";

1149 errorText_ = errorStream_.str();

1154 // Now check the physical format.

1155 property.mSelector = kAudioStreamPropertyPhysicalFormat;

1156 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1157 if ( result != noErr ) {

1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";

1159 errorText_ = errorStream_.str();

1163 //std::cout << "Current physical stream format:" << std::endl;

1164 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;

1165 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1166 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;

1167 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

// If the physical format is not linear PCM of at least 16 bits, try a ladder
// of candidate formats from highest to lowest quality until one is accepted.
1169 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {

1170 description.mFormatID = kAudioFormatLinearPCM;

1171 //description.mSampleRate = (Float64) sampleRate;

1172 AudioStreamBasicDescription testDescription = description;

1173 UInt32 formatFlags;

1175 // We'll try higher bit rates first and then work our way down.

// Candidate list: pair.first encodes bit depth (24.2 / 24.4 distinguish the
// two 24-in-32-bit layouts), pair.second holds the matching format flags.
1176 std::vector< std::pair<UInt32, UInt32> > physicalFormats;

1177 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;

1178 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1179 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1180 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1181 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed

1182 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );

1183 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low

1184 formatFlags |= kAudioFormatFlagIsAlignedHigh;

1185 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high

1186 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1187 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );

1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

1190 bool setPhysicalFormat = false;

1191 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {

1192 testDescription = description;

1193 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;

1194 testDescription.mFormatFlags = physicalFormats[i].second;

// NOTE(review): '~' is bitwise NOT, so this condition is non-zero (true) for
// every flag combination; a logical '!' was presumably intended to detect the
// unpacked 24-in-32-bit case — confirm against the canonical RtAudio source.
1195 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )

1196 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;

1198 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;

1199 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;

1200 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );

1201 if ( result == noErr ) {

1202 setPhysicalFormat = true;

1203 //std::cout << "Updated physical stream format:" << std::endl;

1204 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;

1205 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1206 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;

1207 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

1212 if ( !setPhysicalFormat ) {

1213 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";

1214 errorText_ = errorStream_.str();

1217 } // done setting virtual/physical formats.

1219 // Get the stream / device latency.

1221 dataSize = sizeof( UInt32 );

1222 property.mSelector = kAudioDevicePropertyLatency;

1223 if ( AudioObjectHasProperty( id, &property ) == true ) {

1224 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );

1225 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;

// Latency query failure is non-fatal: warn and continue.
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";

1228 errorText_ = errorStream_.str();

1229 error( RtAudioError::WARNING );

1233 // Byte-swapping: According to AudioHardware.h, the stream data will

1234 // always be presented in native-endian format, so we should never

1235 // need to byte swap.

1236 stream_.doByteSwap[mode] = false;

1238 // From the CoreAudio documentation, PCM data must be supplied as

// 32-bit floats at the virtual (stream) level, regardless of physical format.
1240 stream_.userFormat = format;

1241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

1243 if ( streamCount == 1 )

1244 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;

1245 else // multiple streams

1246 stream_.nDeviceChannels[mode] = channels;

1247 stream_.nUserChannels[mode] = channels;

1248 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream

1249 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

1250 else stream_.userInterleaved = true;

1251 stream_.deviceInterleaved[mode] = true;

1252 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

1254 // Set flags for buffer conversion.

1255 stream_.doConvertBuffer[mode] = false;

1256 if ( stream_.userFormat != stream_.deviceFormat[mode] )

1257 stream_.doConvertBuffer[mode] = true;

1258 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

1259 stream_.doConvertBuffer[mode] = true;

1260 if ( streamCount == 1 ) {

1261 if ( stream_.nUserChannels[mode] > 1 &&

1262 stream_.userInterleaved != stream_.deviceInterleaved[mode] )

1263 stream_.doConvertBuffer[mode] = true;

1265 else if ( monoMode && stream_.userInterleaved )

1266 stream_.doConvertBuffer[mode] = true;

1268 // Allocate our CoreHandle structure for the stream.

// Reuse an existing handle for the second direction of a duplex stream;
// otherwise allocate a fresh one and initialize its condition variable.
1269 CoreHandle *handle = 0;

1270 if ( stream_.apiHandle == 0 ) {

1272 handle = new CoreHandle;

1274 catch ( std::bad_alloc& ) {

1275 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

1279 if ( pthread_cond_init( &handle->condition, NULL ) ) {

1280 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

1283 stream_.apiHandle = (void *) handle;

1286 handle = (CoreHandle *) stream_.apiHandle;

1287 handle->iStream[mode] = firstStream;

1288 handle->nStreams[mode] = streamCount;

1289 handle->id[mode] = id;

1291 // Allocate necessary internal buffers.

1292 unsigned long bufferBytes;

1293 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

1294 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

// NOTE(review): the NULL check below runs after memset, so an allocation
// failure would dereference NULL in memset first — worth hoisting the check.
1295 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );

1296 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );

1297 if ( stream_.userBuffer[mode] == NULL ) {

1298 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

1302 // If possible, we will make use of the CoreAudio stream buffers as

1303 // "device buffers".  However, we can't do this if using multiple

1305 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

1307 bool makeBuffer = true;

1308 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
1309 if ( mode == INPUT ) {

1310 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

1311 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

1312 if ( bufferBytes <= bytesOut ) makeBuffer = false;

1316 if ( makeBuffer ) {

1317 bufferBytes *= *bufferSize;

1318 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

1319 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

1320 if ( stream_.deviceBuffer == NULL ) {

1321 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

1327 stream_.sampleRate = sampleRate;

1328 stream_.device[mode] = device;

1329 stream_.state = STREAM_STOPPED;

1330 stream_.callbackInfo.object = (void *) this;

1332 // Setup the buffer conversion information structure.

1333 if ( stream_.doConvertBuffer[mode] ) {

1334 if ( streamCount > 1 ) setConvertInfo( mode, 0 );

1335 else setConvertInfo( mode, channelOffset );

1338 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )

1339 // Only one callback procedure per device.

1340 stream_.mode = DUPLEX;

1342 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1343 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );

1345 // deprecated in favor of AudioDeviceCreateIOProcID()

1346 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );

1348 if ( result != noErr ) {

1349 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";

1350 errorText_ = errorStream_.str();

1353 if ( stream_.mode == OUTPUT && mode == INPUT )

1354 stream_.mode = DUPLEX;

1356 stream_.mode = mode;

1359 // Setup the device property listener for over/underload.

1360 property.mSelector = kAudioDeviceProcessorOverload;

1361 property.mScope = kAudioObjectPropertyScopeGlobal;

1362 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

// Error-unwind path: destroy the condition variable, release the handle and
// all per-direction buffers, and mark the stream closed before returning.
1368 pthread_cond_destroy( &handle->condition );

1370 stream_.apiHandle = 0;

1373 for ( int i=0; i<2; i++ ) {

1374 if ( stream_.userBuffer[i] ) {

1375 free( stream_.userBuffer[i] );

1376 stream_.userBuffer[i] = 0;

1380 if ( stream_.deviceBuffer ) {

1381 free( stream_.deviceBuffer );

1382 stream_.deviceBuffer = 0;

1385 stream_.state = STREAM_CLOSED;
\r
// Close the currently open stream and release all associated resources.
// Stops any running IOProcs, destroys the IOProc IDs (or removes the legacy
// IOProcs on pre-10.5 SDKs), frees user and device buffers, destroys the
// pthread condition variable, and resets the stream state to CLOSED.
// Issues a WARNING (not an error) if no stream is open.
1389 void RtApiCore :: closeStream( void )

1391 if ( stream_.state == STREAM_CLOSED ) {

1392 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1393 error( RtAudioError::WARNING );

1397 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Tear down the output side (handle index 0).
1398 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1399 if ( stream_.state == STREAM_RUNNING )

1400 AudioDeviceStop( handle->id[0], callbackHandler );

1401 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1402 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1404 // deprecated in favor of AudioDeviceDestroyIOProcID()

1405 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Tear down the input side (handle index 1), but only when it uses a
// different device — a same-device duplex stream shares one IOProc.
1409 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1410 if ( stream_.state == STREAM_RUNNING )

1411 AudioDeviceStop( handle->id[1], callbackHandler );

1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1413 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1415 // deprecated in favor of AudioDeviceDestroyIOProcID()

1416 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

// Free the per-direction user buffers and the shared device buffer.
1420 for ( int i=0; i<2; i++ ) {

1421 if ( stream_.userBuffer[i] ) {

1422 free( stream_.userBuffer[i] );

1423 stream_.userBuffer[i] = 0;

1427 if ( stream_.deviceBuffer ) {

1428 free( stream_.deviceBuffer );

1429 stream_.deviceBuffer = 0;

1432 // Destroy pthread condition variable.

1433 pthread_cond_destroy( &handle->condition );

1435 stream_.apiHandle = 0;

1437 stream_.mode = UNINITIALIZED;

1438 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: kick off the CoreAudio IOProc(s) with
// AudioDeviceStart, reset the drain bookkeeping, and mark the stream
// RUNNING.  Issues a WARNING if the stream is already running; raises a
// SYSTEM_ERROR if any AudioDeviceStart call fails.
1441 void RtApiCore :: startStream( void )

1444 if ( stream_.state == STREAM_RUNNING ) {

1445 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1446 error( RtAudioError::WARNING );

1450 OSStatus result = noErr;

1451 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Start the output device (handle index 0).
1452 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1454 result = AudioDeviceStart( handle->id[0], callbackHandler );

1455 if ( result != noErr ) {

1456 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1457 errorText_ = errorStream_.str();

// Start the input device (handle index 1) only when it differs from the
// output device; a same-device duplex stream shares a single IOProc.
1462 if ( stream_.mode == INPUT ||

1463 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1465 result = AudioDeviceStart( handle->id[1], callbackHandler );

1466 if ( result != noErr ) {

1467 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1468 errorText_ = errorStream_.str();

// Reset the drain handshake used by stopStream()/callbackEvent().
1473 handle->drainCounter = 0;

1474 handle->internalDrain = false;

1475 stream_.state = STREAM_RUNNING;

1478 if ( result == noErr ) return;

1479 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream gracefully.  For output, first lets the callback
// drain: sets drainCounter and blocks on the handle's condition variable
// until callbackEvent() signals that pending output has been flushed, then
// calls AudioDeviceStop on each active device.  Issues a WARNING if already
// stopped; raises a SYSTEM_ERROR if any AudioDeviceStop call fails.
1482 void RtApiCore :: stopStream( void )

1485 if ( stream_.state == STREAM_STOPPED ) {

1486 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1487 error( RtAudioError::WARNING );

1491 OSStatus result = noErr;

1492 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1493 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain is in progress yet: request one (value 2
// skips the user callback) and wait for callbackEvent() to signal completion.
1495 if ( handle->drainCounter == 0 ) {

1496 handle->drainCounter = 2;

1497 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1500 result = AudioDeviceStop( handle->id[0], callbackHandler );

1501 if ( result != noErr ) {

1502 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1503 errorText_ = errorStream_.str();

// Stop the input device only when it is distinct from the output device.
1508 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1510 result = AudioDeviceStop( handle->id[1], callbackHandler );

1511 if ( result != noErr ) {

1512 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1513 errorText_ = errorStream_.str();

1518 stream_.state = STREAM_STOPPED;

1521 if ( result == noErr ) return;

1522 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the running stream immediately: set drainCounter to 2 so the
// callback skips further user-callback invocations / output draining (the
// subsequent stopStream() call is elided from this view — confirm against
// the canonical source).  Issues a WARNING if the stream is already stopped.
1525 void RtApiCore :: abortStream( void )

1528 if ( stream_.state == STREAM_STOPPED ) {

1529 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1530 error( RtAudioError::WARNING );

1534 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1535 handle->drainCounter = 2;
\r
1540 // This function will be called by a spawned thread when the user

1541 // callback function signals that the stream should be stopped or

1542 // aborted.  It is better to handle it this way because the

1543 // callbackEvent() function probably should return before the AudioDeviceStop()

1544 // function is called.

// Thread entry point: ptr is a CallbackInfo* whose 'object' field holds the
// RtApiCore instance; calls stopStream() on it and exits the thread.
1545 static void *coreStopStream( void *ptr )

1547 CallbackInfo *info = (CallbackInfo *) ptr;

1548 RtApiCore *object = (RtApiCore *) info->object;

1550 object->stopStream();

1551 pthread_exit( NULL );
\r
1554 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1555 const AudioBufferList *inBufferList,
\r
1556 const AudioBufferList *outBufferList )
\r
1558 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1559 if ( stream_.state == STREAM_CLOSED ) {
\r
1560 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1561 error( RtAudioError::WARNING );
\r
1565 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1566 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1568 // Check if we were draining the stream and signal is finished.
\r
1569 if ( handle->drainCounter > 3 ) {
\r
1570 ThreadHandle threadId;
\r
1572 stream_.state = STREAM_STOPPING;
\r
1573 if ( handle->internalDrain == true )
\r
1574 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1575 else // external call to stopStream()
\r
1576 pthread_cond_signal( &handle->condition );
\r
1580 AudioDeviceID outputDevice = handle->id[0];
\r
1582 // Invoke user callback to get fresh output data UNLESS we are
\r
1583 // draining stream or duplex mode AND the input/output devices are
\r
1584 // different AND this function is called for the input device.
\r
1585 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1586 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1587 double streamTime = getStreamTime();
\r
1588 RtAudioStreamStatus status = 0;
\r
1589 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1590 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1591 handle->xrun[0] = false;
\r
1593 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1594 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1595 handle->xrun[1] = false;
\r
1598 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1599 stream_.bufferSize, streamTime, status, info->userData );
\r
1600 if ( cbReturnValue == 2 ) {
\r
1601 stream_.state = STREAM_STOPPING;
\r
1602 handle->drainCounter = 2;
\r
1606 else if ( cbReturnValue == 1 ) {
\r
1607 handle->drainCounter = 1;
\r
1608 handle->internalDrain = true;
\r
1612 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1614 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1616 if ( handle->nStreams[0] == 1 ) {
\r
1617 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1619 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1621 else { // fill multiple streams with zeros
\r
1622 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1623 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1625 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1629 else if ( handle->nStreams[0] == 1 ) {
\r
1630 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1631 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1632 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1634 else { // copy from user buffer
\r
1635 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1636 stream_.userBuffer[0],
\r
1637 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1640 else { // fill multiple streams
\r
1641 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1642 if ( stream_.doConvertBuffer[0] ) {
\r
1643 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1647 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1648 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1649 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1650 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1651 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1654 else { // fill multiple multi-channel streams with interleaved data
\r
1655 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1656 Float32 *out, *in;
\r
1658 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1659 UInt32 inChannels = stream_.nUserChannels[0];
\r
1660 if ( stream_.doConvertBuffer[0] ) {
\r
1661 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1662 inChannels = stream_.nDeviceChannels[0];
\r
1665 if ( inInterleaved ) inOffset = 1;
\r
1666 else inOffset = stream_.bufferSize;
\r
1668 channelsLeft = inChannels;
\r
1669 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1671 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1672 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1675 // Account for possible channel offset in first stream
\r
1676 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1677 streamChannels -= stream_.channelOffset[0];
\r
1678 outJump = stream_.channelOffset[0];
\r
1682 // Account for possible unfilled channels at end of the last stream
\r
1683 if ( streamChannels > channelsLeft ) {
\r
1684 outJump = streamChannels - channelsLeft;
\r
1685 streamChannels = channelsLeft;
\r
1688 // Determine input buffer offsets and skips
\r
1689 if ( inInterleaved ) {
\r
1690 inJump = inChannels;
\r
1691 in += inChannels - channelsLeft;
\r
1695 in += (inChannels - channelsLeft) * inOffset;
\r
1698 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1699 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1700 *out++ = in[j*inOffset];
\r
1705 channelsLeft -= streamChannels;
\r
1711 // Don't bother draining input
\r
1712 if ( handle->drainCounter ) {
\r
1713 handle->drainCounter++;
\r
1717 AudioDeviceID inputDevice;
\r
1718 inputDevice = handle->id[1];
\r
1719 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1721 if ( handle->nStreams[1] == 1 ) {
\r
1722 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1723 convertBuffer( stream_.userBuffer[1],
\r
1724 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1725 stream_.convertInfo[1] );
\r
1727 else { // copy to user buffer
\r
1728 memcpy( stream_.userBuffer[1],
\r
1729 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1730 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1733 else { // read from multiple streams
\r
1734 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1735 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1737 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1738 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1739 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1740 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1741 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1744 else { // read from multiple multi-channel streams
\r
1745 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1746 Float32 *out, *in;
\r
1748 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1749 UInt32 outChannels = stream_.nUserChannels[1];
\r
1750 if ( stream_.doConvertBuffer[1] ) {
\r
1751 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1752 outChannels = stream_.nDeviceChannels[1];
\r
1755 if ( outInterleaved ) outOffset = 1;
\r
1756 else outOffset = stream_.bufferSize;
\r
1758 channelsLeft = outChannels;
\r
1759 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1761 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1762 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1765 // Account for possible channel offset in first stream
\r
1766 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1767 streamChannels -= stream_.channelOffset[1];
\r
1768 inJump = stream_.channelOffset[1];
\r
1772 // Account for possible unread channels at end of the last stream
\r
1773 if ( streamChannels > channelsLeft ) {
\r
1774 inJump = streamChannels - channelsLeft;
\r
1775 streamChannels = channelsLeft;
\r
1778 // Determine output buffer offsets and skips
\r
1779 if ( outInterleaved ) {
\r
1780 outJump = outChannels;
\r
1781 out += outChannels - channelsLeft;
\r
1785 out += (outChannels - channelsLeft) * outOffset;
\r
1788 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1789 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1790 out[j*outOffset] = *in++;
\r
1795 channelsLeft -= streamChannels;
\r
1799 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1800 convertBuffer( stream_.userBuffer[1],
\r
1801 stream_.deviceBuffer,
\r
1802 stream_.convertInfo[1] );
\r
1808 //MUTEX_UNLOCK( &stream_.mutex );
\r
1810 RtApi::tickStreamTime();
\r
1814 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1818 case kAudioHardwareNotRunningError:
\r
1819 return "kAudioHardwareNotRunningError";
\r
1821 case kAudioHardwareUnspecifiedError:
\r
1822 return "kAudioHardwareUnspecifiedError";
\r
1824 case kAudioHardwareUnknownPropertyError:
\r
1825 return "kAudioHardwareUnknownPropertyError";
\r
1827 case kAudioHardwareBadPropertySizeError:
\r
1828 return "kAudioHardwareBadPropertySizeError";
\r
1830 case kAudioHardwareIllegalOperationError:
\r
1831 return "kAudioHardwareIllegalOperationError";
\r
1833 case kAudioHardwareBadObjectError:
\r
1834 return "kAudioHardwareBadObjectError";
\r
1836 case kAudioHardwareBadDeviceError:
\r
1837 return "kAudioHardwareBadDeviceError";
\r
1839 case kAudioHardwareBadStreamError:
\r
1840 return "kAudioHardwareBadStreamError";
\r
1842 case kAudioHardwareUnsupportedOperationError:
\r
1843 return "kAudioHardwareUnsupportedOperationError";
\r
1845 case kAudioDeviceUnsupportedFormatError:
\r
1846 return "kAudioDeviceUnsupportedFormatError";
\r
1848 case kAudioDevicePermissionsError:
\r
1849 return "kAudioDevicePermissionsError";
\r
1852 return "CoreAudio unknown error";
\r
1856 //******************** End of __MACOSX_CORE__ *********************//
\r
1859 #if defined(__UNIX_JACK__)
\r
1861 // JACK is a low-latency audio server, originally written for the
\r
1862 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1863 // connect a number of different applications to an audio device, as
\r
1864 // well as allowing them to share audio between themselves.
\r
1866 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1867 // have ports connected to the server. The JACK server is typically
\r
1868 // started in a terminal as follows:
\r
1870 // .jackd -d alsa -d hw:0
\r
1872 // or through an interface program such as qjackctl. Many of the
\r
1873 // parameters normally set for a stream are fixed by the JACK server
\r
1874 // and can be specified when the JACK server is started. In
\r
1877 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1879 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1880 // frames, and number of buffers = 4. Once the server is running, it
\r
1881 // is not possible to override these values. If the values are not
\r
1882 // specified in the command-line, the JACK server uses default values.
\r
1884 // The JACK server does not have to be running when an instance of
\r
1885 // RtApiJack is created, though the function getDeviceCount() will
\r
1886 // report 0 devices found until JACK has been started. When no
\r
1887 // devices are available (i.e., the JACK server is not running), a
\r
1888 // stream cannot be opened.
\r
1890 #include <jack/jack.h>
\r
1891 #include <unistd.h>
\r
1894 // A structure to hold various information related to the Jack API
\r
1895 // implementation.
\r
1896 struct JackHandle {
\r
1897 jack_client_t *client;
\r
1898 jack_port_t **ports[2];
\r
1899 std::string deviceName[2];
\r
1901 pthread_cond_t condition;
\r
1902 int drainCounter; // Tracks callback counts when draining
\r
1903 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1906 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1909 static void jackSilentError( const char * ) {};
\r
1911 RtApiJack :: RtApiJack()
\r
1913 // Nothing to do here.
\r
1914 #if !defined(__RTAUDIO_DEBUG__)
\r
1915 // Turn off Jack's internal error reporting.
\r
1916 jack_set_error_function( &jackSilentError );
\r
1920 RtApiJack :: ~RtApiJack()
\r
1922 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1925 unsigned int RtApiJack :: getDeviceCount( void )
\r
1927 // See if we can become a jack client.
\r
1928 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1929 jack_status_t *status = NULL;
\r
1930 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1931 if ( client == 0 ) return 0;
\r
1933 const char **ports;
\r
1934 std::string port, previousPort;
\r
1935 unsigned int nChannels = 0, nDevices = 0;
\r
1936 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1938 // Parse the port names up to the first colon (:).
\r
1939 size_t iColon = 0;
\r
1941 port = (char *) ports[ nChannels ];
\r
1942 iColon = port.find(":");
\r
1943 if ( iColon != std::string::npos ) {
\r
1944 port = port.substr( 0, iColon + 1 );
\r
1945 if ( port != previousPort ) {
\r
1947 previousPort = port;
\r
1950 } while ( ports[++nChannels] );
\r
1954 jack_client_close( client );
\r
1958 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1960 RtAudio::DeviceInfo info;
\r
1961 info.probed = false;
\r
1963 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1964 jack_status_t *status = NULL;
\r
1965 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1966 if ( client == 0 ) {
\r
1967 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1968 error( RtAudioError::WARNING );
\r
1972 const char **ports;
\r
1973 std::string port, previousPort;
\r
1974 unsigned int nPorts = 0, nDevices = 0;
\r
1975 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1977 // Parse the port names up to the first colon (:).
\r
1978 size_t iColon = 0;
\r
1980 port = (char *) ports[ nPorts ];
\r
1981 iColon = port.find(":");
\r
1982 if ( iColon != std::string::npos ) {
\r
1983 port = port.substr( 0, iColon );
\r
1984 if ( port != previousPort ) {
\r
1985 if ( nDevices == device ) info.name = port;
\r
1987 previousPort = port;
\r
1990 } while ( ports[++nPorts] );
\r
1994 if ( device >= nDevices ) {
\r
1995 jack_client_close( client );
\r
1996 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1997 error( RtAudioError::INVALID_USE );
\r
2001 // Get the current jack server sample rate.
\r
2002 info.sampleRates.clear();
\r
2003 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
2005 // Count the available ports containing the client name as device
\r
2006 // channels. Jack "input ports" equal RtAudio output channels.
\r
2007 unsigned int nChannels = 0;
\r
2008 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2010 while ( ports[ nChannels ] ) nChannels++;
\r
2012 info.outputChannels = nChannels;
\r
2015 // Jack "output ports" equal RtAudio input channels.
\r
2017 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2019 while ( ports[ nChannels ] ) nChannels++;
\r
2021 info.inputChannels = nChannels;
\r
2024 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2025 jack_client_close(client);
\r
2026 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2027 error( RtAudioError::WARNING );
\r
2031 // If device opens for both playback and capture, we determine the channels.
\r
2032 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2033 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2035 // Jack always uses 32-bit floats.
\r
2036 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2038 // Jack doesn't provide default devices so we'll use the first available one.
\r
2039 if ( device == 0 && info.outputChannels > 0 )
\r
2040 info.isDefaultOutput = true;
\r
2041 if ( device == 0 && info.inputChannels > 0 )
\r
2042 info.isDefaultInput = true;
\r
2044 jack_client_close(client);
\r
2045 info.probed = true;
\r
2049 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2051 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2053 RtApiJack *object = (RtApiJack *) info->object;
\r
2054 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2059 // This function will be called by a spawned thread when the Jack
\r
2060 // server signals that it is shutting down. It is necessary to handle
\r
2061 // it this way because the jackShutdown() function must return before
\r
2062 // the jack_deactivate() function (in closeStream()) will return.
\r
2063 static void *jackCloseStream( void *ptr )
\r
2065 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2066 RtApiJack *object = (RtApiJack *) info->object;
\r
2068 object->closeStream();
\r
2070 pthread_exit( NULL );
\r
2072 static void jackShutdown( void *infoPointer )
\r
2074 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2075 RtApiJack *object = (RtApiJack *) info->object;
\r
2077 // Check current stream state. If stopped, then we'll assume this
\r
2078 // was called as a result of a call to RtApiJack::stopStream (the
\r
2079 // deactivation of a client handle causes this function to be called).
\r
2080 // If not, we'll assume the Jack server is shutting down or some
\r
2081 // other problem occurred and we should close the stream.
\r
2082 if ( object->isStreamRunning() == false ) return;
\r
2084 ThreadHandle threadId;
\r
2085 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2086 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2089 static int jackXrun( void *infoPointer )
\r
2091 JackHandle *handle = (JackHandle *) infoPointer;
\r
2093 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2094 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2099 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2100 unsigned int firstChannel, unsigned int sampleRate,
\r
2101 RtAudioFormat format, unsigned int *bufferSize,
\r
2102 RtAudio::StreamOptions *options )
\r
2104 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2106 // Look for jack server and try to become a client (only do once per stream).
\r
2107 jack_client_t *client = 0;
\r
2108 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2109 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2110 jack_status_t *status = NULL;
\r
2111 if ( options && !options->streamName.empty() )
\r
2112 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2114 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2115 if ( client == 0 ) {
\r
2116 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2117 error( RtAudioError::WARNING );
\r
2122 // The handle must have been created on an earlier pass.
\r
2123 client = handle->client;
\r
2126 const char **ports;
\r
2127 std::string port, previousPort, deviceName;
\r
2128 unsigned int nPorts = 0, nDevices = 0;
\r
2129 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2131 // Parse the port names up to the first colon (:).
\r
2132 size_t iColon = 0;
\r
2134 port = (char *) ports[ nPorts ];
\r
2135 iColon = port.find(":");
\r
2136 if ( iColon != std::string::npos ) {
\r
2137 port = port.substr( 0, iColon );
\r
2138 if ( port != previousPort ) {
\r
2139 if ( nDevices == device ) deviceName = port;
\r
2141 previousPort = port;
\r
2144 } while ( ports[++nPorts] );
\r
2148 if ( device >= nDevices ) {
\r
2149 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2153 // Count the available ports containing the client name as device
\r
2154 // channels. Jack "input ports" equal RtAudio output channels.
\r
2155 unsigned int nChannels = 0;
\r
2156 unsigned long flag = JackPortIsInput;
\r
2157 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2160 while ( ports[ nChannels ] ) nChannels++;
\r
2164 // Compare the jack ports for specified client to the requested number of channels.
\r
2165 if ( nChannels < (channels + firstChannel) ) {
\r
2166 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2167 errorText_ = errorStream_.str();
\r
2171 // Check the jack server sample rate.
\r
2172 unsigned int jackRate = jack_get_sample_rate( client );
\r
2173 if ( sampleRate != jackRate ) {
\r
2174 jack_client_close( client );
\r
2175 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2176 errorText_ = errorStream_.str();
\r
2179 stream_.sampleRate = jackRate;
\r
2181 // Get the latency of the JACK port.
\r
2182 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2183 if ( ports[ firstChannel ] ) {
\r
2184 // Added by Ge Wang
\r
2185 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2186 // the range (usually the min and max are equal)
\r
2187 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2188 // get the latency range
\r
2189 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2190 // be optimistic, use the min!
\r
2191 stream_.latency[mode] = latrange.min;
\r
2192 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2196 // The jack server always uses 32-bit floating-point data.
\r
2197 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2198 stream_.userFormat = format;
\r
2200 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2201 else stream_.userInterleaved = true;
\r
2203 // Jack always uses non-interleaved buffers.
\r
2204 stream_.deviceInterleaved[mode] = false;
\r
2206 // Jack always provides host byte-ordered data.
\r
2207 stream_.doByteSwap[mode] = false;
\r
2209 // Get the buffer size. The buffer size and number of buffers
\r
2210 // (periods) is set when the jack server is started.
\r
2211 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2212 *bufferSize = stream_.bufferSize;
\r
2214 stream_.nDeviceChannels[mode] = channels;
\r
2215 stream_.nUserChannels[mode] = channels;
\r
2217 // Set flags for buffer conversion.
\r
2218 stream_.doConvertBuffer[mode] = false;
\r
2219 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2220 stream_.doConvertBuffer[mode] = true;
\r
2221 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2222 stream_.nUserChannels[mode] > 1 )
\r
2223 stream_.doConvertBuffer[mode] = true;
\r
2225 // Allocate our JackHandle structure for the stream.
\r
2226 if ( handle == 0 ) {
\r
2228 handle = new JackHandle;
\r
2230 catch ( std::bad_alloc& ) {
\r
2231 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2235 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2236 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2239 stream_.apiHandle = (void *) handle;
\r
2240 handle->client = client;
\r
2242 handle->deviceName[mode] = deviceName;
\r
2244 // Allocate necessary internal buffers.
\r
2245 unsigned long bufferBytes;
\r
2246 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2247 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2248 if ( stream_.userBuffer[mode] == NULL ) {
\r
2249 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2253 if ( stream_.doConvertBuffer[mode] ) {
\r
2255 bool makeBuffer = true;
\r
2256 if ( mode == OUTPUT )
\r
2257 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2258 else { // mode == INPUT
\r
2259 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2260 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2261 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2262 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2266 if ( makeBuffer ) {
\r
2267 bufferBytes *= *bufferSize;
\r
2268 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2269 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2270 if ( stream_.deviceBuffer == NULL ) {
\r
2271 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2277 // Allocate memory for the Jack ports (channels) identifiers.
\r
2278 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2279 if ( handle->ports[mode] == NULL ) {
\r
2280 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2284 stream_.device[mode] = device;
\r
2285 stream_.channelOffset[mode] = firstChannel;
\r
2286 stream_.state = STREAM_STOPPED;
\r
2287 stream_.callbackInfo.object = (void *) this;
\r
2289 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2290 // We had already set up the stream for output.
\r
2291 stream_.mode = DUPLEX;
\r
2293 stream_.mode = mode;
\r
2294 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2295 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2296 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2299 // Register our ports.
\r
2301 if ( mode == OUTPUT ) {
\r
2302 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2303 snprintf( label, 64, "outport %d", i );
\r
2304 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2305 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2309 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2310 snprintf( label, 64, "inport %d", i );
\r
2311 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2312 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2316 // Setup the buffer conversion information structure. We don't use
\r
2317 // buffers to do channel offsets, so we override that parameter
\r
2319 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2325 pthread_cond_destroy( &handle->condition );
\r
2326 jack_client_close( handle->client );
\r
2328 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2329 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2332 stream_.apiHandle = 0;
\r
2335 for ( int i=0; i<2; i++ ) {
\r
2336 if ( stream_.userBuffer[i] ) {
\r
2337 free( stream_.userBuffer[i] );
\r
2338 stream_.userBuffer[i] = 0;
\r
2342 if ( stream_.deviceBuffer ) {
\r
2343 free( stream_.deviceBuffer );
\r
2344 stream_.deviceBuffer = 0;
\r
2350 void RtApiJack :: closeStream( void )
\r
2352 if ( stream_.state == STREAM_CLOSED ) {
\r
2353 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2354 error( RtAudioError::WARNING );
\r
2358 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2361 if ( stream_.state == STREAM_RUNNING )
\r
2362 jack_deactivate( handle->client );
\r
2364 jack_client_close( handle->client );
\r
2368 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2369 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2370 pthread_cond_destroy( &handle->condition );
\r
2372 stream_.apiHandle = 0;
\r
2375 for ( int i=0; i<2; i++ ) {
\r
2376 if ( stream_.userBuffer[i] ) {
\r
2377 free( stream_.userBuffer[i] );
\r
2378 stream_.userBuffer[i] = 0;
\r
2382 if ( stream_.deviceBuffer ) {
\r
2383 free( stream_.deviceBuffer );
\r
2384 stream_.deviceBuffer = 0;
\r
2387 stream_.mode = UNINITIALIZED;
\r
2388 stream_.state = STREAM_CLOSED;
\r
2391 void RtApiJack :: startStream( void )
\r
2394 if ( stream_.state == STREAM_RUNNING ) {
\r
2395 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2396 error( RtAudioError::WARNING );
\r
2400 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2401 int result = jack_activate( handle->client );
\r
2403 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2407 const char **ports;
\r
2409 // Get the list of available ports.
\r
2410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2418 // Now make the port connections. Since RtAudio wasn't designed to
\r
2419 // allow the user to select particular channels of a device, we'll
\r
2420 // just open the first "nChannels" ports with offset.
\r
2421 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2423 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2424 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2427 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2434 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2436 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2437 if ( ports == NULL) {
\r
2438 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2442 // Now make the port connections. See note above.
\r
2443 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2445 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2446 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2449 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2456 handle->drainCounter = 0;
\r
2457 handle->internalDrain = false;
\r
2458 stream_.state = STREAM_RUNNING;
\r
2461 if ( result == 0 ) return;
\r
2462 error( RtAudioError::SYSTEM_ERROR );
\r
2465 void RtApiJack :: stopStream( void )
\r
2468 if ( stream_.state == STREAM_STOPPED ) {
\r
2469 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2470 error( RtAudioError::WARNING );
\r
2474 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2475 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2477 if ( handle->drainCounter == 0 ) {
\r
2478 handle->drainCounter = 2;
\r
2479 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2483 jack_deactivate( handle->client );
\r
2484 stream_.state = STREAM_STOPPED;
\r
2487 void RtApiJack :: abortStream( void )
\r
2490 if ( stream_.state == STREAM_STOPPED ) {
\r
2491 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2492 error( RtAudioError::WARNING );
\r
2496 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2497 handle->drainCounter = 2;
\r
2502 // This function will be called by a spawned thread when the user
\r
2503 // callback function signals that the stream should be stopped or
\r
2504 // aborted. It is necessary to handle it this way because the
\r
2505 // callbackEvent() function must return before the jack_deactivate()
\r
2506 // function will return.
\r
2507 static void *jackStopStream( void *ptr )
\r
2509 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2510 RtApiJack *object = (RtApiJack *) info->object;
\r
2512 object->stopStream();
\r
2513 pthread_exit( NULL );
\r
2516 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2518 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2519 if ( stream_.state == STREAM_CLOSED ) {
\r
2520 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2521 error( RtAudioError::WARNING );
\r
2524 if ( stream_.bufferSize != nframes ) {
\r
2525 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2526 error( RtAudioError::WARNING );
\r
2530 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2531 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2533 // Check if we were draining the stream and signal is finished.
\r
2534 if ( handle->drainCounter > 3 ) {
\r
2535 ThreadHandle threadId;
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 if ( handle->internalDrain == true )
\r
2539 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2541 pthread_cond_signal( &handle->condition );
\r
2545 // Invoke user callback first, to get fresh output data.
\r
2546 if ( handle->drainCounter == 0 ) {
\r
2547 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2548 double streamTime = getStreamTime();
\r
2549 RtAudioStreamStatus status = 0;
\r
2550 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2551 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2552 handle->xrun[0] = false;
\r
2554 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2555 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2556 handle->xrun[1] = false;
\r
2558 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2559 stream_.bufferSize, streamTime, status, info->userData );
\r
2560 if ( cbReturnValue == 2 ) {
\r
2561 stream_.state = STREAM_STOPPING;
\r
2562 handle->drainCounter = 2;
\r
2564 pthread_create( &id, NULL, jackStopStream, info );
\r
2567 else if ( cbReturnValue == 1 ) {
\r
2568 handle->drainCounter = 1;
\r
2569 handle->internalDrain = true;
\r
2573 jack_default_audio_sample_t *jackbuffer;
\r
2574 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2575 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2577 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2579 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2581 memset( jackbuffer, 0, bufferBytes );
\r
2585 else if ( stream_.doConvertBuffer[0] ) {
\r
2587 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2589 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2590 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2591 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2594 else { // no buffer conversion
\r
2595 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2596 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2597 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2602 // Don't bother draining input
\r
2603 if ( handle->drainCounter ) {
\r
2604 handle->drainCounter++;
\r
2608 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2610 if ( stream_.doConvertBuffer[1] ) {
\r
2611 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2612 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2613 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2615 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2617 else { // no buffer conversion
\r
2618 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2619 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2620 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2626 RtApi::tickStreamTime();
\r
2629 //******************** End of __UNIX_JACK__ *********************//
\r
2632 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2634 // The ASIO API is designed around a callback scheme, so this
\r
2635 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2636 // Jack. The primary constraint with ASIO is that it only allows
\r
2637 // access to a single driver at a time. Thus, it is not possible to
\r
2638 // have more than one simultaneous RtAudio stream.
\r
2640 // This implementation also requires a number of external ASIO files
\r
2641 // and a few global variables. The ASIO callback scheme does not
\r
2642 // allow for the passing of user data, so we must create a global
\r
2643 // pointer to our callbackInfo structure.
\r
2645 // On unix systems, we make use of a pthread condition variable.
\r
2646 // Since there is no equivalent in Windows, I hacked something based
\r
2647 // on information found in
\r
2648 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2650 #include "asiosys.h"
\r
2652 #include "iasiothiscallresolver.h"
\r
2653 #include "asiodrivers.h"
\r
2656 static AsioDrivers drivers;
\r
2657 static ASIOCallbacks asioCallbacks;
\r
2658 static ASIODriverInfo driverInfo;
\r
2659 static CallbackInfo *asioCallbackInfo;
\r
2660 static bool asioXRun;
\r
2662 struct AsioHandle {
\r
2663 int drainCounter; // Tracks callback counts when draining
\r
2664 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2665 ASIOBufferInfo *bufferInfos;
\r
2669 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2672 // Function declarations (definitions at end of section)
\r
2673 static const char* getAsioErrorString( ASIOError result );
\r
2674 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2675 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2677 RtApiAsio :: RtApiAsio()
\r
2679 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2680 // CoInitialize beforehand, but it must be for appartment threading
\r
2681 // (in which case, CoInitilialize will return S_FALSE here).
\r
2682 coInitialized_ = false;
\r
2683 HRESULT hr = CoInitialize( NULL );
\r
2684 if ( FAILED(hr) ) {
\r
2685 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2686 error( RtAudioError::WARNING );
\r
2688 coInitialized_ = true;
\r
2690 drivers.removeCurrentDriver();
\r
2691 driverInfo.asioVersion = 2;
\r
2693 // See note in DirectSound implementation about GetDesktopWindow().
\r
2694 driverInfo.sysRef = GetForegroundWindow();
\r
2697 RtApiAsio :: ~RtApiAsio()
\r
2699 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2700 if ( coInitialized_ ) CoUninitialize();
\r
2703 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2705 return (unsigned int) drivers.asioGetNumDev();
\r
2708 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2710 RtAudio::DeviceInfo info;
\r
2711 info.probed = false;
\r
2714 unsigned int nDevices = getDeviceCount();
\r
2715 if ( nDevices == 0 ) {
\r
2716 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2717 error( RtAudioError::INVALID_USE );
\r
2721 if ( device >= nDevices ) {
\r
2722 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2723 error( RtAudioError::INVALID_USE );
\r
2727 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2728 if ( stream_.state != STREAM_CLOSED ) {
\r
2729 if ( device >= devices_.size() ) {
\r
2730 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2731 error( RtAudioError::WARNING );
\r
2734 return devices_[ device ];
\r
2737 char driverName[32];
\r
2738 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2739 if ( result != ASE_OK ) {
\r
2740 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2741 errorText_ = errorStream_.str();
\r
2742 error( RtAudioError::WARNING );
\r
2746 info.name = driverName;
\r
2748 if ( !drivers.loadDriver( driverName ) ) {
\r
2749 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2750 errorText_ = errorStream_.str();
\r
2751 error( RtAudioError::WARNING );
\r
2755 result = ASIOInit( &driverInfo );
\r
2756 if ( result != ASE_OK ) {
\r
2757 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2758 errorText_ = errorStream_.str();
\r
2759 error( RtAudioError::WARNING );
\r
2763 // Determine the device channel information.
\r
2764 long inputChannels, outputChannels;
\r
2765 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2766 if ( result != ASE_OK ) {
\r
2767 drivers.removeCurrentDriver();
\r
2768 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2769 errorText_ = errorStream_.str();
\r
2770 error( RtAudioError::WARNING );
\r
2774 info.outputChannels = outputChannels;
\r
2775 info.inputChannels = inputChannels;
\r
2776 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2777 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2779 // Determine the supported sample rates.
\r
2780 info.sampleRates.clear();
\r
2781 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2782 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2783 if ( result == ASE_OK )
\r
2784 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2787 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2788 ASIOChannelInfo channelInfo;
\r
2789 channelInfo.channel = 0;
\r
2790 channelInfo.isInput = true;
\r
2791 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2792 result = ASIOGetChannelInfo( &channelInfo );
\r
2793 if ( result != ASE_OK ) {
\r
2794 drivers.removeCurrentDriver();
\r
2795 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2797 error( RtAudioError::WARNING );
\r
2801 info.nativeFormats = 0;
\r
2802 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2803 info.nativeFormats |= RTAUDIO_SINT16;
\r
2804 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2805 info.nativeFormats |= RTAUDIO_SINT32;
\r
2806 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2807 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2808 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2809 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2810 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2811 info.nativeFormats |= RTAUDIO_SINT24;
\r
2813 if ( info.outputChannels > 0 )
\r
2814 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2815 if ( info.inputChannels > 0 )
\r
2816 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2818 info.probed = true;
\r
2819 drivers.removeCurrentDriver();
\r
2823 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2825 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2826 object->callbackEvent( index );
\r
2829 void RtApiAsio :: saveDeviceInfo( void )
\r
2833 unsigned int nDevices = getDeviceCount();
\r
2834 devices_.resize( nDevices );
\r
2835 for ( unsigned int i=0; i<nDevices; i++ )
\r
2836 devices_[i] = getDeviceInfo( i );
\r
2839 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2840 unsigned int firstChannel, unsigned int sampleRate,
\r
2841 RtAudioFormat format, unsigned int *bufferSize,
\r
2842 RtAudio::StreamOptions *options )
\r
2844 // For ASIO, a duplex stream MUST use the same driver.
\r
2845 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2846 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2850 char driverName[32];
\r
2851 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2852 if ( result != ASE_OK ) {
\r
2853 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2854 errorText_ = errorStream_.str();
\r
2858 // Only load the driver once for duplex stream.
\r
2859 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2860 // The getDeviceInfo() function will not work when a stream is open
\r
2861 // because ASIO does not allow multiple devices to run at the same
\r
2862 // time. Thus, we'll probe the system before opening a stream and
\r
2863 // save the results for use by getDeviceInfo().
\r
2864 this->saveDeviceInfo();
\r
2866 if ( !drivers.loadDriver( driverName ) ) {
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2872 result = ASIOInit( &driverInfo );
\r
2873 if ( result != ASE_OK ) {
\r
2874 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2875 errorText_ = errorStream_.str();
\r
2880 // Check the device channel count.
\r
2881 long inputChannels, outputChannels;
\r
2882 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2883 if ( result != ASE_OK ) {
\r
2884 drivers.removeCurrentDriver();
\r
2885 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2886 errorText_ = errorStream_.str();
\r
2890 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2891 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2892 drivers.removeCurrentDriver();
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2894 errorText_ = errorStream_.str();
\r
2897 stream_.nDeviceChannels[mode] = channels;
\r
2898 stream_.nUserChannels[mode] = channels;
\r
2899 stream_.channelOffset[mode] = firstChannel;
\r
2901 // Verify the sample rate is supported.
\r
2902 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2903 if ( result != ASE_OK ) {
\r
2904 drivers.removeCurrentDriver();
\r
2905 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2906 errorText_ = errorStream_.str();
\r
2910 // Get the current sample rate
\r
2911 ASIOSampleRate currentRate;
\r
2912 result = ASIOGetSampleRate( ¤tRate );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Set the sample rate only if necessary
\r
2921 if ( currentRate != sampleRate ) {
\r
2922 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2923 if ( result != ASE_OK ) {
\r
2924 drivers.removeCurrentDriver();
\r
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2926 errorText_ = errorStream_.str();
\r
2931 // Determine the driver data type.
\r
2932 ASIOChannelInfo channelInfo;
\r
2933 channelInfo.channel = 0;
\r
2934 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2935 else channelInfo.isInput = true;
\r
2936 result = ASIOGetChannelInfo( &channelInfo );
\r
2937 if ( result != ASE_OK ) {
\r
2938 drivers.removeCurrentDriver();
\r
2939 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2940 errorText_ = errorStream_.str();
\r
2944 // Assuming WINDOWS host is always little-endian.
\r
2945 stream_.doByteSwap[mode] = false;
\r
2946 stream_.userFormat = format;
\r
2947 stream_.deviceFormat[mode] = 0;
\r
2948 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2949 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2950 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2952 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2953 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2954 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2956 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2957 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2958 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2960 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2961 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2962 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2964 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2965 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2966 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2969 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2970 drivers.removeCurrentDriver();
\r
2971 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2972 errorText_ = errorStream_.str();
\r
2976 // Set the buffer size. For a duplex stream, this will end up
\r
2977 // setting the buffer size based on the input constraints, which
\r
2979 long minSize, maxSize, preferSize, granularity;
\r
2980 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2981 if ( result != ASE_OK ) {
\r
2982 drivers.removeCurrentDriver();
\r
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2984 errorText_ = errorStream_.str();
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2990 else if ( granularity == -1 ) {
\r
2991 // Make sure bufferSize is a power of two.
\r
2992 int log2_of_min_size = 0;
\r
2993 int log2_of_max_size = 0;
\r
2995 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2996 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2997 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3000 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3001 int min_delta_num = log2_of_min_size;
\r
3003 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3004 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3005 if (current_delta < min_delta) {
\r
3006 min_delta = current_delta;
\r
3007 min_delta_num = i;
\r
3011 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3012 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3013 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3015 else if ( granularity != 0 ) {
\r
3016 // Set to an even multiple of granularity, rounding up.
\r
3017 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3020 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
3021 drivers.removeCurrentDriver();
\r
3022 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3026 stream_.bufferSize = *bufferSize;
\r
3027 stream_.nBuffers = 2;
\r
3029 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3030 else stream_.userInterleaved = true;
\r
3032 // ASIO always uses non-interleaved buffers.
\r
3033 stream_.deviceInterleaved[mode] = false;
\r
3035 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3036 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3037 if ( handle == 0 ) {
\r
3039 handle = new AsioHandle;
\r
3041 catch ( std::bad_alloc& ) {
\r
3042 //if ( handle == NULL ) {
\r
3043 drivers.removeCurrentDriver();
\r
3044 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3047 handle->bufferInfos = 0;
\r
3049 // Create a manual-reset event.
\r
3050 handle->condition = CreateEvent( NULL, // no security
\r
3051 TRUE, // manual-reset
\r
3052 FALSE, // non-signaled initially
\r
3053 NULL ); // unnamed
\r
3054 stream_.apiHandle = (void *) handle;
\r
3057 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3058 // and output separately, we'll have to dispose of previously
\r
3059 // created output buffers for a duplex stream.
\r
3060 long inputLatency, outputLatency;
\r
3061 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3062 ASIODisposeBuffers();
\r
3063 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3066 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3067 bool buffersAllocated = false;
\r
3068 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3069 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3070 if ( handle->bufferInfos == NULL ) {
\r
3071 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3072 errorText_ = errorStream_.str();
\r
3076 ASIOBufferInfo *infos;
\r
3077 infos = handle->bufferInfos;
\r
3078 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3079 infos->isInput = ASIOFalse;
\r
3080 infos->channelNum = i + stream_.channelOffset[0];
\r
3081 infos->buffers[0] = infos->buffers[1] = 0;
\r
3083 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3084 infos->isInput = ASIOTrue;
\r
3085 infos->channelNum = i + stream_.channelOffset[1];
\r
3086 infos->buffers[0] = infos->buffers[1] = 0;
\r
3089 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3090 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3091 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3092 asioCallbacks.asioMessage = &asioMessages;
\r
3093 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3094 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3095 if ( result != ASE_OK ) {
\r
3096 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3097 errorText_ = errorStream_.str();
\r
3100 buffersAllocated = true;
\r
3102 // Set flags for buffer conversion.
\r
3103 stream_.doConvertBuffer[mode] = false;
\r
3104 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3105 stream_.doConvertBuffer[mode] = true;
\r
3106 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3107 stream_.nUserChannels[mode] > 1 )
\r
3108 stream_.doConvertBuffer[mode] = true;
\r
3110 // Allocate necessary internal buffers
\r
3111 unsigned long bufferBytes;
\r
3112 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3113 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3114 if ( stream_.userBuffer[mode] == NULL ) {
\r
3115 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3119 if ( stream_.doConvertBuffer[mode] ) {
\r
3121 bool makeBuffer = true;
\r
3122 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3123 if ( mode == INPUT ) {
\r
3124 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3125 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3126 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3130 if ( makeBuffer ) {
\r
3131 bufferBytes *= *bufferSize;
\r
3132 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3133 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3134 if ( stream_.deviceBuffer == NULL ) {
\r
3135 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3141 stream_.sampleRate = sampleRate;
\r
3142 stream_.device[mode] = device;
\r
3143 stream_.state = STREAM_STOPPED;
\r
3144 asioCallbackInfo = &stream_.callbackInfo;
\r
3145 stream_.callbackInfo.object = (void *) this;
\r
3146 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3147 // We had already set up an output stream.
\r
3148 stream_.mode = DUPLEX;
\r
3150 stream_.mode = mode;
\r
3152 // Determine device latencies
\r
3153 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3154 if ( result != ASE_OK ) {
\r
3155 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3156 errorText_ = errorStream_.str();
\r
3157 error( RtAudioError::WARNING); // warn but don't fail
\r
3160 stream_.latency[0] = outputLatency;
\r
3161 stream_.latency[1] = inputLatency;
\r
3164 // Setup the buffer conversion information structure. We don't use
\r
3165 // buffers to do channel offsets, so we override that parameter
\r
3167 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3172 if ( buffersAllocated )
\r
3173 ASIODisposeBuffers();
\r
3174 drivers.removeCurrentDriver();
\r
3177 CloseHandle( handle->condition );
\r
3178 if ( handle->bufferInfos )
\r
3179 free( handle->bufferInfos );
\r
3181 stream_.apiHandle = 0;
\r
3184 for ( int i=0; i<2; i++ ) {
\r
3185 if ( stream_.userBuffer[i] ) {
\r
3186 free( stream_.userBuffer[i] );
\r
3187 stream_.userBuffer[i] = 0;
\r
3191 if ( stream_.deviceBuffer ) {
\r
3192 free( stream_.deviceBuffer );
\r
3193 stream_.deviceBuffer = 0;
\r
3199 void RtApiAsio :: closeStream()
\r
3201 if ( stream_.state == STREAM_CLOSED ) {
\r
3202 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3203 error( RtAudioError::WARNING );
\r
3207 if ( stream_.state == STREAM_RUNNING ) {
\r
3208 stream_.state = STREAM_STOPPED;
\r
3211 ASIODisposeBuffers();
\r
3212 drivers.removeCurrentDriver();
\r
3214 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3216 CloseHandle( handle->condition );
\r
3217 if ( handle->bufferInfos )
\r
3218 free( handle->bufferInfos );
\r
3220 stream_.apiHandle = 0;
\r
3223 for ( int i=0; i<2; i++ ) {
\r
3224 if ( stream_.userBuffer[i] ) {
\r
3225 free( stream_.userBuffer[i] );
\r
3226 stream_.userBuffer[i] = 0;
\r
3230 if ( stream_.deviceBuffer ) {
\r
3231 free( stream_.deviceBuffer );
\r
3232 stream_.deviceBuffer = 0;
\r
3235 stream_.mode = UNINITIALIZED;
\r
3236 stream_.state = STREAM_CLOSED;
\r
3239 bool stopThreadCalled = false;
\r
3241 void RtApiAsio :: startStream()
\r
3244 if ( stream_.state == STREAM_RUNNING ) {
\r
3245 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3246 error( RtAudioError::WARNING );
\r
3250 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3251 ASIOError result = ASIOStart();
\r
3252 if ( result != ASE_OK ) {
\r
3253 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3254 errorText_ = errorStream_.str();
\r
3258 handle->drainCounter = 0;
\r
3259 handle->internalDrain = false;
\r
3260 ResetEvent( handle->condition );
\r
3261 stream_.state = STREAM_RUNNING;
\r
3265 stopThreadCalled = false;
\r
3267 if ( result == ASE_OK ) return;
\r
3268 error( RtAudioError::SYSTEM_ERROR );
\r
3271 void RtApiAsio :: stopStream()
\r
3274 if ( stream_.state == STREAM_STOPPED ) {
\r
3275 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3276 error( RtAudioError::WARNING );
\r
3280 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3281 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3282 if ( handle->drainCounter == 0 ) {
\r
3283 handle->drainCounter = 2;
\r
3284 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3288 stream_.state = STREAM_STOPPED;
\r
3290 ASIOError result = ASIOStop();
\r
3291 if ( result != ASE_OK ) {
\r
3292 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3293 errorText_ = errorStream_.str();
\r
3296 if ( result == ASE_OK ) return;
\r
3297 error( RtAudioError::SYSTEM_ERROR );
\r
3300 void RtApiAsio :: abortStream()
\r
3303 if ( stream_.state == STREAM_STOPPED ) {
\r
3304 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3305 error( RtAudioError::WARNING );
\r
3309 // The following lines were commented-out because some behavior was
\r
3310 // noted where the device buffers need to be zeroed to avoid
\r
3311 // continuing sound, even when the device buffers are completely
\r
3312 // disposed. So now, calling abort is the same as calling stop.
\r
3313 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3314 // handle->drainCounter = 2;
\r
3318 // This function will be called by a spawned thread when the user
\r
3319 // callback function signals that the stream should be stopped or
\r
3320 // aborted. It is necessary to handle it this way because the
\r
3321 // callbackEvent() function must return before the ASIOStop()
\r
3322 // function will return.
\r
3323 static unsigned __stdcall asioStopStream( void *ptr )
\r
3325 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3326 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3328 object->stopStream();
\r
3329 _endthreadex( 0 );
\r
3333 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3335 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3336 if ( stream_.state == STREAM_CLOSED ) {
\r
3337 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3338 error( RtAudioError::WARNING );
\r
3342 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3343 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3345 // Check if we were draining the stream and signal if finished.
\r
3346 if ( handle->drainCounter > 3 ) {
\r
3348 stream_.state = STREAM_STOPPING;
\r
3349 if ( handle->internalDrain == false )
\r
3350 SetEvent( handle->condition );
\r
3351 else { // spawn a thread to stop the stream
\r
3352 unsigned threadId;
\r
3353 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3354 &stream_.callbackInfo, 0, &threadId );
\r
3359 // Invoke user callback to get fresh output data UNLESS we are
\r
3360 // draining stream.
\r
3361 if ( handle->drainCounter == 0 ) {
\r
3362 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3363 double streamTime = getStreamTime();
\r
3364 RtAudioStreamStatus status = 0;
\r
3365 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3366 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3369 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3370 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3373 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3374 stream_.bufferSize, streamTime, status, info->userData );
\r
3375 if ( cbReturnValue == 2 ) {
\r
3376 stream_.state = STREAM_STOPPING;
\r
3377 handle->drainCounter = 2;
\r
3378 unsigned threadId;
\r
3379 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3380 &stream_.callbackInfo, 0, &threadId );
\r
3383 else if ( cbReturnValue == 1 ) {
\r
3384 handle->drainCounter = 1;
\r
3385 handle->internalDrain = true;
\r
3389 unsigned int nChannels, bufferBytes, i, j;
\r
3390 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3391 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3393 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3395 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3397 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3398 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3399 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3403 else if ( stream_.doConvertBuffer[0] ) {
\r
3405 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3406 if ( stream_.doByteSwap[0] )
\r
3407 byteSwapBuffer( stream_.deviceBuffer,
\r
3408 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3409 stream_.deviceFormat[0] );
\r
3411 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3412 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3413 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3414 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3420 if ( stream_.doByteSwap[0] )
\r
3421 byteSwapBuffer( stream_.userBuffer[0],
\r
3422 stream_.bufferSize * stream_.nUserChannels[0],
\r
3423 stream_.userFormat );
\r
3425 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3426 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3427 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3428 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3434 // Don't bother draining input
\r
3435 if ( handle->drainCounter ) {
\r
3436 handle->drainCounter++;
\r
3440 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3442 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3444 if (stream_.doConvertBuffer[1]) {
\r
3446 // Always interleave ASIO input data.
\r
3447 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3448 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3449 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3450 handle->bufferInfos[i].buffers[bufferIndex],
\r
3454 if ( stream_.doByteSwap[1] )
\r
3455 byteSwapBuffer( stream_.deviceBuffer,
\r
3456 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3457 stream_.deviceFormat[1] );
\r
3458 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3462 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3463 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3464 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3465 handle->bufferInfos[i].buffers[bufferIndex],
\r
3470 if ( stream_.doByteSwap[1] )
\r
3471 byteSwapBuffer( stream_.userBuffer[1],
\r
3472 stream_.bufferSize * stream_.nUserChannels[1],
\r
3473 stream_.userFormat );
\r
3478 // The following call was suggested by Malte Clasen. While the API
\r
3479 // documentation indicates it should not be required, some device
\r
3480 // drivers apparently do not function correctly without it.
\r
3481 ASIOOutputReady();
\r
3483 RtApi::tickStreamTime();
\r
3487 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3489 // The ASIO documentation says that this usually only happens during
\r
3490 // external sync. Audio processing is not stopped by the driver,
\r
3491 // actual sample rate might not have even changed, maybe only the
\r
3492 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3495 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3497 object->stopStream();
\r
3499 catch ( RtAudioError &exception ) {
\r
3500 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3504 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3507 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3511 switch( selector ) {
\r
3512 case kAsioSelectorSupported:
\r
3513 if ( value == kAsioResetRequest
\r
3514 || value == kAsioEngineVersion
\r
3515 || value == kAsioResyncRequest
\r
3516 || value == kAsioLatenciesChanged
\r
3517 // The following three were added for ASIO 2.0, you don't
\r
3518 // necessarily have to support them.
\r
3519 || value == kAsioSupportsTimeInfo
\r
3520 || value == kAsioSupportsTimeCode
\r
3521 || value == kAsioSupportsInputMonitor)
\r
3524 case kAsioResetRequest:
\r
3525 // Defer the task and perform the reset of the driver during the
\r
3526 // next "safe" situation. You cannot reset the driver right now,
\r
3527 // as this code is called from the driver. Reset the driver is
\r
3528 // done by completely destruct is. I.e. ASIOStop(),
\r
3529 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3531 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3534 case kAsioResyncRequest:
\r
3535 // This informs the application that the driver encountered some
\r
3536 // non-fatal data loss. It is used for synchronization purposes
\r
3537 // of different media. Added mainly to work around the Win16Mutex
\r
3538 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3539 // which could lose data because the Mutex was held too long by
\r
3540 // another thread. However a driver can issue it in other
\r
3541 // situations, too.
\r
3542 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3546 case kAsioLatenciesChanged:
\r
3547 // This will inform the host application that the drivers were
\r
3548 // latencies changed. Beware, it this does not mean that the
\r
3549 // buffer sizes have changed! You might need to update internal
\r
3551 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3554 case kAsioEngineVersion:
\r
3555 // Return the supported ASIO version of the host application. If
\r
3556 // a host application does not implement this selector, ASIO 1.0
\r
3557 // is assumed by the driver.
\r
3560 case kAsioSupportsTimeInfo:
\r
3561 // Informs the driver whether the
\r
3562 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3563 // For compatibility with ASIO 1.0 drivers the host application
\r
3564 // should always support the "old" bufferSwitch method, too.
\r
3567 case kAsioSupportsTimeCode:
\r
3568 // Informs the driver whether application is interested in time
\r
3569 // code info. If an application does not need to know about time
\r
3570 // code, the driver has less work to do.
\r
3577 static const char* getAsioErrorString( ASIOError result )
\r
3582 const char*message;
\r
3585 static const Messages m[] =
\r
3587 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3588 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3589 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3590 { ASE_InvalidMode, "Invalid mode." },
\r
3591 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3592 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3593 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3596 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3597 if ( m[i].value == result ) return m[i].message;
\r
3599 return "Unknown error.";
\r
3602 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3606 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3608 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3609 // - Introduces support for the Windows WASAPI API
\r
3610 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3611 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3612 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3617 #include <audioclient.h>
\r
3619 #include <mmdeviceapi.h>
\r
3620 #include <functiondiscoverykeys_devpkey.h>
\r
3622 //=============================================================================
\r
// Release a COM interface pointer and NULL it out.
// Guarded against NULL so it is safe to call on pointers that were
// never acquired (e.g. on early-exit cleanup paths).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3631 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3633 //-----------------------------------------------------------------------------
\r
3635 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3636 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3637 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3638 // provide intermediate storage for read / write synchronization.
\r
3639 class WasapiBuffer
\r
3643 : buffer_( NULL ),
\r
3652 // sets the length of the internal ring buffer
\r
3653 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3656 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3658 bufferSize_ = bufferSize;
\r
3663 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3664 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3666 if ( !buffer || // incoming buffer is NULL
\r
3667 bufferSize == 0 || // incoming buffer has no data
\r
3668 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3673 unsigned int relOutIndex = outIndex_;
\r
3674 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3675 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3676 relOutIndex += bufferSize_;
\r
3679 // "in" index can end on the "out" index but cannot begin at it
\r
3680 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3681 return false; // not enough space between "in" index and "out" index
\r
3684 // copy buffer from external to internal
\r
3685 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3686 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3687 int fromInSize = bufferSize - fromZeroSize;
\r
3691 case RTAUDIO_SINT8:
\r
3692 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3693 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3695 case RTAUDIO_SINT16:
\r
3696 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3697 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3699 case RTAUDIO_SINT24:
\r
3700 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3701 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3703 case RTAUDIO_SINT32:
\r
3704 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3705 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3707 case RTAUDIO_FLOAT32:
\r
3708 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3709 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3711 case RTAUDIO_FLOAT64:
\r
3712 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3713 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3717 // update "in" index
\r
3718 inIndex_ += bufferSize;
\r
3719 inIndex_ %= bufferSize_;
\r
3724 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3725 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3727 if ( !buffer || // incoming buffer is NULL
\r
3728 bufferSize == 0 || // incoming buffer has no data
\r
3729 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3734 unsigned int relInIndex = inIndex_;
\r
3735 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3736 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3737 relInIndex += bufferSize_;
\r
3740 // "out" index can begin at and end on the "in" index
\r
3741 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3742 return false; // not enough space between "out" index and "in" index
\r
3745 // copy buffer from internal to external
\r
3746 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3747 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3748 int fromOutSize = bufferSize - fromZeroSize;
\r
3752 case RTAUDIO_SINT8:
\r
3753 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3754 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3756 case RTAUDIO_SINT16:
\r
3757 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3758 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3760 case RTAUDIO_SINT24:
\r
3761 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3762 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3764 case RTAUDIO_SINT32:
\r
3765 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3766 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3768 case RTAUDIO_FLOAT32:
\r
3769 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3770 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3772 case RTAUDIO_FLOAT64:
\r
3773 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3774 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3778 // update "out" index
\r
3779 outIndex_ += bufferSize;
\r
3780 outIndex_ %= bufferSize_;
\r
3787 unsigned int bufferSize_;
\r
3788 unsigned int inIndex_;
\r
3789 unsigned int outIndex_;
\r
3792 //-----------------------------------------------------------------------------
\r
3794 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3795 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3796 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3797 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3798 // one rate and its multiple.
\r
3799 void convertBufferWasapi( char* outBuffer,
\r
3800 const char* inBuffer,
\r
3801 const unsigned int& channelCount,
\r
3802 const unsigned int& inSampleRate,
\r
3803 const unsigned int& outSampleRate,
\r
3804 const unsigned int& inSampleCount,
\r
3805 unsigned int& outSampleCount,
\r
3806 const RtAudioFormat& format )
\r
3808 // calculate the new outSampleCount and relative sampleStep
\r
3809 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3810 float sampleStep = 1.0f / sampleRatio;
\r
3811 float inSampleFraction = 0.0f;
\r
3813 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3815 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3816 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3818 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3822 case RTAUDIO_SINT8:
\r
3823 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3825 case RTAUDIO_SINT16:
\r
3826 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3828 case RTAUDIO_SINT24:
\r
3829 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3831 case RTAUDIO_SINT32:
\r
3832 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3834 case RTAUDIO_FLOAT32:
\r
3835 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3837 case RTAUDIO_FLOAT64:
\r
3838 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3842 // jump to next in sample
\r
3843 inSampleFraction += sampleStep;
\r
3847 //-----------------------------------------------------------------------------
\r
3849 // A structure to hold various information related to the WASAPI implementation.
\r
3850 struct WasapiHandle
\r
3852 IAudioClient* captureAudioClient;
\r
3853 IAudioClient* renderAudioClient;
\r
3854 IAudioCaptureClient* captureClient;
\r
3855 IAudioRenderClient* renderClient;
\r
3856 HANDLE captureEvent;
\r
3857 HANDLE renderEvent;
\r
3860 : captureAudioClient( NULL ),
\r
3861 renderAudioClient( NULL ),
\r
3862 captureClient( NULL ),
\r
3863 renderClient( NULL ),
\r
3864 captureEvent( NULL ),
\r
3865 renderEvent( NULL ) {}
\r
3868 //=============================================================================
\r
3870 RtApiWasapi::RtApiWasapi()
\r
3871 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3873 // WASAPI can run either apartment or multi-threaded
\r
3874 HRESULT hr = CoInitialize( NULL );
\r
3875 if ( !FAILED( hr ) )
\r
3876 coInitialized_ = true;
\r
3878 // Instantiate device enumerator
\r
3879 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3880 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3881 ( void** ) &deviceEnumerator_ );
\r
3883 if ( FAILED( hr ) ) {
\r
3884 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3885 error( RtAudioError::DRIVER_ERROR );
\r
3889 //-----------------------------------------------------------------------------
\r
3891 RtApiWasapi::~RtApiWasapi()
\r
3893 if ( stream_.state != STREAM_CLOSED )
\r
3896 SAFE_RELEASE( deviceEnumerator_ );
\r
3898 // If this object previously called CoInitialize()
\r
3899 if ( coInitialized_ )
\r
3903 //=============================================================================
\r
3905 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3907 unsigned int captureDeviceCount = 0;
\r
3908 unsigned int renderDeviceCount = 0;
\r
3910 IMMDeviceCollection* captureDevices = NULL;
\r
3911 IMMDeviceCollection* renderDevices = NULL;
\r
3913 // Count capture devices
\r
3914 errorText_.clear();
\r
3915 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3916 if ( FAILED( hr ) ) {
\r
3917 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3921 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3922 if ( FAILED( hr ) ) {
\r
3923 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3927 // Count render devices
\r
3928 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3929 if ( FAILED( hr ) ) {
\r
3930 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3934 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3935 if ( FAILED( hr ) ) {
\r
3936 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3941 // release all references
\r
3942 SAFE_RELEASE( captureDevices );
\r
3943 SAFE_RELEASE( renderDevices );
\r
3945 if ( errorText_.empty() )
\r
3946 return captureDeviceCount + renderDeviceCount;
\r
3948 error( RtAudioError::DRIVER_ERROR );
\r
3952 //-----------------------------------------------------------------------------
\r
3954 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3956 RtAudio::DeviceInfo info;
\r
3957 unsigned int captureDeviceCount = 0;
\r
3958 unsigned int renderDeviceCount = 0;
\r
3959 std::string defaultDeviceName;
\r
3960 bool isCaptureDevice = false;
\r
3962 PROPVARIANT deviceNameProp;
\r
3963 PROPVARIANT defaultDeviceNameProp;
\r
3965 IMMDeviceCollection* captureDevices = NULL;
\r
3966 IMMDeviceCollection* renderDevices = NULL;
\r
3967 IMMDevice* devicePtr = NULL;
\r
3968 IMMDevice* defaultDevicePtr = NULL;
\r
3969 IAudioClient* audioClient = NULL;
\r
3970 IPropertyStore* devicePropStore = NULL;
\r
3971 IPropertyStore* defaultDevicePropStore = NULL;
\r
3973 WAVEFORMATEX* deviceFormat = NULL;
\r
3974 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3977 info.probed = false;
\r
3979 // Count capture devices
\r
3980 errorText_.clear();
\r
3981 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3982 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3983 if ( FAILED( hr ) ) {
\r
3984 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3988 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3989 if ( FAILED( hr ) ) {
\r
3990 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3994 // Count render devices
\r
3995 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3996 if ( FAILED( hr ) ) {
\r
3997 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4001 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4002 if ( FAILED( hr ) ) {
\r
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4007 // validate device index
\r
4008 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4009 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4010 errorType = RtAudioError::INVALID_USE;
\r
4014 // determine whether index falls within capture or render devices
\r
4015 if ( device >= renderDeviceCount ) {
\r
4016 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4017 if ( FAILED( hr ) ) {
\r
4018 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4021 isCaptureDevice = true;
\r
4024 hr = renderDevices->Item( device, &devicePtr );
\r
4025 if ( FAILED( hr ) ) {
\r
4026 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4029 isCaptureDevice = false;
\r
4032 // get default device name
\r
4033 if ( isCaptureDevice ) {
\r
4034 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4041 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4048 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4049 if ( FAILED( hr ) ) {
\r
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4053 PropVariantInit( &defaultDeviceNameProp );
\r
4055 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4056 if ( FAILED( hr ) ) {
\r
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4061 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4064 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4065 if ( FAILED( hr ) ) {
\r
4066 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4070 PropVariantInit( &deviceNameProp );
\r
4072 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4073 if ( FAILED( hr ) ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4078 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4081 if ( isCaptureDevice ) {
\r
4082 info.isDefaultInput = info.name == defaultDeviceName;
\r
4083 info.isDefaultOutput = false;
\r
4086 info.isDefaultInput = false;
\r
4087 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4091 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4092 if ( FAILED( hr ) ) {
\r
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4097 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4098 if ( FAILED( hr ) ) {
\r
4099 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4103 if ( isCaptureDevice ) {
\r
4104 info.inputChannels = deviceFormat->nChannels;
\r
4105 info.outputChannels = 0;
\r
4106 info.duplexChannels = 0;
\r
4109 info.inputChannels = 0;
\r
4110 info.outputChannels = deviceFormat->nChannels;
\r
4111 info.duplexChannels = 0;
\r
4115 info.sampleRates.clear();
\r
4117 // allow support for all sample rates as we have a built-in sample rate converter
\r
4118 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4119 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4123 info.nativeFormats = 0;
\r
4125 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4126 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4127 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4129 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4130 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4132 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4133 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4136 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4137 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4138 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4140 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4141 info.nativeFormats |= RTAUDIO_SINT8;
\r
4143 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4144 info.nativeFormats |= RTAUDIO_SINT16;
\r
4146 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4147 info.nativeFormats |= RTAUDIO_SINT24;
\r
4149 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4150 info.nativeFormats |= RTAUDIO_SINT32;
\r
4155 info.probed = true;
\r
4158 // release all references
\r
4159 PropVariantClear( &deviceNameProp );
\r
4160 PropVariantClear( &defaultDeviceNameProp );
\r
4162 SAFE_RELEASE( captureDevices );
\r
4163 SAFE_RELEASE( renderDevices );
\r
4164 SAFE_RELEASE( devicePtr );
\r
4165 SAFE_RELEASE( defaultDevicePtr );
\r
4166 SAFE_RELEASE( audioClient );
\r
4167 SAFE_RELEASE( devicePropStore );
\r
4168 SAFE_RELEASE( defaultDevicePropStore );
\r
4170 CoTaskMemFree( deviceFormat );
\r
4171 CoTaskMemFree( closestMatchFormat );
\r
4173 if ( !errorText_.empty() )
\r
4174 error( errorType );
\r
4178 //-----------------------------------------------------------------------------
\r
4180 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4182 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4183 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4191 //-----------------------------------------------------------------------------
\r
4193 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4195 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4196 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4204 //-----------------------------------------------------------------------------
\r
4206 void RtApiWasapi::closeStream( void )
\r
4208 if ( stream_.state == STREAM_CLOSED ) {
\r
4209 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4210 error( RtAudioError::WARNING );
\r
4214 if ( stream_.state != STREAM_STOPPED )
\r
4217 // clean up stream memory
\r
4218 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4219 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4221 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4222 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4224 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4225 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4227 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4228 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4230 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4231 stream_.apiHandle = NULL;
\r
4233 for ( int i = 0; i < 2; i++ ) {
\r
4234 if ( stream_.userBuffer[i] ) {
\r
4235 free( stream_.userBuffer[i] );
\r
4236 stream_.userBuffer[i] = 0;
\r
4240 if ( stream_.deviceBuffer ) {
\r
4241 free( stream_.deviceBuffer );
\r
4242 stream_.deviceBuffer = 0;
\r
4245 // update stream state
\r
4246 stream_.state = STREAM_CLOSED;
\r
4249 //-----------------------------------------------------------------------------
\r
4251 void RtApiWasapi::startStream( void )
\r
4255 if ( stream_.state == STREAM_RUNNING ) {
\r
4256 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4257 error( RtAudioError::WARNING );
\r
4261 // update stream state
\r
4262 stream_.state = STREAM_RUNNING;
\r
4264 // create WASAPI stream thread
\r
4265 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4267 if ( !stream_.callbackInfo.thread ) {
\r
4268 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4269 error( RtAudioError::THREAD_ERROR );
\r
4272 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4273 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4277 //-----------------------------------------------------------------------------
\r
4279 void RtApiWasapi::stopStream( void )
\r
4283 if ( stream_.state == STREAM_STOPPED ) {
\r
4284 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4285 error( RtAudioError::WARNING );
\r
4289 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4290 stream_.state = STREAM_STOPPING;
\r
4292 // wait until stream thread is stopped
\r
4293 while( stream_.state != STREAM_STOPPED ) {
\r
4297 // Wait for the last buffer to play before stopping.
\r
4298 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4300 // stop capture client if applicable
\r
4301 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4302 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4303 if ( FAILED( hr ) ) {
\r
4304 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4305 error( RtAudioError::DRIVER_ERROR );
\r
4310 // stop render client if applicable
\r
4311 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4312 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4313 if ( FAILED( hr ) ) {
\r
4314 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4315 error( RtAudioError::DRIVER_ERROR );
\r
4320 // close thread handle
\r
4321 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4322 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4323 error( RtAudioError::THREAD_ERROR );
\r
4327 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4330 //-----------------------------------------------------------------------------
\r
4332 void RtApiWasapi::abortStream( void )
\r
4336 if ( stream_.state == STREAM_STOPPED ) {
\r
4337 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4338 error( RtAudioError::WARNING );
\r
4342 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4343 stream_.state = STREAM_STOPPING;
\r
4345 // wait until stream thread is stopped
\r
4346 while ( stream_.state != STREAM_STOPPED ) {
\r
4350 // stop capture client if applicable
\r
4351 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4352 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4353 if ( FAILED( hr ) ) {
\r
4354 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4355 error( RtAudioError::DRIVER_ERROR );
\r
4360 // stop render client if applicable
\r
4361 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4362 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4363 if ( FAILED( hr ) ) {
\r
4364 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4365 error( RtAudioError::DRIVER_ERROR );
\r
4370 // close thread handle
\r
4371 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4372 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4373 error( RtAudioError::THREAD_ERROR );
\r
4377 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4380 //-----------------------------------------------------------------------------
\r
4382 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4383 unsigned int firstChannel, unsigned int sampleRate,
\r
4384 RtAudioFormat format, unsigned int* bufferSize,
\r
4385 RtAudio::StreamOptions* options )
\r
4387 bool methodResult = FAILURE;
\r
4388 unsigned int captureDeviceCount = 0;
\r
4389 unsigned int renderDeviceCount = 0;
\r
4391 IMMDeviceCollection* captureDevices = NULL;
\r
4392 IMMDeviceCollection* renderDevices = NULL;
\r
4393 IMMDevice* devicePtr = NULL;
\r
4394 WAVEFORMATEX* deviceFormat = NULL;
\r
4395 unsigned int bufferBytes;
\r
4396 stream_.state = STREAM_STOPPED;
\r
4398 // create API Handle if not already created
\r
4399 if ( !stream_.apiHandle )
\r
4400 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4402 // Count capture devices
\r
4403 errorText_.clear();
\r
4404 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4405 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4406 if ( FAILED( hr ) ) {
\r
4407 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4411 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4412 if ( FAILED( hr ) ) {
\r
4413 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4417 // Count render devices
\r
4418 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4419 if ( FAILED( hr ) ) {
\r
4420 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4424 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4425 if ( FAILED( hr ) ) {
\r
4426 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4430 // validate device index
\r
4431 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4432 errorType = RtAudioError::INVALID_USE;
\r
4433 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4437 // determine whether index falls within capture or render devices
\r
4438 if ( device >= renderDeviceCount ) {
\r
4439 if ( mode != INPUT ) {
\r
4440 errorType = RtAudioError::INVALID_USE;
\r
4441 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4445 // retrieve captureAudioClient from devicePtr
\r
4446 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4448 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4449 if ( FAILED( hr ) ) {
\r
4450 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4454 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4455 NULL, ( void** ) &captureAudioClient );
\r
4456 if ( FAILED( hr ) ) {
\r
4457 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4461 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4462 if ( FAILED( hr ) ) {
\r
4463 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4467 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4468 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4471 if ( mode != OUTPUT ) {
\r
4472 errorType = RtAudioError::INVALID_USE;
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4477 // retrieve renderAudioClient from devicePtr
\r
4478 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4480 hr = renderDevices->Item( device, &devicePtr );
\r
4481 if ( FAILED( hr ) ) {
\r
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4486 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4487 NULL, ( void** ) &renderAudioClient );
\r
4488 if ( FAILED( hr ) ) {
\r
4489 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4493 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4494 if ( FAILED( hr ) ) {
\r
4495 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4499 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4500 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4503 // fill stream data
\r
4504 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4505 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4506 stream_.mode = DUPLEX;
\r
4509 stream_.mode = mode;
\r
4512 stream_.device[mode] = device;
\r
4513 stream_.doByteSwap[mode] = false;
\r
4514 stream_.sampleRate = sampleRate;
\r
4515 stream_.bufferSize = *bufferSize;
\r
4516 stream_.nBuffers = 1;
\r
4517 stream_.nUserChannels[mode] = channels;
\r
4518 stream_.channelOffset[mode] = firstChannel;
\r
4519 stream_.userFormat = format;
\r
4520 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4522 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4523 stream_.userInterleaved = false;
\r
4525 stream_.userInterleaved = true;
\r
4526 stream_.deviceInterleaved[mode] = true;
\r
4528 // Set flags for buffer conversion.
\r
4529 stream_.doConvertBuffer[mode] = false;
\r
4530 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4531 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4532 stream_.doConvertBuffer[mode] = true;
\r
4533 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4534 stream_.nUserChannels[mode] > 1 )
\r
4535 stream_.doConvertBuffer[mode] = true;
\r
4537 if ( stream_.doConvertBuffer[mode] )
\r
4538 setConvertInfo( mode, 0 );
\r
4540 // Allocate necessary internal buffers
\r
4541 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4543 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4544 if ( !stream_.userBuffer[mode] ) {
\r
4545 errorType = RtAudioError::MEMORY_ERROR;
\r
4546 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4550 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4551 stream_.callbackInfo.priority = 15;
\r
4553 stream_.callbackInfo.priority = 0;
\r
4555 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4556 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4558 methodResult = SUCCESS;
\r
4562 SAFE_RELEASE( captureDevices );
\r
4563 SAFE_RELEASE( renderDevices );
\r
4564 SAFE_RELEASE( devicePtr );
\r
4565 CoTaskMemFree( deviceFormat );
\r
4567 // if method failed, close the stream
\r
4568 if ( methodResult == FAILURE )
\r
4571 if ( !errorText_.empty() )
\r
4572 error( errorType );
\r
4573 return methodResult;
\r
4576 //=============================================================================
\r
4578 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4581 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4586 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4589 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4594 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4597 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4602 //-----------------------------------------------------------------------------
\r
4604 void RtApiWasapi::wasapiThread()
\r
4606 // as this is a new thread, we must CoInitialize it
\r
4607 CoInitialize( NULL );
\r
4611 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4612 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4613 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4614 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4615 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4616 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4618 WAVEFORMATEX* captureFormat = NULL;
\r
4619 WAVEFORMATEX* renderFormat = NULL;
\r
4620 float captureSrRatio = 0.0f;
\r
4621 float renderSrRatio = 0.0f;
\r
4622 WasapiBuffer captureBuffer;
\r
4623 WasapiBuffer renderBuffer;
\r
4625 // declare local stream variables
\r
4626 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4627 BYTE* streamBuffer = NULL;
\r
4628 unsigned long captureFlags = 0;
\r
4629 unsigned int bufferFrameCount = 0;
\r
4630 unsigned int numFramesPadding = 0;
\r
4631 unsigned int convBufferSize = 0;
\r
4632 bool callbackPushed = false;
\r
4633 bool callbackPulled = false;
\r
4634 bool callbackStopped = false;
\r
4635 int callbackResult = 0;
\r
4637 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4638 char* convBuffer = NULL;
\r
4639 unsigned int convBuffSize = 0;
\r
4640 unsigned int deviceBuffSize = 0;
\r
4642 errorText_.clear();
\r
4643 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4645 // Attempt to assign "Pro Audio" characteristic to thread
\r
4646 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4648 DWORD taskIndex = 0;
\r
4649 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4650 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4651 FreeLibrary( AvrtDll );
\r
4654 // start capture stream if applicable
\r
4655 if ( captureAudioClient ) {
\r
4656 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4657 if ( FAILED( hr ) ) {
\r
4658 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4662 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4664 // initialize capture stream according to desire buffer size
\r
4665 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4666 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4668 if ( !captureClient ) {
\r
4669 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4670 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4671 desiredBufferPeriod,
\r
4672 desiredBufferPeriod,
\r
4675 if ( FAILED( hr ) ) {
\r
4676 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4680 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4681 ( void** ) &captureClient );
\r
4682 if ( FAILED( hr ) ) {
\r
4683 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4687 // configure captureEvent to trigger on every available capture buffer
\r
4688 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4689 if ( !captureEvent ) {
\r
4690 errorType = RtAudioError::SYSTEM_ERROR;
\r
4691 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4695 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4696 if ( FAILED( hr ) ) {
\r
4697 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4701 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4702 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4705 unsigned int inBufferSize = 0;
\r
4706 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4707 if ( FAILED( hr ) ) {
\r
4708 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4712 // scale outBufferSize according to stream->user sample rate ratio
\r
4713 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4714 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4716 // set captureBuffer size
\r
4717 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4719 // reset the capture stream
\r
4720 hr = captureAudioClient->Reset();
\r
4721 if ( FAILED( hr ) ) {
\r
4722 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4726 // start the capture stream
\r
4727 hr = captureAudioClient->Start();
\r
4728 if ( FAILED( hr ) ) {
\r
4729 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4734 // start render stream if applicable
\r
4735 if ( renderAudioClient ) {
\r
4736 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4737 if ( FAILED( hr ) ) {
\r
4738 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4742 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4744 // initialize render stream according to desire buffer size
\r
4745 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4746 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4748 if ( !renderClient ) {
\r
4749 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4750 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4751 desiredBufferPeriod,
\r
4752 desiredBufferPeriod,
\r
4755 if ( FAILED( hr ) ) {
\r
4756 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4760 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4761 ( void** ) &renderClient );
\r
4762 if ( FAILED( hr ) ) {
\r
4763 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4767 // configure renderEvent to trigger on every available render buffer
\r
4768 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4769 if ( !renderEvent ) {
\r
4770 errorType = RtAudioError::SYSTEM_ERROR;
\r
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4775 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4776 if ( FAILED( hr ) ) {
\r
4777 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4781 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4782 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4785 unsigned int outBufferSize = 0;
\r
4786 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4787 if ( FAILED( hr ) ) {
\r
4788 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4792 // scale inBufferSize according to user->stream sample rate ratio
\r
4793 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4794 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4796 // set renderBuffer size
\r
4797 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4799 // reset the render stream
\r
4800 hr = renderAudioClient->Reset();
\r
4801 if ( FAILED( hr ) ) {
\r
4802 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4806 // start the render stream
\r
4807 hr = renderAudioClient->Start();
\r
4808 if ( FAILED( hr ) ) {
\r
4809 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4814 if ( stream_.mode == INPUT ) {
\r
4815 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4816 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4818 else if ( stream_.mode == OUTPUT ) {
\r
4819 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4820 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4822 else if ( stream_.mode == DUPLEX ) {
\r
4823 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4824 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4825 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4826 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4829 convBuffer = ( char* ) malloc( convBuffSize );
\r
4830 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4831 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4832 errorType = RtAudioError::MEMORY_ERROR;
\r
4833 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4837 // stream process loop
\r
4838 while ( stream_.state != STREAM_STOPPING ) {
\r
4839 if ( !callbackPulled ) {
\r
4842 // 1. Pull callback buffer from inputBuffer
\r
4843 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4844 // Convert callback buffer to user format
\r
4846 if ( captureAudioClient ) {
\r
4847 // Pull callback buffer from inputBuffer
\r
4848 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4849 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4850 stream_.deviceFormat[INPUT] );
\r
4852 if ( callbackPulled ) {
\r
4853 // Convert callback buffer to user sample rate
\r
4854 convertBufferWasapi( stream_.deviceBuffer,
\r
4856 stream_.nDeviceChannels[INPUT],
\r
4857 captureFormat->nSamplesPerSec,
\r
4858 stream_.sampleRate,
\r
4859 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4861 stream_.deviceFormat[INPUT] );
\r
4863 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4864 // Convert callback buffer to user format
\r
4865 convertBuffer( stream_.userBuffer[INPUT],
\r
4866 stream_.deviceBuffer,
\r
4867 stream_.convertInfo[INPUT] );
\r
4870 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4871 memcpy( stream_.userBuffer[INPUT],
\r
4872 stream_.deviceBuffer,
\r
4873 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4878 // if there is no capture stream, set callbackPulled flag
\r
4879 callbackPulled = true;
\r
4882 // Execute Callback
\r
4883 // ================
\r
4884 // 1. Execute user callback method
\r
4885 // 2. Handle return value from callback
\r
4887 // if callback has not requested the stream to stop
\r
4888 if ( callbackPulled && !callbackStopped ) {
\r
4889 // Execute user callback method
\r
4890 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4891 stream_.userBuffer[INPUT],
\r
4892 stream_.bufferSize,
\r
4894 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4895 stream_.callbackInfo.userData );
\r
4897 // Handle return value from callback
\r
4898 if ( callbackResult == 1 ) {
\r
4899 // instantiate a thread to stop this thread
\r
4900 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4901 if ( !threadHandle ) {
\r
4902 errorType = RtAudioError::THREAD_ERROR;
\r
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4906 else if ( !CloseHandle( threadHandle ) ) {
\r
4907 errorType = RtAudioError::THREAD_ERROR;
\r
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4912 callbackStopped = true;
\r
4914 else if ( callbackResult == 2 ) {
\r
4915 // instantiate a thread to stop this thread
\r
4916 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4917 if ( !threadHandle ) {
\r
4918 errorType = RtAudioError::THREAD_ERROR;
\r
4919 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4922 else if ( !CloseHandle( threadHandle ) ) {
\r
4923 errorType = RtAudioError::THREAD_ERROR;
\r
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4928 callbackStopped = true;
\r
4933 // Callback Output
\r
4934 // ===============
\r
4935 // 1. Convert callback buffer to stream format
\r
4936 // 2. Convert callback buffer to stream sample rate and channel count
\r
4937 // 3. Push callback buffer into outputBuffer
\r
4939 if ( renderAudioClient && callbackPulled ) {
\r
4940 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4941 // Convert callback buffer to stream format
\r
4942 convertBuffer( stream_.deviceBuffer,
\r
4943 stream_.userBuffer[OUTPUT],
\r
4944 stream_.convertInfo[OUTPUT] );
\r
4948 // Convert callback buffer to stream sample rate
\r
4949 convertBufferWasapi( convBuffer,
\r
4950 stream_.deviceBuffer,
\r
4951 stream_.nDeviceChannels[OUTPUT],
\r
4952 stream_.sampleRate,
\r
4953 renderFormat->nSamplesPerSec,
\r
4954 stream_.bufferSize,
\r
4956 stream_.deviceFormat[OUTPUT] );
\r
4958 // Push callback buffer into outputBuffer
\r
4959 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4960 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4961 stream_.deviceFormat[OUTPUT] );
\r
4964 // if there is no render stream, set callbackPushed flag
\r
4965 callbackPushed = true;
\r
4970 // 1. Get capture buffer from stream
\r
4971 // 2. Push capture buffer into inputBuffer
\r
4972 // 3. If 2. was successful: Release capture buffer
\r
4974 if ( captureAudioClient ) {
\r
4975 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4976 if ( !callbackPulled ) {
\r
4977 WaitForSingleObject( captureEvent, INFINITE );
\r
4980 // Get capture buffer from stream
\r
4981 hr = captureClient->GetBuffer( &streamBuffer,
\r
4982 &bufferFrameCount,
\r
4983 &captureFlags, NULL, NULL );
\r
4984 if ( FAILED( hr ) ) {
\r
4985 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4989 if ( bufferFrameCount != 0 ) {
\r
4990 // Push capture buffer into inputBuffer
\r
4991 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4992 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4993 stream_.deviceFormat[INPUT] ) )
\r
4995 // Release capture buffer
\r
4996 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4997 if ( FAILED( hr ) ) {
\r
4998 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5004 // Inform WASAPI that capture was unsuccessful
\r
5005 hr = captureClient->ReleaseBuffer( 0 );
\r
5006 if ( FAILED( hr ) ) {
\r
5007 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5014 // Inform WASAPI that capture was unsuccessful
\r
5015 hr = captureClient->ReleaseBuffer( 0 );
\r
5016 if ( FAILED( hr ) ) {
\r
5017 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5025 // 1. Get render buffer from stream
\r
5026 // 2. Pull next buffer from outputBuffer
\r
5027 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5028 // Release render buffer
\r
5030 if ( renderAudioClient ) {
\r
5031 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5032 if ( callbackPulled && !callbackPushed ) {
\r
5033 WaitForSingleObject( renderEvent, INFINITE );
\r
5036 // Get render buffer from stream
\r
5037 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5038 if ( FAILED( hr ) ) {
\r
5039 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5043 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5044 if ( FAILED( hr ) ) {
\r
5045 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5049 bufferFrameCount -= numFramesPadding;
\r
5051 if ( bufferFrameCount != 0 ) {
\r
5052 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5053 if ( FAILED( hr ) ) {
\r
5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5058 // Pull next buffer from outputBuffer
\r
5059 // Fill render buffer with next buffer
\r
5060 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5061 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5062 stream_.deviceFormat[OUTPUT] ) )
\r
5064 // Release render buffer
\r
5065 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5066 if ( FAILED( hr ) ) {
\r
5067 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5073 // Inform WASAPI that render was unsuccessful
\r
5074 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5075 if ( FAILED( hr ) ) {
\r
5076 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5083 // Inform WASAPI that render was unsuccessful
\r
5084 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5085 if ( FAILED( hr ) ) {
\r
5086 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5092 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5093 if ( callbackPushed ) {
\r
5094 callbackPulled = false;
\r
5097 // tick stream time
\r
5098 RtApi::tickStreamTime();
\r
5103 CoTaskMemFree( captureFormat );
\r
5104 CoTaskMemFree( renderFormat );
\r
5106 free ( convBuffer );
\r
5110 // update stream state
\r
5111 stream_.state = STREAM_STOPPED;
\r
5113 if ( errorText_.empty() )
\r
5116 error( errorType );
\r
5119 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5123 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5125 // Modified by Robin Davies, October 2005
\r
5126 // - Improvements to DirectX pointer chasing.
\r
5127 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5128 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5129 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5130 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5132 #include <dsound.h>
\r
5133 #include <assert.h>
\r
5134 #include <algorithm>
\r
5136 #if defined(__MINGW32__)
\r
5137 // missing from latest mingw winapi
\r
5138 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5139 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5140 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5141 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5144 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5146 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5147 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5150 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5152 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5153 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5154 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5155 return pointer >= earlierPointer && pointer < laterPointer;
\r
5158 // A structure to hold various information related to the DirectSound
\r
5159 // API implementation.
\r
5161 unsigned int drainCounter; // Tracks callback counts when draining
\r
5162 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5166 UINT bufferPointer[2];
\r
5167 DWORD dsBufferSize[2];
\r
5168 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5172 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5175 // Declarations for utility functions, callbacks, and structures
\r
5176 // specific to the DirectSound implementation.
\r
5177 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5178 LPCTSTR description,
\r
5180 LPVOID lpContext );
\r
5182 static const char* getErrorString( int code );
\r
5184 static unsigned __stdcall callbackHandler( void *ptr );
\r
5193 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to the DirectSound enumeration callback (deviceQueryCallback):
// tells the callback which direction is being enumerated and where to record
// the devices it finds.
struct DsProbeData {
  bool isInput;                             // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices;  // device list to fill in
};
\r
5201 RtApiDs :: RtApiDs()
\r
5203 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5204 // accept whatever the mainline chose for a threading model.
\r
5205 coInitialized_ = false;
\r
5206 HRESULT hr = CoInitialize( NULL );
\r
5207 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5210 RtApiDs :: ~RtApiDs()
\r
5212 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5213 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5216 // The DirectSound default output is always the first device.
\r
5217 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5222 // The DirectSound default input is always the first input device,
\r
5223 // which is the first capture device enumerated.
\r
5224 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5229 unsigned int RtApiDs :: getDeviceCount( void )
\r
5231 // Set query flag for previously found devices to false, so that we
\r
5232 // can check for any devices that have disappeared.
\r
5233 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5234 dsDevices[i].found = false;
\r
5236 // Query DirectSound devices.
\r
5237 struct DsProbeData probeInfo;
\r
5238 probeInfo.isInput = false;
\r
5239 probeInfo.dsDevices = &dsDevices;
\r
5240 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5241 if ( FAILED( result ) ) {
\r
5242 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5243 errorText_ = errorStream_.str();
\r
5244 error( RtAudioError::WARNING );
\r
5247 // Query DirectSoundCapture devices.
\r
5248 probeInfo.isInput = true;
\r
5249 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5250 if ( FAILED( result ) ) {
\r
5251 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5252 errorText_ = errorStream_.str();
\r
5253 error( RtAudioError::WARNING );
\r
5256 // Clean out any devices that may have disappeared.
\r
5257 std::vector< int > indices;
\r
5258 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5259 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5260 //unsigned int nErased = 0;
\r
5261 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5262 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5263 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5265 return static_cast<unsigned int>(dsDevices.size());
\r
5268 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5270 RtAudio::DeviceInfo info;
\r
5271 info.probed = false;
\r
5273 if ( dsDevices.size() == 0 ) {
\r
5274 // Force a query of all devices
\r
5276 if ( dsDevices.size() == 0 ) {
\r
5277 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5278 error( RtAudioError::INVALID_USE );
\r
5283 if ( device >= dsDevices.size() ) {
\r
5284 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5285 error( RtAudioError::INVALID_USE );
\r
5290 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5292 LPDIRECTSOUND output;
\r
5294 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5295 if ( FAILED( result ) ) {
\r
5296 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5297 errorText_ = errorStream_.str();
\r
5298 error( RtAudioError::WARNING );
\r
5302 outCaps.dwSize = sizeof( outCaps );
\r
5303 result = output->GetCaps( &outCaps );
\r
5304 if ( FAILED( result ) ) {
\r
5305 output->Release();
\r
5306 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5307 errorText_ = errorStream_.str();
\r
5308 error( RtAudioError::WARNING );
\r
5312 // Get output channel information.
\r
5313 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5315 // Get sample rate information.
\r
5316 info.sampleRates.clear();
\r
5317 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5318 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5319 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5320 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5323 // Get format information.
\r
5324 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5325 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5327 output->Release();
\r
5329 if ( getDefaultOutputDevice() == device )
\r
5330 info.isDefaultOutput = true;
\r
5332 if ( dsDevices[ device ].validId[1] == false ) {
\r
5333 info.name = dsDevices[ device ].name;
\r
5334 info.probed = true;
\r
5340 LPDIRECTSOUNDCAPTURE input;
\r
5341 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5342 if ( FAILED( result ) ) {
\r
5343 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5344 errorText_ = errorStream_.str();
\r
5345 error( RtAudioError::WARNING );
\r
5350 inCaps.dwSize = sizeof( inCaps );
\r
5351 result = input->GetCaps( &inCaps );
\r
5352 if ( FAILED( result ) ) {
\r
5354 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5355 errorText_ = errorStream_.str();
\r
5356 error( RtAudioError::WARNING );
\r
5360 // Get input channel information.
\r
5361 info.inputChannels = inCaps.dwChannels;
\r
5363 // Get sample rate and format information.
\r
5364 std::vector<unsigned int> rates;
\r
5365 if ( inCaps.dwChannels >= 2 ) {
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5369 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5371 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5372 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5375 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5376 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5377 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5381 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5385 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5388 else if ( inCaps.dwChannels == 1 ) {
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5391 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5392 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5394 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5398 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5399 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5400 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5401 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5402 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5404 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5405 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5406 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5407 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5408 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5411 else info.inputChannels = 0; // technically, this would be an error
\r
5415 if ( info.inputChannels == 0 ) return info;
\r
5417 // Copy the supported rates to the info structure but avoid duplication.
\r
5419 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5421 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5422 if ( rates[i] == info.sampleRates[j] ) {
\r
5427 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5429 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5431 // If device opens for both playback and capture, we determine the channels.
\r
5432 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5433 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5435 if ( device == 0 ) info.isDefaultInput = true;
\r
5437 // Copy name and return.
\r
5438 info.name = dsDevices[ device ].name;
\r
5439 info.probed = true;
\r
// Open the given DirectSound device for OUTPUT or INPUT at the requested
// sample rate, channel count/offset and data format; allocate the device
// and user buffers, the DsHandle bookkeeping structure and (on first use)
// the callback thread.  On failure an explanatory message is left in
// errorText_.  *bufferSize may be adjusted in place (lower bound of 32
// sample frames).  DirectSound supports at most 2 channels per device.
5443 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5444 unsigned int firstChannel, unsigned int sampleRate,

5445 RtAudioFormat format, unsigned int *bufferSize,

5446 RtAudio::StreamOptions *options )

5448 if ( channels + firstChannel > 2 ) {

5449 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5453 size_t nDevices = dsDevices.size();

5454 if ( nDevices == 0 ) {

5455 // This should not happen because a check is made before this function is called.

5456 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5460 if ( device >= nDevices ) {

5461 // This should not happen because a check is made before this function is called.

5462 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// Verify the device actually supports the requested direction
// (validId[0] = playback, validId[1] = capture).
5466 if ( mode == OUTPUT ) {

5467 if ( dsDevices[ device ].validId[0] == false ) {

5468 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5469 errorText_ = errorStream_.str();

5473 else { // mode == INPUT

5474 if ( dsDevices[ device ].validId[1] == false ) {

5475 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5476 errorText_ = errorStream_.str();

5481 // According to a note in PortAudio, using GetDesktopWindow()

5482 // instead of GetForegroundWindow() is supposed to avoid problems

5483 // that occur when the application's window is not the foreground

5484 // window. Also, if the application window closes before the

5485 // DirectSound buffer, DirectSound can crash. In the past, I had

5486 // problems when using GetDesktopWindow() but it seems fine now

5487 // (January 2010). I'll leave it commented here.

5488 // HWND hWnd = GetForegroundWindow();

5489 HWND hWnd = GetDesktopWindow();

5491 // Check the numberOfBuffers parameter and limit the lowest value to

5492 // two. This is a judgement call and a value of two is probably too

5493 // low for capture, but it should work for playback.

5495 if ( options ) nBuffers = options->numberOfBuffers;

5496 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5497 if ( nBuffers < 2 ) nBuffers = 3;

5499 // Check the lower range of the user-specified buffer size and set

5500 // (arbitrarily) to a lower bound of 32.

5501 if ( *bufferSize < 32 ) *bufferSize = 32;

5503 // Create the wave format structure. The data format setting will

5504 // be determined later.

5505 WAVEFORMATEX waveFormat;

5506 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5507 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5508 waveFormat.nChannels = channels + firstChannel;

5509 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5511 // Determine the device buffer size. By default, we'll use the value

5512 // defined above (32K), but we will grow it to make allowances for

5513 // very large software buffer sizes.

5514 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5515 DWORD dsPointerLeadTime = 0;

// Opaque handles to the DirectSound object and its buffer; stashed in
// the DsHandle structure near the end of this function.
5517 void *ohandle = 0, *bhandle = 0;

// Playback path: create the DirectSound object, set the primary-buffer
// format, then create (and zero-fill) the secondary buffer we write to.
5519 if ( mode == OUTPUT ) {

5521 LPDIRECTSOUND output;

5522 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5523 if ( FAILED( result ) ) {

5524 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5525 errorText_ = errorStream_.str();

5530 outCaps.dwSize = sizeof( outCaps );

5531 result = output->GetCaps( &outCaps );

5532 if ( FAILED( result ) ) {

5533 output->Release();

5534 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5535 errorText_ = errorStream_.str();

5539 // Check channel information.

5540 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5541 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5542 errorText_ = errorStream_.str();

5546 // Check format information. Use 16-bit format unless not

5547 // supported or user requests 8-bit.

5548 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5549 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5550 waveFormat.wBitsPerSample = 16;

5551 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5554 waveFormat.wBitsPerSample = 8;

5555 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5557 stream_.userFormat = format;

5559 // Update wave format structure and buffer information.

5560 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5561 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time: how far ahead of the safe-write pointer we will write
// (nBuffers software buffers' worth of bytes); used by callbackEvent.
5562 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5564 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5565 while ( dsPointerLeadTime * 2U > dsBufferSize )

5566 dsBufferSize *= 2;

5568 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5569 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5570 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5571 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5572 if ( FAILED( result ) ) {

5573 output->Release();

5574 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5575 errorText_ = errorStream_.str();

5579 // Even though we will write to the secondary buffer, we need to

5580 // access the primary buffer to set the correct output format

5581 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5582 // buffer description.

5583 DSBUFFERDESC bufferDescription;

5584 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5585 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5586 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5588 // Obtain the primary buffer

5589 LPDIRECTSOUNDBUFFER buffer;

5590 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5591 if ( FAILED( result ) ) {

5592 output->Release();

5593 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5594 errorText_ = errorStream_.str();

5598 // Set the primary DS buffer sound format.

5599 result = buffer->SetFormat( &waveFormat );

5600 if ( FAILED( result ) ) {

5601 output->Release();

5602 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5603 errorText_ = errorStream_.str();

5607 // Setup the secondary DS buffer description.

5608 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5609 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5610 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5611 DSBCAPS_GLOBALFOCUS |

5612 DSBCAPS_GETCURRENTPOSITION2 |

5613 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5614 bufferDescription.dwBufferBytes = dsBufferSize;

5615 bufferDescription.lpwfxFormat = &waveFormat;

5617 // Try to create the secondary DS buffer. If that doesn't work,

5618 // try to use software mixing. Otherwise, there's a problem.

5619 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5620 if ( FAILED( result ) ) {

5621 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5622 DSBCAPS_GLOBALFOCUS |

5623 DSBCAPS_GETCURRENTPOSITION2 |

5624 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5625 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5626 if ( FAILED( result ) ) {

5627 output->Release();

5628 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5629 errorText_ = errorStream_.str();

5634 // Get the buffer size ... might be different from what we specified.

5636 dsbcaps.dwSize = sizeof( DSBCAPS );

5637 result = buffer->GetCaps( &dsbcaps );

5638 if ( FAILED( result ) ) {

5639 output->Release();

5640 buffer->Release();

5641 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5642 errorText_ = errorStream_.str();

5646 dsBufferSize = dsbcaps.dwBufferBytes;

// Lock / zero / unlock the full secondary buffer so playback starts
// from silence rather than whatever memory held before.
5648 // Lock the DS buffer

5651 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5652 if ( FAILED( result ) ) {

5653 output->Release();

5654 buffer->Release();

5655 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5656 errorText_ = errorStream_.str();

5660 // Zero the DS buffer

5661 ZeroMemory( audioPtr, dataLen );

5663 // Unlock the DS buffer

5664 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5665 if ( FAILED( result ) ) {

5666 output->Release();

5667 buffer->Release();

5668 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5669 errorText_ = errorStream_.str();

5673 ohandle = (void *) output;

5674 bhandle = (void *) buffer;

// Capture path: create the DirectSoundCapture object and its capture
// buffer, mirroring the playback setup above.
5677 if ( mode == INPUT ) {

5679 LPDIRECTSOUNDCAPTURE input;

5680 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5681 if ( FAILED( result ) ) {

5682 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5683 errorText_ = errorStream_.str();

5688 inCaps.dwSize = sizeof( inCaps );

5689 result = input->GetCaps( &inCaps );

5690 if ( FAILED( result ) ) {

5692 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5693 errorText_ = errorStream_.str();

5697 // Check channel information.

5698 if ( inCaps.dwChannels < channels + firstChannel ) {

5699 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5703 // Check format information. Use 16-bit format unless user

5704 // requests 8-bit.

5705 DWORD deviceFormats;

5706 if ( channels + firstChannel == 2 ) {

5707 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5708 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5709 waveFormat.wBitsPerSample = 8;

5710 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5712 else { // assume 16-bit is supported

5713 waveFormat.wBitsPerSample = 16;

5714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5717 else { // channel == 1

5718 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5719 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5720 waveFormat.wBitsPerSample = 8;

5721 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5723 else { // assume 16-bit is supported

5724 waveFormat.wBitsPerSample = 16;

5725 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5728 stream_.userFormat = format;

5730 // Update wave format structure and buffer information.

5731 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5732 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5733 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5735 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5736 while ( dsPointerLeadTime * 2U > dsBufferSize )

5737 dsBufferSize *= 2;

5739 // Setup the secondary DS buffer description.

5740 DSCBUFFERDESC bufferDescription;

5741 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5742 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5743 bufferDescription.dwFlags = 0;

5744 bufferDescription.dwReserved = 0;

5745 bufferDescription.dwBufferBytes = dsBufferSize;

5746 bufferDescription.lpwfxFormat = &waveFormat;

5748 // Create the capture buffer.

5749 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5750 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5751 if ( FAILED( result ) ) {

5753 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5754 errorText_ = errorStream_.str();

5758 // Get the buffer size ... might be different from what we specified.

5759 DSCBCAPS dscbcaps;

5760 dscbcaps.dwSize = sizeof( DSCBCAPS );

5761 result = buffer->GetCaps( &dscbcaps );

5762 if ( FAILED( result ) ) {

5764 buffer->Release();

5765 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5766 errorText_ = errorStream_.str();

5770 dsBufferSize = dscbcaps.dwBufferBytes;

5772 // NOTE: We could have a problem here if this is a duplex stream

5773 // and the play and capture hardware buffer sizes are different

5774 // (I'm actually not sure if that is a problem or not).

5775 // Currently, we are not verifying that.

5777 // Lock the capture buffer

5780 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5781 if ( FAILED( result ) ) {

5783 buffer->Release();

5784 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5785 errorText_ = errorStream_.str();

5789 // Zero the buffer

5790 ZeroMemory( audioPtr, dataLen );

5792 // Unlock the buffer

5793 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5794 if ( FAILED( result ) ) {

5796 buffer->Release();

5797 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5798 errorText_ = errorStream_.str();

5802 ohandle = (void *) input;

5803 bhandle = (void *) buffer;

5806 // Set various stream parameters

5807 DsHandle *handle = 0;

5808 stream_.nDeviceChannels[mode] = channels + firstChannel;

5809 stream_.nUserChannels[mode] = channels;

5810 stream_.bufferSize = *bufferSize;

5811 stream_.channelOffset[mode] = firstChannel;

5812 stream_.deviceInterleaved[mode] = true;

5813 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5814 else stream_.userInterleaved = true;

// A user<->device conversion buffer is needed whenever channel count,
// sample format or interleaving differ between the two sides.
5816 // Set flag for buffer conversion

5817 stream_.doConvertBuffer[mode] = false;

5818 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5819 stream_.doConvertBuffer[mode] = true;

5820 if (stream_.userFormat != stream_.deviceFormat[mode])

5821 stream_.doConvertBuffer[mode] = true;

5822 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5823 stream_.nUserChannels[mode] > 1 )

5824 stream_.doConvertBuffer[mode] = true;

5826 // Allocate necessary internal buffers

5827 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5828 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5829 if ( stream_.userBuffer[mode] == NULL ) {

5830 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5834 if ( stream_.doConvertBuffer[mode] ) {

// Reuse an existing (output-side) device buffer for input if it is
// already large enough; otherwise (re)allocate.
5836 bool makeBuffer = true;

5837 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5838 if ( mode == INPUT ) {

5839 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5840 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5841 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5845 if ( makeBuffer ) {

5846 bufferBytes *= *bufferSize;

5847 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5848 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5849 if ( stream_.deviceBuffer == NULL ) {

5850 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5856 // Allocate our DsHandle structures for the stream.

5857 if ( stream_.apiHandle == 0 ) {

5859 handle = new DsHandle;

5861 catch ( std::bad_alloc& ) {

5862 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5866 // Create a manual-reset event.

5867 handle->condition = CreateEvent( NULL, // no security

5868 TRUE, // manual-reset

5869 FALSE, // non-signaled initially

5870 NULL ); // unnamed

5871 stream_.apiHandle = (void *) handle;

5874 handle = (DsHandle *) stream_.apiHandle;

5875 handle->id[mode] = ohandle;

5876 handle->buffer[mode] = bhandle;

5877 handle->dsBufferSize[mode] = dsBufferSize;

5878 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5880 stream_.device[mode] = device;

5881 stream_.state = STREAM_STOPPED;

5882 if ( stream_.mode == OUTPUT && mode == INPUT )

5883 // We had already set up an output stream.

5884 stream_.mode = DUPLEX;

5886 stream_.mode = mode;

5887 stream_.nBuffers = nBuffers;

5888 stream_.sampleRate = sampleRate;

5890 // Setup the buffer conversion information structure.

5891 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

// The single callback thread is shared between modes; only spawn it the
// first time a mode is opened on this stream.
5893 // Setup the callback thread.

5894 if ( stream_.callbackInfo.isRunning == false ) {

5895 unsigned threadId;

5896 stream_.callbackInfo.isRunning = true;

5897 stream_.callbackInfo.object = (void *) this;

5898 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5899 &stream_.callbackInfo, 0, &threadId );

5900 if ( stream_.callbackInfo.thread == 0 ) {

5901 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5905 // Boost DS thread priority

5906 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error cleanup: release any DirectSound playback/capture objects we
// created, close the condition event, free the user/device buffers and
// mark the stream closed before reporting failure.
5912 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5913 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5914 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5915 if ( buffer ) buffer->Release();

5916 object->Release();

5918 if ( handle->buffer[1] ) {

5919 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5920 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5921 if ( buffer ) buffer->Release();

5922 object->Release();

5924 CloseHandle( handle->condition );

5926 stream_.apiHandle = 0;

5929 for ( int i=0; i<2; i++ ) {

5930 if ( stream_.userBuffer[i] ) {

5931 free( stream_.userBuffer[i] );

5932 stream_.userBuffer[i] = 0;

5936 if ( stream_.deviceBuffer ) {

5937 free( stream_.deviceBuffer );

5938 stream_.deviceBuffer = 0;

5941 stream_.state = STREAM_CLOSED;
\r
// Close an open stream: shut down and join the callback thread, release
// the DirectSound playback/capture objects and their buffers, close the
// condition event, free the internal user/device buffers and reset the
// stream bookkeeping (mode/state).  Emits a WARNING if no stream is open.
5945 void RtApiDs :: closeStream()

5947 if ( stream_.state == STREAM_CLOSED ) {

5948 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5949 error( RtAudioError::WARNING );

// The callback thread polls isRunning; clearing it lets the thread exit,
// then we block until it does before closing its handle.
5953 // Stop the callback thread.

5954 stream_.callbackInfo.isRunning = false;

5955 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5956 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5958 DsHandle *handle = (DsHandle *) stream_.apiHandle;

5960 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5961 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5962 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5965 buffer->Release();

5967 object->Release();

5969 if ( handle->buffer[1] ) {

5970 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5971 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5974 buffer->Release();

5976 object->Release();

5978 CloseHandle( handle->condition );

5980 stream_.apiHandle = 0;

// Free the per-mode user buffers and the shared device buffer.
5983 for ( int i=0; i<2; i++ ) {

5984 if ( stream_.userBuffer[i] ) {

5985 free( stream_.userBuffer[i] );

5986 stream_.userBuffer[i] = 0;

5990 if ( stream_.deviceBuffer ) {

5991 free( stream_.deviceBuffer );

5992 stream_.deviceBuffer = 0;

5995 stream_.mode = UNINITIALIZED;

5996 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: begin looping playback and/or capture on the
// DirectSound buffers, reset the drain bookkeeping and the signaling
// event, and mark the stream running.  Emits a WARNING if already
// running; reports a SYSTEM_ERROR if a DirectSound call fails.
5999 void RtApiDs :: startStream()

6002 if ( stream_.state == STREAM_RUNNING ) {

6003 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6004 error( RtAudioError::WARNING );

6008 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6010 // Increase scheduler frequency on lesser windows (a side-effect of

6011 // increasing timer accuracy). On greater windows (Win2K or later),

6012 // this is already in effect.

6013 timeBeginPeriod( 1 );

6015 buffersRolling = false;

6016 duplexPrerollBytes = 0;

6018 if ( stream_.mode == DUPLEX ) {

6019 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6020 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6023 HRESULT result = 0;

6024 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6026 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// Start looping playback on the secondary output buffer.
6027 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6028 if ( FAILED( result ) ) {

6029 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6030 errorText_ = errorStream_.str();

6035 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6037 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

// Start looping capture into the capture buffer.
6038 result = buffer->Start( DSCBSTART_LOOPING );

6039 if ( FAILED( result ) ) {

6040 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6041 errorText_ = errorStream_.str();

// Reset drain state and the manual-reset event used by stopStream to
// wait for output drain completion.
6046 handle->drainCounter = 0;

6047 handle->internalDrain = false;

6048 ResetEvent( handle->condition );

6049 stream_.state = STREAM_RUNNING;

6052 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully: for output, wait for pending data to
// drain (signaled via handle->condition by the callback thread), then
// stop both DirectSound buffers, zero their contents so a restart does
// not replay stale audio, and rewind the internal buffer pointers.
// Emits a WARNING if already stopped; SYSTEM_ERROR on DirectSound failure.
6055 void RtApiDs :: stopStream()

6058 if ( stream_.state == STREAM_STOPPED ) {

6059 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6060 error( RtAudioError::WARNING );

6064 HRESULT result = 0;

6067 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6068 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress yet: request one (2) and
// block until the callback thread signals that output has drained.
6069 if ( handle->drainCounter == 0 ) {

6070 handle->drainCounter = 2;

6071 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6074 stream_.state = STREAM_STOPPED;

6076 MUTEX_LOCK( &stream_.mutex );

6078 // Stop the buffer and clear memory

6079 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6080 result = buffer->Stop();

6081 if ( FAILED( result ) ) {

6082 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6083 errorText_ = errorStream_.str();

6087 // Lock the buffer and clear it so that if we start to play again,

6088 // we won't have old data playing.

6089 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6090 if ( FAILED( result ) ) {

6091 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6092 errorText_ = errorStream_.str();

6096 // Zero the DS buffer

6097 ZeroMemory( audioPtr, dataLen );

6099 // Unlock the DS buffer

6100 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6101 if ( FAILED( result ) ) {

6102 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6103 errorText_ = errorStream_.str();

6107 // If we start playing again, we must begin at beginning of buffer.

6108 handle->bufferPointer[0] = 0;

6111 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6112 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6116 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for a pure INPUT stream.
6118 if ( stream_.mode != DUPLEX )

6119 MUTEX_LOCK( &stream_.mutex );

6121 result = buffer->Stop();

6122 if ( FAILED( result ) ) {

6123 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6124 errorText_ = errorStream_.str();

6128 // Lock the buffer and clear it so that if we start to play again,

6129 // we won't have old data playing.

6130 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6131 if ( FAILED( result ) ) {

6132 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6133 errorText_ = errorStream_.str();

6137 // Zero the DS buffer

6138 ZeroMemory( audioPtr, dataLen );

6140 // Unlock the DS buffer

6141 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6142 if ( FAILED( result ) ) {

6143 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6144 errorText_ = errorStream_.str();

6148 // If we start recording again, we must begin at beginning of buffer.

6149 handle->bufferPointer[1] = 0;

6153 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6154 MUTEX_UNLOCK( &stream_.mutex );

6156 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream without draining pending output.  Setting
// drainCounter to 2 makes the callback thread write silence instead of
// invoking the user callback (see callbackEvent: drainCounter > 1), so
// the stream winds down immediately.  Emits a WARNING if already stopped.
6159 void RtApiDs :: abortStream()

6162 if ( stream_.state == STREAM_STOPPED ) {

6163 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6164 error( RtAudioError::WARNING );

6168 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6169 handle->drainCounter = 2;
\r
6174 void RtApiDs :: callbackEvent()
\r
6176 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6177 Sleep( 50 ); // sleep 50 milliseconds
\r
6181 if ( stream_.state == STREAM_CLOSED ) {
\r
6182 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6183 error( RtAudioError::WARNING );
\r
6187 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6188 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6190 // Check if we were draining the stream and signal is finished.
\r
6191 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6193 stream_.state = STREAM_STOPPING;
\r
6194 if ( handle->internalDrain == false )
\r
6195 SetEvent( handle->condition );
\r
6201 // Invoke user callback to get fresh output data UNLESS we are
\r
6202 // draining stream.
\r
6203 if ( handle->drainCounter == 0 ) {
\r
6204 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6205 double streamTime = getStreamTime();
\r
6206 RtAudioStreamStatus status = 0;
\r
6207 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6208 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6209 handle->xrun[0] = false;
\r
6211 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6212 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6213 handle->xrun[1] = false;
\r
6215 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6216 stream_.bufferSize, streamTime, status, info->userData );
\r
6217 if ( cbReturnValue == 2 ) {
\r
6218 stream_.state = STREAM_STOPPING;
\r
6219 handle->drainCounter = 2;
\r
6223 else if ( cbReturnValue == 1 ) {
\r
6224 handle->drainCounter = 1;
\r
6225 handle->internalDrain = true;
\r
6230 DWORD currentWritePointer, safeWritePointer;
\r
6231 DWORD currentReadPointer, safeReadPointer;
\r
6232 UINT nextWritePointer;
\r
6234 LPVOID buffer1 = NULL;
\r
6235 LPVOID buffer2 = NULL;
\r
6236 DWORD bufferSize1 = 0;
\r
6237 DWORD bufferSize2 = 0;
\r
6242 MUTEX_LOCK( &stream_.mutex );
\r
6243 if ( stream_.state == STREAM_STOPPED ) {
\r
6244 MUTEX_UNLOCK( &stream_.mutex );
\r
6248 if ( buffersRolling == false ) {
\r
6249 if ( stream_.mode == DUPLEX ) {
\r
6250 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6252 // It takes a while for the devices to get rolling. As a result,
\r
6253 // there's no guarantee that the capture and write device pointers
\r
6254 // will move in lockstep. Wait here for both devices to start
\r
6255 // rolling, and then set our buffer pointers accordingly.
\r
6256 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6257 // bytes later than the write buffer.
\r
6259 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6260 // take place between the two GetCurrentPosition calls... but I'm
\r
6261 // really not sure how to solve the problem. Temporarily boost to
\r
6262 // Realtime priority, maybe; but I'm not sure what priority the
\r
6263 // DirectSound service threads run at. We *should* be roughly
\r
6264 // within a ms or so of correct.
\r
6266 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6267 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6269 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6271 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6272 if ( FAILED( result ) ) {
\r
6273 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6274 errorText_ = errorStream_.str();
\r
6275 error( RtAudioError::SYSTEM_ERROR );
\r
6278 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6279 if ( FAILED( result ) ) {
\r
6280 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6281 errorText_ = errorStream_.str();
\r
6282 error( RtAudioError::SYSTEM_ERROR );
\r
6286 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6287 if ( FAILED( result ) ) {
\r
6288 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6289 errorText_ = errorStream_.str();
\r
6290 error( RtAudioError::SYSTEM_ERROR );
\r
6293 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6294 if ( FAILED( result ) ) {
\r
6295 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6296 errorText_ = errorStream_.str();
\r
6297 error( RtAudioError::SYSTEM_ERROR );
\r
6300 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6304 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6306 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6307 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6308 handle->bufferPointer[1] = safeReadPointer;
\r
6310 else if ( stream_.mode == OUTPUT ) {
\r
6312 // Set the proper nextWritePosition after initial startup.
\r
6313 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6314 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6315 if ( FAILED( result ) ) {
\r
6316 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6317 errorText_ = errorStream_.str();
\r
6318 error( RtAudioError::SYSTEM_ERROR );
\r
6321 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6322 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6325 buffersRolling = true;
\r
6328 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6330 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6332 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6333 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6334 bufferBytes *= formatBytes( stream_.userFormat );
\r
6335 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6338 // Setup parameters and do buffer conversion if necessary.
\r
6339 if ( stream_.doConvertBuffer[0] ) {
\r
6340 buffer = stream_.deviceBuffer;
\r
6341 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6342 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6343 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6346 buffer = stream_.userBuffer[0];
\r
6347 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6348 bufferBytes *= formatBytes( stream_.userFormat );
\r
6351 // No byte swapping necessary in DirectSound implementation.
\r
6353 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6354 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6356 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6357 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6359 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6360 nextWritePointer = handle->bufferPointer[0];
\r
6362 DWORD endWrite, leadPointer;
\r
6364 // Find out where the read and "safe write" pointers are.
\r
6365 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6366 if ( FAILED( result ) ) {
\r
6367 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6368 errorText_ = errorStream_.str();
\r
6369 error( RtAudioError::SYSTEM_ERROR );
\r
6373 // We will copy our output buffer into the region between
\r
6374 // safeWritePointer and leadPointer. If leadPointer is not
\r
6375 // beyond the next endWrite position, wait until it is.
\r
6376 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6377 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6378 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6379 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6380 endWrite = nextWritePointer + bufferBytes;
\r
6382 // Check whether the entire write region is behind the play pointer.
\r
6383 if ( leadPointer >= endWrite ) break;
\r
6385 // If we are here, then we must wait until the leadPointer advances
\r
6386 // beyond the end of our next write region. We use the
\r
6387 // Sleep() function to suspend operation until that happens.
\r
6388 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6389 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6390 if ( millis < 1.0 ) millis = 1.0;
\r
6391 Sleep( (DWORD) millis );
\r
6394 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6395 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6396 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6397 handle->xrun[0] = true;
\r
6398 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6399 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6400 handle->bufferPointer[0] = nextWritePointer;
\r
6401 endWrite = nextWritePointer + bufferBytes;
\r
6404 // Lock free space in the buffer
\r
6405 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6406 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6407 if ( FAILED( result ) ) {
\r
6408 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6409 errorText_ = errorStream_.str();
\r
6410 error( RtAudioError::SYSTEM_ERROR );
\r
6414 // Copy our buffer into the DS buffer
\r
6415 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6416 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6418 // Update our buffer offset and unlock sound buffer
\r
6419 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6420 if ( FAILED( result ) ) {
\r
6421 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6422 errorText_ = errorStream_.str();
\r
6423 error( RtAudioError::SYSTEM_ERROR );
\r
6426 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6427 handle->bufferPointer[0] = nextWritePointer;
\r
6430 // Don't bother draining input
\r
6431 if ( handle->drainCounter ) {
\r
6432 handle->drainCounter++;
\r
6436 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6438 // Setup parameters.
\r
6439 if ( stream_.doConvertBuffer[1] ) {
\r
6440 buffer = stream_.deviceBuffer;
\r
6441 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6442 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6445 buffer = stream_.userBuffer[1];
\r
6446 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6447 bufferBytes *= formatBytes( stream_.userFormat );
\r
6450 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6451 long nextReadPointer = handle->bufferPointer[1];
\r
6452 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6454 // Find out where the write and "safe read" pointers are.
\r
6455 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6456 if ( FAILED( result ) ) {
\r
6457 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6458 errorText_ = errorStream_.str();
\r
6459 error( RtAudioError::SYSTEM_ERROR );
\r
6463 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6464 DWORD endRead = nextReadPointer + bufferBytes;
\r
6466 // Handling depends on whether we are INPUT or DUPLEX.
\r
6467 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6468 // then a wait here will drag the write pointers into the forbidden zone.
\r
6470 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6471 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6472 // practical way to sync up the read and write pointers reliably, given the
\r
6473 // the very complex relationship between phase and increment of the read and write
\r
6476 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6477 // provide a pre-roll period of 0.5 seconds in which we return
\r
6478 // zeros from the read buffer while the pointers sync up.
\r
6480 if ( stream_.mode == DUPLEX ) {
\r
6481 if ( safeReadPointer < endRead ) {
\r
6482 if ( duplexPrerollBytes <= 0 ) {
\r
6483 // Pre-roll time over. Be more agressive.
\r
6484 int adjustment = endRead-safeReadPointer;
\r
6486 handle->xrun[1] = true;
\r
6488 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6489 // and perform fine adjustments later.
\r
6490 // - small adjustments: back off by twice as much.
\r
6491 if ( adjustment >= 2*bufferBytes )
\r
6492 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6494 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6496 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6500 // In pre=roll time. Just do it.
\r
6501 nextReadPointer = safeReadPointer - bufferBytes;
\r
6502 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6504 endRead = nextReadPointer + bufferBytes;
\r
6507 else { // mode == INPUT
\r
6508 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6509 // See comments for playback.
\r
6510 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6511 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6512 if ( millis < 1.0 ) millis = 1.0;
\r
6513 Sleep( (DWORD) millis );
\r
6515 // Wake up and find out where we are now.
\r
6516 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6517 if ( FAILED( result ) ) {
\r
6518 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6519 errorText_ = errorStream_.str();
\r
6520 error( RtAudioError::SYSTEM_ERROR );
\r
6524 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6528 // Lock free space in the buffer
\r
6529 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6530 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6531 if ( FAILED( result ) ) {
\r
6532 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6533 errorText_ = errorStream_.str();
\r
6534 error( RtAudioError::SYSTEM_ERROR );
\r
6538 if ( duplexPrerollBytes <= 0 ) {
\r
6539 // Copy our buffer into the DS buffer
\r
6540 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6541 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6544 memset( buffer, 0, bufferSize1 );
\r
6545 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6546 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6549 // Update our buffer offset and unlock sound buffer
\r
6550 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6551 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6552 if ( FAILED( result ) ) {
\r
6553 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6554 errorText_ = errorStream_.str();
\r
6555 error( RtAudioError::SYSTEM_ERROR );
\r
6558 handle->bufferPointer[1] = nextReadPointer;
\r
6560 // No byte swapping necessary in DirectSound implementation.
\r
6562 // If necessary, convert 8-bit data from unsigned to signed.
\r
6563 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6564 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6566 // Do buffer conversion if necessary.
\r
6567 if ( stream_.doConvertBuffer[1] )
\r
6568 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6572 MUTEX_UNLOCK( &stream_.mutex );
\r
6573 RtApi::tickStreamTime();
\r
6576 // Definitions for utility functions and callbacks
\r
6577 // specific to the DirectSound implementation.
\r
6579 static unsigned __stdcall callbackHandler( void *ptr )
\r
6581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6582 RtApiDs *object = (RtApiDs *) info->object;
\r
6583 bool* isRunning = &info->isRunning;
\r
6585 while ( *isRunning == true ) {
\r
6586 object->callbackEvent();
\r
6589 _endthreadex( 0 );
\r
6593 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6594 LPCTSTR description,
\r
6595 LPCTSTR /*module*/,
\r
6596 LPVOID lpContext )
\r
6598 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6599 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6602 bool validDevice = false;
\r
6603 if ( probeInfo.isInput == true ) {
\r
6605 LPDIRECTSOUNDCAPTURE object;
\r
6607 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6608 if ( hr != DS_OK ) return TRUE;
\r
6610 caps.dwSize = sizeof(caps);
\r
6611 hr = object->GetCaps( &caps );
\r
6612 if ( hr == DS_OK ) {
\r
6613 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6614 validDevice = true;
\r
6616 object->Release();
\r
6620 LPDIRECTSOUND object;
\r
6621 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6622 if ( hr != DS_OK ) return TRUE;
\r
6624 caps.dwSize = sizeof(caps);
\r
6625 hr = object->GetCaps( &caps );
\r
6626 if ( hr == DS_OK ) {
\r
6627 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6628 validDevice = true;
\r
6630 object->Release();
\r
6633 // If good device, then save its name and guid.
\r
6634 std::string name = convertCharPointerToStdString( description );
\r
6635 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6636 if ( lpguid == NULL )
\r
6637 name = "Default Device";
\r
6638 if ( validDevice ) {
\r
6639 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6640 if ( dsDevices[i].name == name ) {
\r
6641 dsDevices[i].found = true;
\r
6642 if ( probeInfo.isInput ) {
\r
6643 dsDevices[i].id[1] = lpguid;
\r
6644 dsDevices[i].validId[1] = true;
\r
6647 dsDevices[i].id[0] = lpguid;
\r
6648 dsDevices[i].validId[0] = true;
\r
6655 device.name = name;
\r
6656 device.found = true;
\r
6657 if ( probeInfo.isInput ) {
\r
6658 device.id[1] = lpguid;
\r
6659 device.validId[1] = true;
\r
6662 device.id[0] = lpguid;
\r
6663 device.validId[0] = true;
\r
6665 dsDevices.push_back( device );
\r
6671 static const char* getErrorString( int code )
\r
6675 case DSERR_ALLOCATED:
\r
6676 return "Already allocated";
\r
6678 case DSERR_CONTROLUNAVAIL:
\r
6679 return "Control unavailable";
\r
6681 case DSERR_INVALIDPARAM:
\r
6682 return "Invalid parameter";
\r
6684 case DSERR_INVALIDCALL:
\r
6685 return "Invalid call";
\r
6687 case DSERR_GENERIC:
\r
6688 return "Generic error";
\r
6690 case DSERR_PRIOLEVELNEEDED:
\r
6691 return "Priority level needed";
\r
6693 case DSERR_OUTOFMEMORY:
\r
6694 return "Out of memory";
\r
6696 case DSERR_BADFORMAT:
\r
6697 return "The sample rate or the channel format is not supported";
\r
6699 case DSERR_UNSUPPORTED:
\r
6700 return "Not supported";
\r
6702 case DSERR_NODRIVER:
\r
6703 return "No driver";
\r
6705 case DSERR_ALREADYINITIALIZED:
\r
6706 return "Already initialized";
\r
6708 case DSERR_NOAGGREGATION:
\r
6709 return "No aggregation";
\r
6711 case DSERR_BUFFERLOST:
\r
6712 return "Buffer lost";
\r
6714 case DSERR_OTHERAPPHASPRIO:
\r
6715 return "Another application already has priority";
\r
6717 case DSERR_UNINITIALIZED:
\r
6718 return "Uninitialized";
\r
6721 return "DirectSound unknown error";
\r
6724 //******************** End of __WINDOWS_DS__ *********************//
\r
6728 #if defined(__LINUX_ALSA__)
\r
6730 #include <alsa/asoundlib.h>
\r
6731 #include <unistd.h>
\r
6733 // A structure to hold various information related to the ALSA API
\r
6734 // implementation.
\r
6735 struct AlsaHandle {
\r
6736 snd_pcm_t *handles[2];
\r
6737 bool synchronized;
\r
6739 pthread_cond_t runnable_cv;
\r
6743 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6746 static void *alsaCallbackHandler( void * ptr );
\r
6748 RtApiAlsa :: RtApiAlsa()
\r
6750 // Nothing to do here.
\r
6753 RtApiAlsa :: ~RtApiAlsa()
\r
6755 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6758 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6760 unsigned nDevices = 0;
\r
6761 int result, subdevice, card;
\r
6763 snd_ctl_t *handle;
\r
6765 // Count cards and devices
\r
6767 snd_card_next( &card );
\r
6768 while ( card >= 0 ) {
\r
6769 sprintf( name, "hw:%d", card );
\r
6770 result = snd_ctl_open( &handle, name, 0 );
\r
6771 if ( result < 0 ) {
\r
6772 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6773 errorText_ = errorStream_.str();
\r
6774 error( RtAudioError::WARNING );
\r
6779 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6780 if ( result < 0 ) {
\r
6781 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6782 errorText_ = errorStream_.str();
\r
6783 error( RtAudioError::WARNING );
\r
6786 if ( subdevice < 0 )
\r
6791 snd_ctl_close( handle );
\r
6792 snd_card_next( &card );
\r
6795 result = snd_ctl_open( &handle, "default", 0 );
\r
6796 if (result == 0) {
\r
6798 snd_ctl_close( handle );
\r
6804 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6806 RtAudio::DeviceInfo info;
\r
6807 info.probed = false;
\r
6809 unsigned nDevices = 0;
\r
6810 int result, subdevice, card;
\r
6812 snd_ctl_t *chandle;
\r
6814 // Count cards and devices
\r
6816 snd_card_next( &card );
\r
6817 while ( card >= 0 ) {
\r
6818 sprintf( name, "hw:%d", card );
\r
6819 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6820 if ( result < 0 ) {
\r
6821 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6822 errorText_ = errorStream_.str();
\r
6823 error( RtAudioError::WARNING );
\r
6828 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6829 if ( result < 0 ) {
\r
6830 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6831 errorText_ = errorStream_.str();
\r
6832 error( RtAudioError::WARNING );
\r
6835 if ( subdevice < 0 ) break;
\r
6836 if ( nDevices == device ) {
\r
6837 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6843 snd_ctl_close( chandle );
\r
6844 snd_card_next( &card );
\r
6847 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6848 if ( result == 0 ) {
\r
6849 if ( nDevices == device ) {
\r
6850 strcpy( name, "default" );
\r
6856 if ( nDevices == 0 ) {
\r
6857 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6858 error( RtAudioError::INVALID_USE );
\r
6862 if ( device >= nDevices ) {
\r
6863 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6864 error( RtAudioError::INVALID_USE );
\r
6870 // If a stream is already open, we cannot probe the stream devices.
\r
6871 // Thus, use the saved results.
\r
6872 if ( stream_.state != STREAM_CLOSED &&
\r
6873 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6874 snd_ctl_close( chandle );
\r
6875 if ( device >= devices_.size() ) {
\r
6876 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6877 error( RtAudioError::WARNING );
\r
6880 return devices_[ device ];
\r
6883 int openMode = SND_PCM_ASYNC;
\r
6884 snd_pcm_stream_t stream;
\r
6885 snd_pcm_info_t *pcminfo;
\r
6886 snd_pcm_info_alloca( &pcminfo );
\r
6887 snd_pcm_t *phandle;
\r
6888 snd_pcm_hw_params_t *params;
\r
6889 snd_pcm_hw_params_alloca( ¶ms );
\r
6891 // First try for playback unless default device (which has subdev -1)
\r
6892 stream = SND_PCM_STREAM_PLAYBACK;
\r
6893 snd_pcm_info_set_stream( pcminfo, stream );
\r
6894 if ( subdevice != -1 ) {
\r
6895 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6896 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6898 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6899 if ( result < 0 ) {
\r
6900 // Device probably doesn't support playback.
\r
6901 goto captureProbe;
\r
6905 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6906 if ( result < 0 ) {
\r
6907 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6908 errorText_ = errorStream_.str();
\r
6909 error( RtAudioError::WARNING );
\r
6910 goto captureProbe;
\r
6913 // The device is open ... fill the parameter structure.
\r
6914 result = snd_pcm_hw_params_any( phandle, params );
\r
6915 if ( result < 0 ) {
\r
6916 snd_pcm_close( phandle );
\r
6917 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6918 errorText_ = errorStream_.str();
\r
6919 error( RtAudioError::WARNING );
\r
6920 goto captureProbe;
\r
6923 // Get output channel information.
\r
6924 unsigned int value;
\r
6925 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6926 if ( result < 0 ) {
\r
6927 snd_pcm_close( phandle );
\r
6928 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6929 errorText_ = errorStream_.str();
\r
6930 error( RtAudioError::WARNING );
\r
6931 goto captureProbe;
\r
6933 info.outputChannels = value;
\r
6934 snd_pcm_close( phandle );
\r
6937 stream = SND_PCM_STREAM_CAPTURE;
\r
6938 snd_pcm_info_set_stream( pcminfo, stream );
\r
6940 // Now try for capture unless default device (with subdev = -1)
\r
6941 if ( subdevice != -1 ) {
\r
6942 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6943 snd_ctl_close( chandle );
\r
6944 if ( result < 0 ) {
\r
6945 // Device probably doesn't support capture.
\r
6946 if ( info.outputChannels == 0 ) return info;
\r
6947 goto probeParameters;
\r
6951 snd_ctl_close( chandle );
\r
6953 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6954 if ( result < 0 ) {
\r
6955 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6956 errorText_ = errorStream_.str();
\r
6957 error( RtAudioError::WARNING );
\r
6958 if ( info.outputChannels == 0 ) return info;
\r
6959 goto probeParameters;
\r
6962 // The device is open ... fill the parameter structure.
\r
6963 result = snd_pcm_hw_params_any( phandle, params );
\r
6964 if ( result < 0 ) {
\r
6965 snd_pcm_close( phandle );
\r
6966 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6967 errorText_ = errorStream_.str();
\r
6968 error( RtAudioError::WARNING );
\r
6969 if ( info.outputChannels == 0 ) return info;
\r
6970 goto probeParameters;
\r
6973 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6974 if ( result < 0 ) {
\r
6975 snd_pcm_close( phandle );
\r
6976 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6977 errorText_ = errorStream_.str();
\r
6978 error( RtAudioError::WARNING );
\r
6979 if ( info.outputChannels == 0 ) return info;
\r
6980 goto probeParameters;
\r
6982 info.inputChannels = value;
\r
6983 snd_pcm_close( phandle );
\r
6985 // If device opens for both playback and capture, we determine the channels.
\r
6986 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6987 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6989 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6990 if ( device == 0 && info.outputChannels > 0 )
\r
6991 info.isDefaultOutput = true;
\r
6992 if ( device == 0 && info.inputChannels > 0 )
\r
6993 info.isDefaultInput = true;
\r
6996 // At this point, we just need to figure out the supported data
\r
6997 // formats and sample rates. We'll proceed by opening the device in
\r
6998 // the direction with the maximum number of channels, or playback if
\r
6999 // they are equal. This might limit our sample rate options, but so
\r
7002 if ( info.outputChannels >= info.inputChannels )
\r
7003 stream = SND_PCM_STREAM_PLAYBACK;
\r
7005 stream = SND_PCM_STREAM_CAPTURE;
\r
7006 snd_pcm_info_set_stream( pcminfo, stream );
\r
7008 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7009 if ( result < 0 ) {
\r
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7011 errorText_ = errorStream_.str();
\r
7012 error( RtAudioError::WARNING );
\r
7016 // The device is open ... fill the parameter structure.
\r
7017 result = snd_pcm_hw_params_any( phandle, params );
\r
7018 if ( result < 0 ) {
\r
7019 snd_pcm_close( phandle );
\r
7020 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7021 errorText_ = errorStream_.str();
\r
7022 error( RtAudioError::WARNING );
\r
7026 // Test our discrete set of sample rate values.
\r
7027 info.sampleRates.clear();
\r
7028 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7029 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7030 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7032 if ( info.sampleRates.size() == 0 ) {
\r
7033 snd_pcm_close( phandle );
\r
7034 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7035 errorText_ = errorStream_.str();
\r
7036 error( RtAudioError::WARNING );
\r
7040 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7041 snd_pcm_format_t format;
\r
7042 info.nativeFormats = 0;
\r
7043 format = SND_PCM_FORMAT_S8;
\r
7044 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7045 info.nativeFormats |= RTAUDIO_SINT8;
\r
7046 format = SND_PCM_FORMAT_S16;
\r
7047 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7048 info.nativeFormats |= RTAUDIO_SINT16;
\r
7049 format = SND_PCM_FORMAT_S24;
\r
7050 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7051 info.nativeFormats |= RTAUDIO_SINT24;
\r
7052 format = SND_PCM_FORMAT_S32;
\r
7053 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7054 info.nativeFormats |= RTAUDIO_SINT32;
\r
7055 format = SND_PCM_FORMAT_FLOAT;
\r
7056 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7057 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7058 format = SND_PCM_FORMAT_FLOAT64;
\r
7059 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7060 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7062 // Check that we have at least one supported format
\r
7063 if ( info.nativeFormats == 0 ) {
\r
7064 snd_pcm_close( phandle );
\r
7065 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7066 errorText_ = errorStream_.str();
\r
7067 error( RtAudioError::WARNING );
\r
7071 // Get the device name
\r
7073 result = snd_card_get_name( card, &cardname );
\r
7074 if ( result >= 0 ) {
\r
7075 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7080 // That's all ... close the device and return
\r
7081 snd_pcm_close( phandle );
\r
7082 info.probed = true;
\r
7086 void RtApiAlsa :: saveDeviceInfo( void )
\r
7090 unsigned int nDevices = getDeviceCount();
\r
7091 devices_.resize( nDevices );
\r
7092 for ( unsigned int i=0; i<nDevices; i++ )
\r
7093 devices_[i] = getDeviceInfo( i );
\r
7096 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7097 unsigned int firstChannel, unsigned int sampleRate,
\r
7098 RtAudioFormat format, unsigned int *bufferSize,
\r
7099 RtAudio::StreamOptions *options )
\r
7102 #if defined(__RTAUDIO_DEBUG__)
\r
7103 snd_output_t *out;
\r
7104 snd_output_stdio_attach(&out, stderr, 0);
\r
7107 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7109 unsigned nDevices = 0;
\r
7110 int result, subdevice, card;
\r
7112 snd_ctl_t *chandle;
\r
7114 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7115 snprintf(name, sizeof(name), "%s", "default");
\r
7117 // Count cards and devices
\r
7119 snd_card_next( &card );
\r
7120 while ( card >= 0 ) {
\r
7121 sprintf( name, "hw:%d", card );
\r
7122 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7123 if ( result < 0 ) {
\r
7124 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7125 errorText_ = errorStream_.str();
\r
7130 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7131 if ( result < 0 ) break;
\r
7132 if ( subdevice < 0 ) break;
\r
7133 if ( nDevices == device ) {
\r
7134 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7135 snd_ctl_close( chandle );
\r
7140 snd_ctl_close( chandle );
\r
7141 snd_card_next( &card );
\r
7144 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7145 if ( result == 0 ) {
\r
7146 if ( nDevices == device ) {
\r
7147 strcpy( name, "default" );
\r
7153 if ( nDevices == 0 ) {
\r
7154 // This should not happen because a check is made before this function is called.
\r
7155 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7159 if ( device >= nDevices ) {
\r
7160 // This should not happen because a check is made before this function is called.
\r
7161 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7168 // The getDeviceInfo() function will not work for a device that is
\r
7169 // already open. Thus, we'll probe the system before opening a
\r
7170 // stream and save the results for use by getDeviceInfo().
\r
7171 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7172 this->saveDeviceInfo();
\r
7174 snd_pcm_stream_t stream;
\r
7175 if ( mode == OUTPUT )
\r
7176 stream = SND_PCM_STREAM_PLAYBACK;
\r
7178 stream = SND_PCM_STREAM_CAPTURE;
\r
7180 snd_pcm_t *phandle;
\r
7181 int openMode = SND_PCM_ASYNC;
\r
7182 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7183 if ( result < 0 ) {
\r
7184 if ( mode == OUTPUT )
\r
7185 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7187 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7188 errorText_ = errorStream_.str();
\r
7192 // Fill the parameter structure.
\r
7193 snd_pcm_hw_params_t *hw_params;
\r
7194 snd_pcm_hw_params_alloca( &hw_params );
\r
7195 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7196 if ( result < 0 ) {
\r
7197 snd_pcm_close( phandle );
\r
7198 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7199 errorText_ = errorStream_.str();
\r
7203 #if defined(__RTAUDIO_DEBUG__)
\r
7204 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7205 snd_pcm_hw_params_dump( hw_params, out );
\r
7208 // Set access ... check user preference.
\r
7209 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7210 stream_.userInterleaved = false;
\r
7211 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7212 if ( result < 0 ) {
\r
7213 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7214 stream_.deviceInterleaved[mode] = true;
\r
7217 stream_.deviceInterleaved[mode] = false;
\r
7220 stream_.userInterleaved = true;
\r
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7222 if ( result < 0 ) {
\r
7223 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7224 stream_.deviceInterleaved[mode] = false;
\r
7227 stream_.deviceInterleaved[mode] = true;
\r
7230 if ( result < 0 ) {
\r
7231 snd_pcm_close( phandle );
\r
7232 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7233 errorText_ = errorStream_.str();
\r
7237 // Determine how to set the device format.
\r
7238 stream_.userFormat = format;
\r
7239 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7241 if ( format == RTAUDIO_SINT8 )
\r
7242 deviceFormat = SND_PCM_FORMAT_S8;
\r
7243 else if ( format == RTAUDIO_SINT16 )
\r
7244 deviceFormat = SND_PCM_FORMAT_S16;
\r
7245 else if ( format == RTAUDIO_SINT24 )
\r
7246 deviceFormat = SND_PCM_FORMAT_S24;
\r
7247 else if ( format == RTAUDIO_SINT32 )
\r
7248 deviceFormat = SND_PCM_FORMAT_S32;
\r
7249 else if ( format == RTAUDIO_FLOAT32 )
\r
7250 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7251 else if ( format == RTAUDIO_FLOAT64 )
\r
7252 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7254 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7255 stream_.deviceFormat[mode] = format;
\r
7259 // The user requested format is not natively supported by the device.
\r
7260 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7261 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7262 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7266 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7267 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7268 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7272 deviceFormat = SND_PCM_FORMAT_S32;
\r
7273 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7274 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7278 deviceFormat = SND_PCM_FORMAT_S24;
\r
7279 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7280 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7284 deviceFormat = SND_PCM_FORMAT_S16;
\r
7285 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7286 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7290 deviceFormat = SND_PCM_FORMAT_S8;
\r
7291 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7292 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7296 // If we get here, no supported format was found.
\r
7297 snd_pcm_close( phandle );
\r
7298 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7299 errorText_ = errorStream_.str();
\r
7303 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7304 if ( result < 0 ) {
\r
7305 snd_pcm_close( phandle );
\r
7306 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7307 errorText_ = errorStream_.str();
\r
7311 // Determine whether byte-swaping is necessary.
\r
7312 stream_.doByteSwap[mode] = false;
\r
7313 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7314 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7315 if ( result == 0 )
\r
7316 stream_.doByteSwap[mode] = true;
\r
7317 else if (result < 0) {
\r
7318 snd_pcm_close( phandle );
\r
7319 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7320 errorText_ = errorStream_.str();
\r
7325 // Set the sample rate.
\r
7326 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7327 if ( result < 0 ) {
\r
7328 snd_pcm_close( phandle );
\r
7329 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7330 errorText_ = errorStream_.str();
\r
7334 // Determine the number of channels for this device. We support a possible
\r
7335 // minimum device channel number > than the value requested by the user.
\r
7336 stream_.nUserChannels[mode] = channels;
\r
7337 unsigned int value;
\r
7338 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7339 unsigned int deviceChannels = value;
\r
7340 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7341 snd_pcm_close( phandle );
\r
7342 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7343 errorText_ = errorStream_.str();
\r
7347 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7348 if ( result < 0 ) {
\r
7349 snd_pcm_close( phandle );
\r
7350 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7351 errorText_ = errorStream_.str();
\r
7354 deviceChannels = value;
\r
7355 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7356 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7358 // Set the device channels.
\r
7359 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7360 if ( result < 0 ) {
\r
7361 snd_pcm_close( phandle );
\r
7362 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7363 errorText_ = errorStream_.str();
\r
7367 // Set the buffer (or period) size.
\r
7369 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7370 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7371 if ( result < 0 ) {
\r
7372 snd_pcm_close( phandle );
\r
7373 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7374 errorText_ = errorStream_.str();
\r
7377 *bufferSize = periodSize;
\r
7379 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7380 unsigned int periods = 0;
\r
7381 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7382 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7383 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7384 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7385 if ( result < 0 ) {
\r
7386 snd_pcm_close( phandle );
\r
7387 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7388 errorText_ = errorStream_.str();
\r
7392 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7393 // MUST be the same in both directions!
\r
7394 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7395 snd_pcm_close( phandle );
\r
7396 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7397 errorText_ = errorStream_.str();
\r
7401 stream_.bufferSize = *bufferSize;
\r
7403 // Install the hardware configuration
\r
7404 result = snd_pcm_hw_params( phandle, hw_params );
\r
7405 if ( result < 0 ) {
\r
7406 snd_pcm_close( phandle );
\r
7407 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7408 errorText_ = errorStream_.str();
\r
7412 #if defined(__RTAUDIO_DEBUG__)
\r
7413 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7414 snd_pcm_hw_params_dump( hw_params, out );
\r
7417 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7418 snd_pcm_sw_params_t *sw_params = NULL;
\r
7419 snd_pcm_sw_params_alloca( &sw_params );
\r
7420 snd_pcm_sw_params_current( phandle, sw_params );
\r
7421 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7422 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7423 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7425 // The following two settings were suggested by Theo Veenker
\r
7426 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7427 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7429 // here are two options for a fix
\r
7430 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7431 snd_pcm_uframes_t val;
\r
7432 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7433 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7435 result = snd_pcm_sw_params( phandle, sw_params );
\r
7436 if ( result < 0 ) {
\r
7437 snd_pcm_close( phandle );
\r
7438 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7439 errorText_ = errorStream_.str();
\r
7443 #if defined(__RTAUDIO_DEBUG__)
\r
7444 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7445 snd_pcm_sw_params_dump( sw_params, out );
\r
7448 // Set flags for buffer conversion
\r
7449 stream_.doConvertBuffer[mode] = false;
\r
7450 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7451 stream_.doConvertBuffer[mode] = true;
\r
7452 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7453 stream_.doConvertBuffer[mode] = true;
\r
7454 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7455 stream_.nUserChannels[mode] > 1 )
\r
7456 stream_.doConvertBuffer[mode] = true;
\r
7458 // Allocate the ApiHandle if necessary and then save.
\r
7459 AlsaHandle *apiInfo = 0;
\r
7460 if ( stream_.apiHandle == 0 ) {
\r
7462 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7464 catch ( std::bad_alloc& ) {
\r
7465 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7469 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7470 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7474 stream_.apiHandle = (void *) apiInfo;
\r
7475 apiInfo->handles[0] = 0;
\r
7476 apiInfo->handles[1] = 0;
\r
7479 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7481 apiInfo->handles[mode] = phandle;
\r
7484 // Allocate necessary internal buffers.
\r
7485 unsigned long bufferBytes;
\r
7486 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7487 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7488 if ( stream_.userBuffer[mode] == NULL ) {
\r
7489 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7493 if ( stream_.doConvertBuffer[mode] ) {
\r
7495 bool makeBuffer = true;
\r
7496 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7497 if ( mode == INPUT ) {
\r
7498 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7499 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7500 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7504 if ( makeBuffer ) {
\r
7505 bufferBytes *= *bufferSize;
\r
7506 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7507 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7508 if ( stream_.deviceBuffer == NULL ) {
\r
7509 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7515 stream_.sampleRate = sampleRate;
\r
7516 stream_.nBuffers = periods;
\r
7517 stream_.device[mode] = device;
\r
7518 stream_.state = STREAM_STOPPED;
\r
7520 // Setup the buffer conversion information structure.
\r
7521 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7523 // Setup thread if necessary.
\r
7524 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7525 // We had already set up an output stream.
\r
7526 stream_.mode = DUPLEX;
\r
7527 // Link the streams if possible.
\r
7528 apiInfo->synchronized = false;
\r
7529 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7530 apiInfo->synchronized = true;
\r
7532 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7533 error( RtAudioError::WARNING );
\r
7537 stream_.mode = mode;
\r
7539 // Setup callback thread.
\r
7540 stream_.callbackInfo.object = (void *) this;
\r
7542 // Set the thread attributes for joinable and realtime scheduling
\r
7543 // priority (optional). The higher priority will only take affect
\r
7544 // if the program is run as root or suid. Note, under Linux
\r
7545 // processes with CAP_SYS_NICE privilege, a user can change
\r
7546 // scheduling policy and priority (thus need not be root). See
\r
7547 // POSIX "capabilities".
\r
7548 pthread_attr_t attr;
\r
7549 pthread_attr_init( &attr );
\r
7550 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7552 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7553 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7554 // We previously attempted to increase the audio callback priority
\r
7555 // to SCHED_RR here via the attributes. However, while no errors
\r
7556 // were reported in doing so, it did not work. So, now this is
\r
7557 // done in the alsaCallbackHandler function.
\r
7558 stream_.callbackInfo.doRealtime = true;
\r
7559 int priority = options->priority;
\r
7560 int min = sched_get_priority_min( SCHED_RR );
\r
7561 int max = sched_get_priority_max( SCHED_RR );
\r
7562 if ( priority < min ) priority = min;
\r
7563 else if ( priority > max ) priority = max;
\r
7564 stream_.callbackInfo.priority = priority;
\r
7568 stream_.callbackInfo.isRunning = true;
\r
7569 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7570 pthread_attr_destroy( &attr );
\r
7572 stream_.callbackInfo.isRunning = false;
\r
7573 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7582 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7583 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7584 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7586 stream_.apiHandle = 0;
\r
7589 if ( phandle) snd_pcm_close( phandle );
\r
7591 for ( int i=0; i<2; i++ ) {
\r
7592 if ( stream_.userBuffer[i] ) {
\r
7593 free( stream_.userBuffer[i] );
\r
7594 stream_.userBuffer[i] = 0;
\r
7598 if ( stream_.deviceBuffer ) {
\r
7599 free( stream_.deviceBuffer );
\r
7600 stream_.deviceBuffer = 0;
\r
7603 stream_.state = STREAM_CLOSED;
\r
7607 void RtApiAlsa :: closeStream()
\r
7609 if ( stream_.state == STREAM_CLOSED ) {
\r
7610 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7611 error( RtAudioError::WARNING );
\r
7615 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7616 stream_.callbackInfo.isRunning = false;
\r
7617 MUTEX_LOCK( &stream_.mutex );
\r
7618 if ( stream_.state == STREAM_STOPPED ) {
\r
7619 apiInfo->runnable = true;
\r
7620 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7622 MUTEX_UNLOCK( &stream_.mutex );
\r
7623 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7625 if ( stream_.state == STREAM_RUNNING ) {
\r
7626 stream_.state = STREAM_STOPPED;
\r
7627 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7628 snd_pcm_drop( apiInfo->handles[0] );
\r
7629 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7630 snd_pcm_drop( apiInfo->handles[1] );
\r
7634 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7635 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7636 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7638 stream_.apiHandle = 0;
\r
7641 for ( int i=0; i<2; i++ ) {
\r
7642 if ( stream_.userBuffer[i] ) {
\r
7643 free( stream_.userBuffer[i] );
\r
7644 stream_.userBuffer[i] = 0;
\r
7648 if ( stream_.deviceBuffer ) {
\r
7649 free( stream_.deviceBuffer );
\r
7650 stream_.deviceBuffer = 0;
\r
7653 stream_.mode = UNINITIALIZED;
\r
7654 stream_.state = STREAM_CLOSED;
\r
7657 void RtApiAlsa :: startStream()
\r
7659 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7662 if ( stream_.state == STREAM_RUNNING ) {
\r
7663 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7664 error( RtAudioError::WARNING );
\r
7668 MUTEX_LOCK( &stream_.mutex );
\r
7671 snd_pcm_state_t state;
\r
7672 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7673 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7674 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7675 state = snd_pcm_state( handle[0] );
\r
7676 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7677 result = snd_pcm_prepare( handle[0] );
\r
7678 if ( result < 0 ) {
\r
7679 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7680 errorText_ = errorStream_.str();
\r
7686 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7687 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7688 state = snd_pcm_state( handle[1] );
\r
7689 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7690 result = snd_pcm_prepare( handle[1] );
\r
7691 if ( result < 0 ) {
\r
7692 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7693 errorText_ = errorStream_.str();
\r
7699 stream_.state = STREAM_RUNNING;
\r
7702 apiInfo->runnable = true;
\r
7703 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7704 MUTEX_UNLOCK( &stream_.mutex );
\r
7706 if ( result >= 0 ) return;
\r
7707 error( RtAudioError::SYSTEM_ERROR );
\r
7710 void RtApiAlsa :: stopStream()
\r
7713 if ( stream_.state == STREAM_STOPPED ) {
\r
7714 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7715 error( RtAudioError::WARNING );
\r
7719 stream_.state = STREAM_STOPPED;
\r
7720 MUTEX_LOCK( &stream_.mutex );
\r
7723 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7724 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7725 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7726 if ( apiInfo->synchronized )
\r
7727 result = snd_pcm_drop( handle[0] );
\r
7729 result = snd_pcm_drain( handle[0] );
\r
7730 if ( result < 0 ) {
\r
7731 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7732 errorText_ = errorStream_.str();
\r
7737 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7738 result = snd_pcm_drop( handle[1] );
\r
7739 if ( result < 0 ) {
\r
7740 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7741 errorText_ = errorStream_.str();
\r
7747 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7748 MUTEX_UNLOCK( &stream_.mutex );
\r
7750 if ( result >= 0 ) return;
\r
7751 error( RtAudioError::SYSTEM_ERROR );
\r
7754 void RtApiAlsa :: abortStream()
\r
7757 if ( stream_.state == STREAM_STOPPED ) {
\r
7758 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7759 error( RtAudioError::WARNING );
\r
7763 stream_.state = STREAM_STOPPED;
\r
7764 MUTEX_LOCK( &stream_.mutex );
\r
7767 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7768 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7769 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7770 result = snd_pcm_drop( handle[0] );
\r
7771 if ( result < 0 ) {
\r
7772 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7773 errorText_ = errorStream_.str();
\r
7778 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7779 result = snd_pcm_drop( handle[1] );
\r
7780 if ( result < 0 ) {
\r
7781 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7782 errorText_ = errorStream_.str();
\r
7788 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7789 MUTEX_UNLOCK( &stream_.mutex );
\r
7791 if ( result >= 0 ) return;
\r
7792 error( RtAudioError::SYSTEM_ERROR );
\r
7795 void RtApiAlsa :: callbackEvent()
\r
7797 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7798 if ( stream_.state == STREAM_STOPPED ) {
\r
7799 MUTEX_LOCK( &stream_.mutex );
\r
7800 while ( !apiInfo->runnable )
\r
7801 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7803 if ( stream_.state != STREAM_RUNNING ) {
\r
7804 MUTEX_UNLOCK( &stream_.mutex );
\r
7807 MUTEX_UNLOCK( &stream_.mutex );
\r
7810 if ( stream_.state == STREAM_CLOSED ) {
\r
7811 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7812 error( RtAudioError::WARNING );
\r
7816 int doStopStream = 0;
\r
7817 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7818 double streamTime = getStreamTime();
\r
7819 RtAudioStreamStatus status = 0;
\r
7820 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7821 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7822 apiInfo->xrun[0] = false;
\r
7824 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7825 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7826 apiInfo->xrun[1] = false;
\r
7828 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7829 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7831 if ( doStopStream == 2 ) {
\r
7836 MUTEX_LOCK( &stream_.mutex );
\r
7838 // The state might change while waiting on a mutex.
\r
7839 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7844 snd_pcm_t **handle;
\r
7845 snd_pcm_sframes_t frames;
\r
7846 RtAudioFormat format;
\r
7847 handle = (snd_pcm_t **) apiInfo->handles;
\r
7849 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7851 // Setup parameters.
\r
7852 if ( stream_.doConvertBuffer[1] ) {
\r
7853 buffer = stream_.deviceBuffer;
\r
7854 channels = stream_.nDeviceChannels[1];
\r
7855 format = stream_.deviceFormat[1];
\r
7858 buffer = stream_.userBuffer[1];
\r
7859 channels = stream_.nUserChannels[1];
\r
7860 format = stream_.userFormat;
\r
7863 // Read samples from device in interleaved/non-interleaved format.
\r
7864 if ( stream_.deviceInterleaved[1] )
\r
7865 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7867 void *bufs[channels];
\r
7868 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7869 for ( int i=0; i<channels; i++ )
\r
7870 bufs[i] = (void *) (buffer + (i * offset));
\r
7871 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7874 if ( result < (int) stream_.bufferSize ) {
\r
7875 // Either an error or overrun occured.
\r
7876 if ( result == -EPIPE ) {
\r
7877 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7878 if ( state == SND_PCM_STATE_XRUN ) {
\r
7879 apiInfo->xrun[1] = true;
\r
7880 result = snd_pcm_prepare( handle[1] );
\r
7881 if ( result < 0 ) {
\r
7882 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7883 errorText_ = errorStream_.str();
\r
7887 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7888 errorText_ = errorStream_.str();
\r
7892 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7893 errorText_ = errorStream_.str();
\r
7895 error( RtAudioError::WARNING );
\r
7899 // Do byte swapping if necessary.
\r
7900 if ( stream_.doByteSwap[1] )
\r
7901 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7903 // Do buffer conversion if necessary.
\r
7904 if ( stream_.doConvertBuffer[1] )
\r
7905 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7907 // Check stream latency
\r
7908 result = snd_pcm_delay( handle[1], &frames );
\r
7909 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7914 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7916 // Setup parameters and do buffer conversion if necessary.
\r
7917 if ( stream_.doConvertBuffer[0] ) {
\r
7918 buffer = stream_.deviceBuffer;
\r
7919 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7920 channels = stream_.nDeviceChannels[0];
\r
7921 format = stream_.deviceFormat[0];
\r
7924 buffer = stream_.userBuffer[0];
\r
7925 channels = stream_.nUserChannels[0];
\r
7926 format = stream_.userFormat;
\r
7929 // Do byte swapping if necessary.
\r
7930 if ( stream_.doByteSwap[0] )
\r
7931 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7933 // Write samples to device in interleaved/non-interleaved format.
\r
7934 if ( stream_.deviceInterleaved[0] )
\r
7935 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7937 void *bufs[channels];
\r
7938 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7939 for ( int i=0; i<channels; i++ )
\r
7940 bufs[i] = (void *) (buffer + (i * offset));
\r
7941 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7944 if ( result < (int) stream_.bufferSize ) {
\r
7945 // Either an error or underrun occured.
\r
7946 if ( result == -EPIPE ) {
\r
7947 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7948 if ( state == SND_PCM_STATE_XRUN ) {
\r
7949 apiInfo->xrun[0] = true;
\r
7950 result = snd_pcm_prepare( handle[0] );
\r
7951 if ( result < 0 ) {
\r
7952 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7953 errorText_ = errorStream_.str();
\r
7957 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7958 errorText_ = errorStream_.str();
\r
7962 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7963 errorText_ = errorStream_.str();
\r
7965 error( RtAudioError::WARNING );
\r
7969 // Check stream latency
\r
7970 result = snd_pcm_delay( handle[0], &frames );
\r
7971 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7975 MUTEX_UNLOCK( &stream_.mutex );
\r
7977 RtApi::tickStreamTime();
\r
7978 if ( doStopStream == 1 ) this->stopStream();
\r
7981 static void *alsaCallbackHandler( void *ptr )
\r
7983 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7984 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7985 bool *isRunning = &info->isRunning;
\r
7987 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7988 if ( &info->doRealtime ) {
\r
7989 pthread_t tID = pthread_self(); // ID of this thread
\r
7990 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7991 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7995 while ( *isRunning == true ) {
\r
7996 pthread_testcancel();
\r
7997 object->callbackEvent();
\r
8000 pthread_exit( NULL );
\r
8003 //******************** End of __LINUX_ALSA__ *********************//
\r
8006 #if defined(__LINUX_PULSE__)
\r
8008 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8009 // and Tristan Matthews.
\r
8011 #include <pulse/error.h>
\r
8012 #include <pulse/simple.h>
\r
8015 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8016 44100, 48000, 96000, 0};
\r
8018 struct rtaudio_pa_format_mapping_t {
\r
8019 RtAudioFormat rtaudio_format;
\r
8020 pa_sample_format_t pa_format;
\r
8023 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8024 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8025 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8026 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8027 {0, PA_SAMPLE_INVALID}};
\r
8029 struct PulseAudioHandle {
\r
8030 pa_simple *s_play;
\r
8033 pthread_cond_t runnable_cv;
\r
8035 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8038 RtApiPulse::~RtApiPulse()
\r
8040 if ( stream_.state != STREAM_CLOSED )
\r
8044 unsigned int RtApiPulse::getDeviceCount( void )
\r
8049 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8051 RtAudio::DeviceInfo info;
\r
8052 info.probed = true;
\r
8053 info.name = "PulseAudio";
\r
8054 info.outputChannels = 2;
\r
8055 info.inputChannels = 2;
\r
8056 info.duplexChannels = 2;
\r
8057 info.isDefaultOutput = true;
\r
8058 info.isDefaultInput = true;
\r
8060 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8061 info.sampleRates.push_back( *sr );
\r
8063 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8068 static void *pulseaudio_callback( void * user )
\r
8070 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8071 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8072 volatile bool *isRunning = &cbi->isRunning;
\r
8074 while ( *isRunning ) {
\r
8075 pthread_testcancel();
\r
8076 context->callbackEvent();
\r
8079 pthread_exit( NULL );
\r
8082 void RtApiPulse::closeStream( void )
\r
8084 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8086 stream_.callbackInfo.isRunning = false;
\r
8088 MUTEX_LOCK( &stream_.mutex );
\r
8089 if ( stream_.state == STREAM_STOPPED ) {
\r
8090 pah->runnable = true;
\r
8091 pthread_cond_signal( &pah->runnable_cv );
\r
8093 MUTEX_UNLOCK( &stream_.mutex );
\r
8095 pthread_join( pah->thread, 0 );
\r
8096 if ( pah->s_play ) {
\r
8097 pa_simple_flush( pah->s_play, NULL );
\r
8098 pa_simple_free( pah->s_play );
\r
8101 pa_simple_free( pah->s_rec );
\r
8103 pthread_cond_destroy( &pah->runnable_cv );
\r
8105 stream_.apiHandle = 0;
\r
8108 if ( stream_.userBuffer[0] ) {
\r
8109 free( stream_.userBuffer[0] );
\r
8110 stream_.userBuffer[0] = 0;
\r
8112 if ( stream_.userBuffer[1] ) {
\r
8113 free( stream_.userBuffer[1] );
\r
8114 stream_.userBuffer[1] = 0;
\r
8117 stream_.state = STREAM_CLOSED;
\r
8118 stream_.mode = UNINITIALIZED;
\r
8121 void RtApiPulse::callbackEvent( void )
\r
8123 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8125 if ( stream_.state == STREAM_STOPPED ) {
\r
8126 MUTEX_LOCK( &stream_.mutex );
\r
8127 while ( !pah->runnable )
\r
8128 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8130 if ( stream_.state != STREAM_RUNNING ) {
\r
8131 MUTEX_UNLOCK( &stream_.mutex );
\r
8134 MUTEX_UNLOCK( &stream_.mutex );
\r
8137 if ( stream_.state == STREAM_CLOSED ) {
\r
8138 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8139 "this shouldn't happen!";
\r
8140 error( RtAudioError::WARNING );
\r
8144 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8145 double streamTime = getStreamTime();
\r
8146 RtAudioStreamStatus status = 0;
\r
8147 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8148 stream_.bufferSize, streamTime, status,
\r
8149 stream_.callbackInfo.userData );
\r
8151 if ( doStopStream == 2 ) {
\r
8156 MUTEX_LOCK( &stream_.mutex );
\r
8157 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8158 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8160 if ( stream_.state != STREAM_RUNNING )
\r
8165 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8166 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8167 convertBuffer( stream_.deviceBuffer,
\r
8168 stream_.userBuffer[OUTPUT],
\r
8169 stream_.convertInfo[OUTPUT] );
\r
8170 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8171 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8173 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8174 formatBytes( stream_.userFormat );
\r
8176 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8177 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8178 pa_strerror( pa_error ) << ".";
\r
8179 errorText_ = errorStream_.str();
\r
8180 error( RtAudioError::WARNING );
\r
8184 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8185 if ( stream_.doConvertBuffer[INPUT] )
\r
8186 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8187 formatBytes( stream_.deviceFormat[INPUT] );
\r
8189 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8190 formatBytes( stream_.userFormat );
\r
8192 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8193 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8194 pa_strerror( pa_error ) << ".";
\r
8195 errorText_ = errorStream_.str();
\r
8196 error( RtAudioError::WARNING );
\r
8198 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8199 convertBuffer( stream_.userBuffer[INPUT],
\r
8200 stream_.deviceBuffer,
\r
8201 stream_.convertInfo[INPUT] );
\r
8206 MUTEX_UNLOCK( &stream_.mutex );
\r
8207 RtApi::tickStreamTime();
\r
8209 if ( doStopStream == 1 )
\r
8213 void RtApiPulse::startStream( void )
\r
8215 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8217 if ( stream_.state == STREAM_CLOSED ) {
\r
8218 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8219 error( RtAudioError::INVALID_USE );
\r
8222 if ( stream_.state == STREAM_RUNNING ) {
\r
8223 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8224 error( RtAudioError::WARNING );
\r
8228 MUTEX_LOCK( &stream_.mutex );
\r
8230 stream_.state = STREAM_RUNNING;
\r
8232 pah->runnable = true;
\r
8233 pthread_cond_signal( &pah->runnable_cv );
\r
8234 MUTEX_UNLOCK( &stream_.mutex );
\r
8237 void RtApiPulse::stopStream( void )
\r
8239 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8241 if ( stream_.state == STREAM_CLOSED ) {
\r
8242 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8243 error( RtAudioError::INVALID_USE );
\r
8246 if ( stream_.state == STREAM_STOPPED ) {
\r
8247 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8248 error( RtAudioError::WARNING );
\r
8252 stream_.state = STREAM_STOPPED;
\r
8253 MUTEX_LOCK( &stream_.mutex );
\r
8255 if ( pah && pah->s_play ) {
\r
8257 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8258 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8259 pa_strerror( pa_error ) << ".";
\r
8260 errorText_ = errorStream_.str();
\r
8261 MUTEX_UNLOCK( &stream_.mutex );
\r
8262 error( RtAudioError::SYSTEM_ERROR );
\r
8267 stream_.state = STREAM_STOPPED;
\r
8268 MUTEX_UNLOCK( &stream_.mutex );
\r
8271 void RtApiPulse::abortStream( void )
\r
8273 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8275 if ( stream_.state == STREAM_CLOSED ) {
\r
8276 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8277 error( RtAudioError::INVALID_USE );
\r
8280 if ( stream_.state == STREAM_STOPPED ) {
\r
8281 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8282 error( RtAudioError::WARNING );
\r
8286 stream_.state = STREAM_STOPPED;
\r
8287 MUTEX_LOCK( &stream_.mutex );
\r
8289 if ( pah && pah->s_play ) {
\r
8291 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8292 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8293 pa_strerror( pa_error ) << ".";
\r
8294 errorText_ = errorStream_.str();
\r
8295 MUTEX_UNLOCK( &stream_.mutex );
\r
8296 error( RtAudioError::SYSTEM_ERROR );
\r
8301 stream_.state = STREAM_STOPPED;
\r
8302 MUTEX_UNLOCK( &stream_.mutex );
\r
8305 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8306 unsigned int channels, unsigned int firstChannel,
\r
8307 unsigned int sampleRate, RtAudioFormat format,
\r
8308 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8310 PulseAudioHandle *pah = 0;
\r
8311 unsigned long bufferBytes = 0;
\r
8312 pa_sample_spec ss;
\r
8314 if ( device != 0 ) return false;
\r
8315 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8316 if ( channels != 1 && channels != 2 ) {
\r
8317 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8320 ss.channels = channels;
\r
8322 if ( firstChannel != 0 ) return false;
\r
8324 bool sr_found = false;
\r
8325 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8326 if ( sampleRate == *sr ) {
\r
8328 stream_.sampleRate = sampleRate;
\r
8329 ss.rate = sampleRate;
\r
8333 if ( !sr_found ) {
\r
8334 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8338 bool sf_found = 0;
\r
8339 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8340 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8341 if ( format == sf->rtaudio_format ) {
\r
8343 stream_.userFormat = sf->rtaudio_format;
\r
8344 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8345 ss.format = sf->pa_format;
\r
8349 if ( !sf_found ) { // Use internal data format conversion.
\r
8350 stream_.userFormat = format;
\r
8351 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8352 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8355 // Set other stream parameters.
\r
8356 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8357 else stream_.userInterleaved = true;
\r
8358 stream_.deviceInterleaved[mode] = true;
\r
8359 stream_.nBuffers = 1;
\r
8360 stream_.doByteSwap[mode] = false;
\r
8361 stream_.nUserChannels[mode] = channels;
\r
8362 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8363 stream_.channelOffset[mode] = 0;
\r
8364 std::string streamName = "RtAudio";
\r
8366 // Set flags for buffer conversion.
\r
8367 stream_.doConvertBuffer[mode] = false;
\r
8368 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8369 stream_.doConvertBuffer[mode] = true;
\r
8370 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8371 stream_.doConvertBuffer[mode] = true;
\r
8373 // Allocate necessary internal buffers.
\r
8374 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8375 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8376 if ( stream_.userBuffer[mode] == NULL ) {
\r
8377 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8380 stream_.bufferSize = *bufferSize;
\r
8382 if ( stream_.doConvertBuffer[mode] ) {
\r
8384 bool makeBuffer = true;
\r
8385 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8386 if ( mode == INPUT ) {
\r
8387 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8388 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8389 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8393 if ( makeBuffer ) {
\r
8394 bufferBytes *= *bufferSize;
\r
8395 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8396 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8397 if ( stream_.deviceBuffer == NULL ) {
\r
8398 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8404 stream_.device[mode] = device;
\r
8406 // Setup the buffer conversion information structure.
\r
8407 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8409 if ( !stream_.apiHandle ) {
\r
8410 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8412 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8416 stream_.apiHandle = pah;
\r
8417 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8418 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8422 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8425 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8428 pa_buffer_attr buffer_attr;
\r
8429 buffer_attr.fragsize = bufferBytes;
\r
8430 buffer_attr.maxlength = -1;
\r
8432 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8433 if ( !pah->s_rec ) {
\r
8434 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8439 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8440 if ( !pah->s_play ) {
\r
8441 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8449 if ( stream_.mode == UNINITIALIZED )
\r
8450 stream_.mode = mode;
\r
8451 else if ( stream_.mode == mode )
\r
8454 stream_.mode = DUPLEX;
\r
8456 if ( !stream_.callbackInfo.isRunning ) {
\r
8457 stream_.callbackInfo.object = this;
\r
8458 stream_.callbackInfo.isRunning = true;
\r
8459 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8460 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8465 stream_.state = STREAM_STOPPED;
\r
8469 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8470 pthread_cond_destroy( &pah->runnable_cv );
\r
8472 stream_.apiHandle = 0;
\r
8475 for ( int i=0; i<2; i++ ) {
\r
8476 if ( stream_.userBuffer[i] ) {
\r
8477 free( stream_.userBuffer[i] );
\r
8478 stream_.userBuffer[i] = 0;
\r
8482 if ( stream_.deviceBuffer ) {
\r
8483 free( stream_.deviceBuffer );
\r
8484 stream_.deviceBuffer = 0;
\r
8490 //******************** End of __LINUX_PULSE__ *********************//
\r
8493 #if defined(__LINUX_OSS__)
\r
8495 #include <unistd.h>
\r
8496 #include <sys/ioctl.h>
\r
8497 #include <unistd.h>
\r
8498 #include <fcntl.h>
\r
8499 #include <sys/soundcard.h>
\r
8500 #include <errno.h>
\r
8503 static void *ossCallbackHandler(void * ptr);
\r
8505 // A structure to hold various information related to the OSS API
\r
8506 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];              // device ids (playback = 0, capture = 1)
  bool xrun[2];           // over/underflow flags per direction
  bool triggered;         // device trigger state
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8517 RtApiOss :: RtApiOss()
\r
8519 // Nothing to do here.
\r
8522 RtApiOss :: ~RtApiOss()
\r
8524 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8527 unsigned int RtApiOss :: getDeviceCount( void )
\r
8529 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8530 if ( mixerfd == -1 ) {
\r
8531 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8532 error( RtAudioError::WARNING );
\r
8536 oss_sysinfo sysinfo;
\r
8537 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8539 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8540 error( RtAudioError::WARNING );
\r
8545 return sysinfo.numaudios;
\r
8548 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8550 RtAudio::DeviceInfo info;
\r
8551 info.probed = false;
\r
8553 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8554 if ( mixerfd == -1 ) {
\r
8555 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8556 error( RtAudioError::WARNING );
\r
8560 oss_sysinfo sysinfo;
\r
8561 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8562 if ( result == -1 ) {
\r
8564 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8565 error( RtAudioError::WARNING );
\r
8569 unsigned nDevices = sysinfo.numaudios;
\r
8570 if ( nDevices == 0 ) {
\r
8572 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8573 error( RtAudioError::INVALID_USE );
\r
8577 if ( device >= nDevices ) {
\r
8579 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8580 error( RtAudioError::INVALID_USE );
\r
8584 oss_audioinfo ainfo;
\r
8585 ainfo.dev = device;
\r
8586 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8588 if ( result == -1 ) {
\r
8589 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8590 errorText_ = errorStream_.str();
\r
8591 error( RtAudioError::WARNING );
\r
8596 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8597 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8598 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8599 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8600 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8603 // Probe data formats ... do for input
\r
8604 unsigned long mask = ainfo.iformats;
\r
8605 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8606 info.nativeFormats |= RTAUDIO_SINT16;
\r
8607 if ( mask & AFMT_S8 )
\r
8608 info.nativeFormats |= RTAUDIO_SINT8;
\r
8609 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8610 info.nativeFormats |= RTAUDIO_SINT32;
\r
8611 if ( mask & AFMT_FLOAT )
\r
8612 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8613 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8614 info.nativeFormats |= RTAUDIO_SINT24;
\r
8616 // Check that we have at least one supported format
\r
8617 if ( info.nativeFormats == 0 ) {
\r
8618 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8619 errorText_ = errorStream_.str();
\r
8620 error( RtAudioError::WARNING );
\r
8624 // Probe the supported sample rates.
\r
8625 info.sampleRates.clear();
\r
8626 if ( ainfo.nrates ) {
\r
8627 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8628 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8629 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8630 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8637 // Check min and max rate values;
\r
8638 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8639 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8640 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8644 if ( info.sampleRates.size() == 0 ) {
\r
8645 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8646 errorText_ = errorStream_.str();
\r
8647 error( RtAudioError::WARNING );
\r
8650 info.probed = true;
\r
8651 info.name = ainfo.name;
\r
8658 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8659 unsigned int firstChannel, unsigned int sampleRate,
\r
8660 RtAudioFormat format, unsigned int *bufferSize,
\r
8661 RtAudio::StreamOptions *options )
\r
8663 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8664 if ( mixerfd == -1 ) {
\r
8665 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8669 oss_sysinfo sysinfo;
\r
8670 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8671 if ( result == -1 ) {
\r
8673 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8677 unsigned nDevices = sysinfo.numaudios;
\r
8678 if ( nDevices == 0 ) {
\r
8679 // This should not happen because a check is made before this function is called.
\r
8681 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8685 if ( device >= nDevices ) {
\r
8686 // This should not happen because a check is made before this function is called.
\r
8688 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8692 oss_audioinfo ainfo;
\r
8693 ainfo.dev = device;
\r
8694 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8696 if ( result == -1 ) {
\r
8697 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8698 errorText_ = errorStream_.str();
\r
8702 // Check if device supports input or output
\r
8703 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8704 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8705 if ( mode == OUTPUT )
\r
8706 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8708 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8709 errorText_ = errorStream_.str();
\r
8714 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8715 if ( mode == OUTPUT )
\r
8716 flags |= O_WRONLY;
\r
8717 else { // mode == INPUT
\r
8718 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8719 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8720 close( handle->id[0] );
\r
8721 handle->id[0] = 0;
\r
8722 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8723 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8724 errorText_ = errorStream_.str();
\r
8727 // Check that the number previously set channels is the same.
\r
8728 if ( stream_.nUserChannels[0] != channels ) {
\r
8729 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8730 errorText_ = errorStream_.str();
\r
8736 flags |= O_RDONLY;
\r
8739 // Set exclusive access if specified.
\r
8740 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8742 // Try to open the device.
\r
8744 fd = open( ainfo.devnode, flags, 0 );
\r
8746 if ( errno == EBUSY )
\r
8747 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8749 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8750 errorText_ = errorStream_.str();
\r
8754 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8756 if ( flags | O_RDWR ) {
\r
8757 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8758 if ( result == -1) {
\r
8759 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8760 errorText_ = errorStream_.str();
\r
8766 // Check the device channel support.
\r
8767 stream_.nUserChannels[mode] = channels;
\r
8768 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8770 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8771 errorText_ = errorStream_.str();
\r
8775 // Set the number of channels.
\r
8776 int deviceChannels = channels + firstChannel;
\r
8777 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8778 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8780 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8781 errorText_ = errorStream_.str();
\r
8784 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8786 // Get the data format mask
\r
8788 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8789 if ( result == -1 ) {
\r
8791 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8792 errorText_ = errorStream_.str();
\r
8796 // Determine how to set the device format.
\r
8797 stream_.userFormat = format;
\r
8798 int deviceFormat = -1;
\r
8799 stream_.doByteSwap[mode] = false;
\r
8800 if ( format == RTAUDIO_SINT8 ) {
\r
8801 if ( mask & AFMT_S8 ) {
\r
8802 deviceFormat = AFMT_S8;
\r
8803 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8806 else if ( format == RTAUDIO_SINT16 ) {
\r
8807 if ( mask & AFMT_S16_NE ) {
\r
8808 deviceFormat = AFMT_S16_NE;
\r
8809 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8811 else if ( mask & AFMT_S16_OE ) {
\r
8812 deviceFormat = AFMT_S16_OE;
\r
8813 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8814 stream_.doByteSwap[mode] = true;
\r
8817 else if ( format == RTAUDIO_SINT24 ) {
\r
8818 if ( mask & AFMT_S24_NE ) {
\r
8819 deviceFormat = AFMT_S24_NE;
\r
8820 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8822 else if ( mask & AFMT_S24_OE ) {
\r
8823 deviceFormat = AFMT_S24_OE;
\r
8824 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8825 stream_.doByteSwap[mode] = true;
\r
8828 else if ( format == RTAUDIO_SINT32 ) {
\r
8829 if ( mask & AFMT_S32_NE ) {
\r
8830 deviceFormat = AFMT_S32_NE;
\r
8831 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8833 else if ( mask & AFMT_S32_OE ) {
\r
8834 deviceFormat = AFMT_S32_OE;
\r
8835 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8836 stream_.doByteSwap[mode] = true;
\r
8840 if ( deviceFormat == -1 ) {
\r
8841 // The user requested format is not natively supported by the device.
\r
8842 if ( mask & AFMT_S16_NE ) {
\r
8843 deviceFormat = AFMT_S16_NE;
\r
8844 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8846 else if ( mask & AFMT_S32_NE ) {
\r
8847 deviceFormat = AFMT_S32_NE;
\r
8848 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8850 else if ( mask & AFMT_S24_NE ) {
\r
8851 deviceFormat = AFMT_S24_NE;
\r
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8854 else if ( mask & AFMT_S16_OE ) {
\r
8855 deviceFormat = AFMT_S16_OE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8857 stream_.doByteSwap[mode] = true;
\r
8859 else if ( mask & AFMT_S32_OE ) {
\r
8860 deviceFormat = AFMT_S32_OE;
\r
8861 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8862 stream_.doByteSwap[mode] = true;
\r
8864 else if ( mask & AFMT_S24_OE ) {
\r
8865 deviceFormat = AFMT_S24_OE;
\r
8866 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8867 stream_.doByteSwap[mode] = true;
\r
8869 else if ( mask & AFMT_S8) {
\r
8870 deviceFormat = AFMT_S8;
\r
8871 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8875 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8876 // This really shouldn't happen ...
\r
8878 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8879 errorText_ = errorStream_.str();
\r
8883 // Set the data format.
\r
8884 int temp = deviceFormat;
\r
8885 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8886 if ( result == -1 || deviceFormat != temp ) {
\r
8888 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8889 errorText_ = errorStream_.str();
\r
8893 // Attempt to set the buffer size. According to OSS, the minimum
\r
8894 // number of buffers is two. The supposed minimum buffer size is 16
\r
8895 // bytes, so that will be our lower bound. The argument to this
\r
8896 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8897 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8898 // We'll check the actual value used near the end of the setup
\r
8900 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8901 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8903 if ( options ) buffers = options->numberOfBuffers;
\r
8904 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8905 if ( buffers < 2 ) buffers = 3;
\r
8906 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8907 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8908 if ( result == -1 ) {
\r
8910 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8911 errorText_ = errorStream_.str();
\r
8914 stream_.nBuffers = buffers;
\r
8916 // Save buffer size (in sample frames).
\r
8917 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8918 stream_.bufferSize = *bufferSize;
\r
8920 // Set the sample rate.
\r
8921 int srate = sampleRate;
\r
8922 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8923 if ( result == -1 ) {
\r
8925 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8926 errorText_ = errorStream_.str();
\r
8930 // Verify the sample rate setup worked.
\r
8931 if ( abs( srate - sampleRate ) > 100 ) {
\r
8933 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8934 errorText_ = errorStream_.str();
\r
8937 stream_.sampleRate = sampleRate;
\r
8939 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8940 // We're doing duplex setup here.
\r
8941 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8942 stream_.nDeviceChannels[0] = deviceChannels;
\r
8945 // Set interleaving parameters.
\r
8946 stream_.userInterleaved = true;
\r
8947 stream_.deviceInterleaved[mode] = true;
\r
8948 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8949 stream_.userInterleaved = false;
\r
8951 // Set flags for buffer conversion
\r
8952 stream_.doConvertBuffer[mode] = false;
\r
8953 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8954 stream_.doConvertBuffer[mode] = true;
\r
8955 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8956 stream_.doConvertBuffer[mode] = true;
\r
8957 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8958 stream_.nUserChannels[mode] > 1 )
\r
8959 stream_.doConvertBuffer[mode] = true;
\r
8961 // Allocate the stream handles if necessary and then save.
\r
8962 if ( stream_.apiHandle == 0 ) {
\r
8964 handle = new OssHandle;
\r
8966 catch ( std::bad_alloc& ) {
\r
8967 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8971 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8972 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8976 stream_.apiHandle = (void *) handle;
\r
8979 handle = (OssHandle *) stream_.apiHandle;
\r
8981 handle->id[mode] = fd;
\r
8983 // Allocate necessary internal buffers.
\r
8984 unsigned long bufferBytes;
\r
8985 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8986 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8987 if ( stream_.userBuffer[mode] == NULL ) {
\r
8988 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8992 if ( stream_.doConvertBuffer[mode] ) {
\r
8994 bool makeBuffer = true;
\r
8995 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8996 if ( mode == INPUT ) {
\r
8997 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8998 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8999 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9003 if ( makeBuffer ) {
\r
9004 bufferBytes *= *bufferSize;
\r
9005 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9006 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9007 if ( stream_.deviceBuffer == NULL ) {
\r
9008 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9014 stream_.device[mode] = device;
\r
9015 stream_.state = STREAM_STOPPED;
\r
9017 // Setup the buffer conversion information structure.
\r
9018 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9020 // Setup thread if necessary.
\r
9021 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9022 // We had already set up an output stream.
\r
9023 stream_.mode = DUPLEX;
\r
9024 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9027 stream_.mode = mode;
\r
9029 // Setup callback thread.
\r
9030 stream_.callbackInfo.object = (void *) this;
\r
9032 // Set the thread attributes for joinable and realtime scheduling
\r
9033 // priority. The higher priority will only take affect if the
\r
9034 // program is run as root or suid.
\r
9035 pthread_attr_t attr;
\r
9036 pthread_attr_init( &attr );
\r
9037 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9038 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9039 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9040 struct sched_param param;
\r
9041 int priority = options->priority;
\r
9042 int min = sched_get_priority_min( SCHED_RR );
\r
9043 int max = sched_get_priority_max( SCHED_RR );
\r
9044 if ( priority < min ) priority = min;
\r
9045 else if ( priority > max ) priority = max;
\r
9046 param.sched_priority = priority;
\r
9047 pthread_attr_setschedparam( &attr, ¶m );
\r
9048 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9051 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9053 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9056 stream_.callbackInfo.isRunning = true;
\r
9057 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9058 pthread_attr_destroy( &attr );
\r
9060 stream_.callbackInfo.isRunning = false;
\r
9061 errorText_ = "RtApiOss::error creating callback thread!";
\r
9070 pthread_cond_destroy( &handle->runnable );
\r
9071 if ( handle->id[0] ) close( handle->id[0] );
\r
9072 if ( handle->id[1] ) close( handle->id[1] );
\r
9074 stream_.apiHandle = 0;
\r
9077 for ( int i=0; i<2; i++ ) {
\r
9078 if ( stream_.userBuffer[i] ) {
\r
9079 free( stream_.userBuffer[i] );
\r
9080 stream_.userBuffer[i] = 0;
\r
9084 if ( stream_.deviceBuffer ) {
\r
9085 free( stream_.deviceBuffer );
\r
9086 stream_.deviceBuffer = 0;
\r
9092 void RtApiOss :: closeStream()
\r
9094 if ( stream_.state == STREAM_CLOSED ) {
\r
9095 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9096 error( RtAudioError::WARNING );
\r
9100 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9101 stream_.callbackInfo.isRunning = false;
\r
9102 MUTEX_LOCK( &stream_.mutex );
\r
9103 if ( stream_.state == STREAM_STOPPED )
\r
9104 pthread_cond_signal( &handle->runnable );
\r
9105 MUTEX_UNLOCK( &stream_.mutex );
\r
9106 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9108 if ( stream_.state == STREAM_RUNNING ) {
\r
9109 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9110 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9112 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9113 stream_.state = STREAM_STOPPED;
\r
9117 pthread_cond_destroy( &handle->runnable );
\r
9118 if ( handle->id[0] ) close( handle->id[0] );
\r
9119 if ( handle->id[1] ) close( handle->id[1] );
\r
9121 stream_.apiHandle = 0;
\r
9124 for ( int i=0; i<2; i++ ) {
\r
9125 if ( stream_.userBuffer[i] ) {
\r
9126 free( stream_.userBuffer[i] );
\r
9127 stream_.userBuffer[i] = 0;
\r
9131 if ( stream_.deviceBuffer ) {
\r
9132 free( stream_.deviceBuffer );
\r
9133 stream_.deviceBuffer = 0;
\r
9136 stream_.mode = UNINITIALIZED;
\r
9137 stream_.state = STREAM_CLOSED;
\r
9140 void RtApiOss :: startStream()
\r
9143 if ( stream_.state == STREAM_RUNNING ) {
\r
9144 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9145 error( RtAudioError::WARNING );
\r
9149 MUTEX_LOCK( &stream_.mutex );
\r
9151 stream_.state = STREAM_RUNNING;
\r
9153 // No need to do anything else here ... OSS automatically starts
\r
9154 // when fed samples.
\r
9156 MUTEX_UNLOCK( &stream_.mutex );
\r
9158 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9159 pthread_cond_signal( &handle->runnable );
\r
9162 void RtApiOss :: stopStream()
\r
9165 if ( stream_.state == STREAM_STOPPED ) {
\r
9166 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9167 error( RtAudioError::WARNING );
\r
9171 MUTEX_LOCK( &stream_.mutex );
\r
9173 // The state might change while waiting on a mutex.
\r
9174 if ( stream_.state == STREAM_STOPPED ) {
\r
9175 MUTEX_UNLOCK( &stream_.mutex );
\r
9180 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9181 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9183 // Flush the output with zeros a few times.
\r
9186 RtAudioFormat format;
\r
9188 if ( stream_.doConvertBuffer[0] ) {
\r
9189 buffer = stream_.deviceBuffer;
\r
9190 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9191 format = stream_.deviceFormat[0];
\r
9194 buffer = stream_.userBuffer[0];
\r
9195 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9196 format = stream_.userFormat;
\r
9199 memset( buffer, 0, samples * formatBytes(format) );
\r
9200 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9201 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9202 if ( result == -1 ) {
\r
9203 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9204 error( RtAudioError::WARNING );
\r
9208 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9209 if ( result == -1 ) {
\r
9210 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9211 errorText_ = errorStream_.str();
\r
9214 handle->triggered = false;
\r
9217 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9218 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9219 if ( result == -1 ) {
\r
9220 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9221 errorText_ = errorStream_.str();
\r
9227 stream_.state = STREAM_STOPPED;
\r
9228 MUTEX_UNLOCK( &stream_.mutex );
\r
9230 if ( result != -1 ) return;
\r
9231 error( RtAudioError::SYSTEM_ERROR );
\r
9234 void RtApiOss :: abortStream()
\r
9237 if ( stream_.state == STREAM_STOPPED ) {
\r
9238 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9239 error( RtAudioError::WARNING );
\r
9243 MUTEX_LOCK( &stream_.mutex );
\r
9245 // The state might change while waiting on a mutex.
\r
9246 if ( stream_.state == STREAM_STOPPED ) {
\r
9247 MUTEX_UNLOCK( &stream_.mutex );
\r
9252 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9253 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9254 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9255 if ( result == -1 ) {
\r
9256 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9257 errorText_ = errorStream_.str();
\r
9260 handle->triggered = false;
\r
9263 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9264 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9265 if ( result == -1 ) {
\r
9266 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9267 errorText_ = errorStream_.str();
\r
9273 stream_.state = STREAM_STOPPED;
\r
9274 MUTEX_UNLOCK( &stream_.mutex );
\r
9276 if ( result != -1 ) return;
\r
9277 error( RtAudioError::SYSTEM_ERROR );
\r
9280 void RtApiOss :: callbackEvent()
\r
9282 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9283 if ( stream_.state == STREAM_STOPPED ) {
\r
9284 MUTEX_LOCK( &stream_.mutex );
\r
9285 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9286 if ( stream_.state != STREAM_RUNNING ) {
\r
9287 MUTEX_UNLOCK( &stream_.mutex );
\r
9290 MUTEX_UNLOCK( &stream_.mutex );
\r
9293 if ( stream_.state == STREAM_CLOSED ) {
\r
9294 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9295 error( RtAudioError::WARNING );
\r
9299 // Invoke user callback to get fresh output data.
\r
9300 int doStopStream = 0;
\r
9301 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9302 double streamTime = getStreamTime();
\r
9303 RtAudioStreamStatus status = 0;
\r
9304 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9305 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9306 handle->xrun[0] = false;
\r
9308 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9309 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9310 handle->xrun[1] = false;
\r
9312 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9313 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9314 if ( doStopStream == 2 ) {
\r
9315 this->abortStream();
\r
9319 MUTEX_LOCK( &stream_.mutex );
\r
9321 // The state might change while waiting on a mutex.
\r
9322 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9327 RtAudioFormat format;
\r
9329 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9331 // Setup parameters and do buffer conversion if necessary.
\r
9332 if ( stream_.doConvertBuffer[0] ) {
\r
9333 buffer = stream_.deviceBuffer;
\r
9334 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9335 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9336 format = stream_.deviceFormat[0];
\r
9339 buffer = stream_.userBuffer[0];
\r
9340 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9341 format = stream_.userFormat;
\r
9344 // Do byte swapping if necessary.
\r
9345 if ( stream_.doByteSwap[0] )
\r
9346 byteSwapBuffer( buffer, samples, format );
\r
9348 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9350 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9351 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9352 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9353 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9354 handle->triggered = true;
\r
9357 // Write samples to device.
\r
9358 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9360 if ( result == -1 ) {
\r
9361 // We'll assume this is an underrun, though there isn't a
\r
9362 // specific means for determining that.
\r
9363 handle->xrun[0] = true;
\r
9364 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9365 error( RtAudioError::WARNING );
\r
9366 // Continue on to input section.
\r
9370 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9372 // Setup parameters.
\r
9373 if ( stream_.doConvertBuffer[1] ) {
\r
9374 buffer = stream_.deviceBuffer;
\r
9375 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9376 format = stream_.deviceFormat[1];
\r
9379 buffer = stream_.userBuffer[1];
\r
9380 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9381 format = stream_.userFormat;
\r
9384 // Read samples from device.
\r
9385 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9387 if ( result == -1 ) {
\r
9388 // We'll assume this is an overrun, though there isn't a
\r
9389 // specific means for determining that.
\r
9390 handle->xrun[1] = true;
\r
9391 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9392 error( RtAudioError::WARNING );
\r
9396 // Do byte swapping if necessary.
\r
9397 if ( stream_.doByteSwap[1] )
\r
9398 byteSwapBuffer( buffer, samples, format );
\r
9400 // Do buffer conversion if necessary.
\r
9401 if ( stream_.doConvertBuffer[1] )
\r
9402 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9406 MUTEX_UNLOCK( &stream_.mutex );
\r
9408 RtApi::tickStreamTime();
\r
9409 if ( doStopStream == 1 ) this->stopStream();
\r
9412 static void *ossCallbackHandler( void *ptr )
\r
9414 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9415 RtApiOss *object = (RtApiOss *) info->object;
\r
9416 bool *isRunning = &info->isRunning;
\r
9418 while ( *isRunning == true ) {
\r
9419 pthread_testcancel();
\r
9420 object->callbackEvent();
\r
9423 pthread_exit( NULL );
\r
9426 //******************** End of __LINUX_OSS__ *********************//
\r
9430 // *************************************************** //
\r
9432 // Protected common (OS-independent) RtAudio methods.
\r
9434 // *************************************************** //
\r
9436 // This method can be modified to control the behavior of error
\r
9437 // message printing.
\r
9438 void RtApi :: error( RtAudioError::Type type )
\r
9440 errorStream_.str(""); // clear the ostringstream
\r
9442 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9443 if ( errorCallback ) {
\r
9444 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9446 if ( firstErrorOccurred_ )
\r
9449 firstErrorOccurred_ = true;
\r
9450 const std::string errorMessage = errorText_;
\r
9452 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9453 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9457 errorCallback( type, errorMessage );
\r
9458 firstErrorOccurred_ = false;
\r
9462 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9463 std::cerr << '\n' << errorText_ << "\n\n";
\r
9464 else if ( type != RtAudioError::WARNING )
\r
9465 throw( RtAudioError( errorText_, type ) );
\r
9468 void RtApi :: verifyStream()
\r
9470 if ( stream_.state == STREAM_CLOSED ) {
\r
9471 errorText_ = "RtApi:: a stream is not open!";
\r
9472 error( RtAudioError::INVALID_USE );
\r
9476 void RtApi :: clearStreamInfo()
\r
9478 stream_.mode = UNINITIALIZED;
\r
9479 stream_.state = STREAM_CLOSED;
\r
9480 stream_.sampleRate = 0;
\r
9481 stream_.bufferSize = 0;
\r
9482 stream_.nBuffers = 0;
\r
9483 stream_.userFormat = 0;
\r
9484 stream_.userInterleaved = true;
\r
9485 stream_.streamTime = 0.0;
\r
9486 stream_.apiHandle = 0;
\r
9487 stream_.deviceBuffer = 0;
\r
9488 stream_.callbackInfo.callback = 0;
\r
9489 stream_.callbackInfo.userData = 0;
\r
9490 stream_.callbackInfo.isRunning = false;
\r
9491 stream_.callbackInfo.errorCallback = 0;
\r
9492 for ( int i=0; i<2; i++ ) {
\r
9493 stream_.device[i] = 11111;
\r
9494 stream_.doConvertBuffer[i] = false;
\r
9495 stream_.deviceInterleaved[i] = true;
\r
9496 stream_.doByteSwap[i] = false;
\r
9497 stream_.nUserChannels[i] = 0;
\r
9498 stream_.nDeviceChannels[i] = 0;
\r
9499 stream_.channelOffset[i] = 0;
\r
9500 stream_.deviceFormat[i] = 0;
\r
9501 stream_.latency[i] = 0;
\r
9502 stream_.userBuffer[i] = 0;
\r
9503 stream_.convertInfo[i].channels = 0;
\r
9504 stream_.convertInfo[i].inJump = 0;
\r
9505 stream_.convertInfo[i].outJump = 0;
\r
9506 stream_.convertInfo[i].inFormat = 0;
\r
9507 stream_.convertInfo[i].outFormat = 0;
\r
9508 stream_.convertInfo[i].inOffset.clear();
\r
9509 stream_.convertInfo[i].outOffset.clear();
\r
9513 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9515 if ( format == RTAUDIO_SINT16 )
\r
9517 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9519 else if ( format == RTAUDIO_FLOAT64 )
\r
9521 else if ( format == RTAUDIO_SINT24 )
\r
9523 else if ( format == RTAUDIO_SINT8 )
\r
9526 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9527 error( RtAudioError::WARNING );
\r
9532 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9534 if ( mode == INPUT ) { // convert device to user buffer
\r
9535 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9536 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9537 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9538 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9540 else { // convert user to device buffer
\r
9541 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9542 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9543 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9544 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9547 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9548 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9550 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9552 // Set up the interleave/deinterleave offsets.
\r
9553 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9554 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9555 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9556 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9557 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9558 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9559 stream_.convertInfo[mode].inJump = 1;
\r
9563 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9564 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9565 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9566 stream_.convertInfo[mode].outJump = 1;
\r
9570 else { // no (de)interleaving
\r
9571 if ( stream_.userInterleaved ) {
\r
9572 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9573 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9574 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9578 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9579 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9580 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9581 stream_.convertInfo[mode].inJump = 1;
\r
9582 stream_.convertInfo[mode].outJump = 1;
\r
9587 // Add channel offset.
\r
9588 if ( firstChannel > 0 ) {
\r
9589 if ( stream_.deviceInterleaved[mode] ) {
\r
9590 if ( mode == OUTPUT ) {
\r
9591 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9592 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9595 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9596 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9600 if ( mode == OUTPUT ) {
\r
9601 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9602 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9605 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9606 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9612 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9614 // This function does format conversion, input/output channel compensation, and
\r
9615 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9616 // the lower three bytes of a 32-bit integer.
\r
9618 // Clear our device buffer when in/out duplex device channels are different
\r
9619 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9620 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9621 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9624 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9626 Float64 *out = (Float64 *)outBuffer;
\r
9628 if (info.inFormat == RTAUDIO_SINT8) {
\r
9629 signed char *in = (signed char *)inBuffer;
\r
9630 scale = 1.0 / 127.5;
\r
9631 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9632 for (j=0; j<info.channels; j++) {
\r
9633 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9634 out[info.outOffset[j]] += 0.5;
\r
9635 out[info.outOffset[j]] *= scale;
\r
9637 in += info.inJump;
\r
9638 out += info.outJump;
\r
9641 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9642 Int16 *in = (Int16 *)inBuffer;
\r
9643 scale = 1.0 / 32767.5;
\r
9644 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9645 for (j=0; j<info.channels; j++) {
\r
9646 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9647 out[info.outOffset[j]] += 0.5;
\r
9648 out[info.outOffset[j]] *= scale;
\r
9650 in += info.inJump;
\r
9651 out += info.outJump;
\r
9654 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9655 Int24 *in = (Int24 *)inBuffer;
\r
9656 scale = 1.0 / 8388607.5;
\r
9657 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9658 for (j=0; j<info.channels; j++) {
\r
9659 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9660 out[info.outOffset[j]] += 0.5;
\r
9661 out[info.outOffset[j]] *= scale;
\r
9663 in += info.inJump;
\r
9664 out += info.outJump;
\r
9667 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9668 Int32 *in = (Int32 *)inBuffer;
\r
9669 scale = 1.0 / 2147483647.5;
\r
9670 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9671 for (j=0; j<info.channels; j++) {
\r
9672 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9673 out[info.outOffset[j]] += 0.5;
\r
9674 out[info.outOffset[j]] *= scale;
\r
9676 in += info.inJump;
\r
9677 out += info.outJump;
\r
9680 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9681 Float32 *in = (Float32 *)inBuffer;
\r
9682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9683 for (j=0; j<info.channels; j++) {
\r
9684 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9686 in += info.inJump;
\r
9687 out += info.outJump;
\r
9690 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9691 // Channel compensation and/or (de)interleaving only.
\r
9692 Float64 *in = (Float64 *)inBuffer;
\r
9693 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9694 for (j=0; j<info.channels; j++) {
\r
9695 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9697 in += info.inJump;
\r
9698 out += info.outJump;
\r
9702 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9704 Float32 *out = (Float32 *)outBuffer;
\r
9706 if (info.inFormat == RTAUDIO_SINT8) {
\r
9707 signed char *in = (signed char *)inBuffer;
\r
9708 scale = (Float32) ( 1.0 / 127.5 );
\r
9709 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9710 for (j=0; j<info.channels; j++) {
\r
9711 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9712 out[info.outOffset[j]] += 0.5;
\r
9713 out[info.outOffset[j]] *= scale;
\r
9715 in += info.inJump;
\r
9716 out += info.outJump;
\r
9719 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9720 Int16 *in = (Int16 *)inBuffer;
\r
9721 scale = (Float32) ( 1.0 / 32767.5 );
\r
9722 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9723 for (j=0; j<info.channels; j++) {
\r
9724 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9725 out[info.outOffset[j]] += 0.5;
\r
9726 out[info.outOffset[j]] *= scale;
\r
9728 in += info.inJump;
\r
9729 out += info.outJump;
\r
9732 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9733 Int24 *in = (Int24 *)inBuffer;
\r
9734 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9735 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9736 for (j=0; j<info.channels; j++) {
\r
9737 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9738 out[info.outOffset[j]] += 0.5;
\r
9739 out[info.outOffset[j]] *= scale;
\r
9741 in += info.inJump;
\r
9742 out += info.outJump;
\r
9745 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9746 Int32 *in = (Int32 *)inBuffer;
\r
9747 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9748 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9749 for (j=0; j<info.channels; j++) {
\r
9750 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9751 out[info.outOffset[j]] += 0.5;
\r
9752 out[info.outOffset[j]] *= scale;
\r
9754 in += info.inJump;
\r
9755 out += info.outJump;
\r
9758 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9759 // Channel compensation and/or (de)interleaving only.
\r
9760 Float32 *in = (Float32 *)inBuffer;
\r
9761 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9762 for (j=0; j<info.channels; j++) {
\r
9763 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9765 in += info.inJump;
\r
9766 out += info.outJump;
\r
9769 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9770 Float64 *in = (Float64 *)inBuffer;
\r
9771 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9772 for (j=0; j<info.channels; j++) {
\r
9773 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9775 in += info.inJump;
\r
9776 out += info.outJump;
\r
9780 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9781 Int32 *out = (Int32 *)outBuffer;
\r
9782 if (info.inFormat == RTAUDIO_SINT8) {
\r
9783 signed char *in = (signed char *)inBuffer;
\r
9784 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9785 for (j=0; j<info.channels; j++) {
\r
9786 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9787 out[info.outOffset[j]] <<= 24;
\r
9789 in += info.inJump;
\r
9790 out += info.outJump;
\r
9793 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9794 Int16 *in = (Int16 *)inBuffer;
\r
9795 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9796 for (j=0; j<info.channels; j++) {
\r
9797 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9798 out[info.outOffset[j]] <<= 16;
\r
9800 in += info.inJump;
\r
9801 out += info.outJump;
\r
9804 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9805 Int24 *in = (Int24 *)inBuffer;
\r
9806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9807 for (j=0; j<info.channels; j++) {
\r
9808 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9809 out[info.outOffset[j]] <<= 8;
\r
9811 in += info.inJump;
\r
9812 out += info.outJump;
\r
9815 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9816 // Channel compensation and/or (de)interleaving only.
\r
9817 Int32 *in = (Int32 *)inBuffer;
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9827 Float32 *in = (Float32 *)inBuffer;
\r
9828 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9829 for (j=0; j<info.channels; j++) {
\r
9830 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9832 in += info.inJump;
\r
9833 out += info.outJump;
\r
9836 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9837 Float64 *in = (Float64 *)inBuffer;
\r
9838 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9839 for (j=0; j<info.channels; j++) {
\r
9840 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9842 in += info.inJump;
\r
9843 out += info.outJump;
\r
9847 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9848 Int24 *out = (Int24 *)outBuffer;
\r
9849 if (info.inFormat == RTAUDIO_SINT8) {
\r
9850 signed char *in = (signed char *)inBuffer;
\r
9851 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9852 for (j=0; j<info.channels; j++) {
\r
9853 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9854 //out[info.outOffset[j]] <<= 16;
\r
9856 in += info.inJump;
\r
9857 out += info.outJump;
\r
9860 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9861 Int16 *in = (Int16 *)inBuffer;
\r
9862 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9863 for (j=0; j<info.channels; j++) {
\r
9864 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9865 //out[info.outOffset[j]] <<= 8;
\r
9867 in += info.inJump;
\r
9868 out += info.outJump;
\r
9871 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9872 // Channel compensation and/or (de)interleaving only.
\r
9873 Int24 *in = (Int24 *)inBuffer;
\r
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9875 for (j=0; j<info.channels; j++) {
\r
9876 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9878 in += info.inJump;
\r
9879 out += info.outJump;
\r
9882 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9883 Int32 *in = (Int32 *)inBuffer;
\r
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9885 for (j=0; j<info.channels; j++) {
\r
9886 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9887 //out[info.outOffset[j]] >>= 8;
\r
9889 in += info.inJump;
\r
9890 out += info.outJump;
\r
9893 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9894 Float32 *in = (Float32 *)inBuffer;
\r
9895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9896 for (j=0; j<info.channels; j++) {
\r
9897 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9899 in += info.inJump;
\r
9900 out += info.outJump;
\r
9903 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9904 Float64 *in = (Float64 *)inBuffer;
\r
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9906 for (j=0; j<info.channels; j++) {
\r
9907 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9909 in += info.inJump;
\r
9910 out += info.outJump;
\r
9914 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9915 Int16 *out = (Int16 *)outBuffer;
\r
9916 if (info.inFormat == RTAUDIO_SINT8) {
\r
9917 signed char *in = (signed char *)inBuffer;
\r
9918 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9919 for (j=0; j<info.channels; j++) {
\r
9920 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9921 out[info.outOffset[j]] <<= 8;
\r
9923 in += info.inJump;
\r
9924 out += info.outJump;
\r
9927 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9928 // Channel compensation and/or (de)interleaving only.
\r
9929 Int16 *in = (Int16 *)inBuffer;
\r
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9931 for (j=0; j<info.channels; j++) {
\r
9932 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9934 in += info.inJump;
\r
9935 out += info.outJump;
\r
9938 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9939 Int24 *in = (Int24 *)inBuffer;
\r
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9941 for (j=0; j<info.channels; j++) {
\r
9942 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9944 in += info.inJump;
\r
9945 out += info.outJump;
\r
9948 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9949 Int32 *in = (Int32 *)inBuffer;
\r
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9951 for (j=0; j<info.channels; j++) {
\r
9952 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9954 in += info.inJump;
\r
9955 out += info.outJump;
\r
9958 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9959 Float32 *in = (Float32 *)inBuffer;
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9968 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9969 Float64 *in = (Float64 *)inBuffer;
\r
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9971 for (j=0; j<info.channels; j++) {
\r
9972 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9974 in += info.inJump;
\r
9975 out += info.outJump;
\r
9979 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9980 signed char *out = (signed char *)outBuffer;
\r
9981 if (info.inFormat == RTAUDIO_SINT8) {
\r
9982 // Channel compensation and/or (de)interleaving only.
\r
9983 signed char *in = (signed char *)inBuffer;
\r
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9985 for (j=0; j<info.channels; j++) {
\r
9986 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9988 in += info.inJump;
\r
9989 out += info.outJump;
\r
9992 if (info.inFormat == RTAUDIO_SINT16) {
\r
9993 Int16 *in = (Int16 *)inBuffer;
\r
9994 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9995 for (j=0; j<info.channels; j++) {
\r
9996 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9998 in += info.inJump;
\r
9999 out += info.outJump;
\r
10002 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10003 Int24 *in = (Int24 *)inBuffer;
\r
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10005 for (j=0; j<info.channels; j++) {
\r
10006 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10008 in += info.inJump;
\r
10009 out += info.outJump;
\r
10012 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10013 Int32 *in = (Int32 *)inBuffer;
\r
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10015 for (j=0; j<info.channels; j++) {
\r
10016 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10018 in += info.inJump;
\r
10019 out += info.outJump;
\r
10022 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10023 Float32 *in = (Float32 *)inBuffer;
\r
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10025 for (j=0; j<info.channels; j++) {
\r
10026 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10028 in += info.inJump;
\r
10029 out += info.outJump;
\r
10032 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10033 Float64 *in = (Float64 *)inBuffer;
\r
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10035 for (j=0; j<info.channels; j++) {
\r
10036 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10038 in += info.inJump;
\r
10039 out += info.outJump;
\r
10045 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10046 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10047 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10049 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10051 register char val;
\r
10052 register char *ptr;
\r
10055 if ( format == RTAUDIO_SINT16 ) {
\r
10056 for ( unsigned int i=0; i<samples; i++ ) {
\r
10057 // Swap 1st and 2nd bytes.
\r
10059 *(ptr) = *(ptr+1);
\r
10062 // Increment 2 bytes.
\r
10066 else if ( format == RTAUDIO_SINT32 ||
\r
10067 format == RTAUDIO_FLOAT32 ) {
\r
10068 for ( unsigned int i=0; i<samples; i++ ) {
\r
10069 // Swap 1st and 4th bytes.
\r
10071 *(ptr) = *(ptr+3);
\r
10074 // Swap 2nd and 3rd bytes.
\r
10077 *(ptr) = *(ptr+1);
\r
10080 // Increment 3 more bytes.
\r
10084 else if ( format == RTAUDIO_SINT24 ) {
\r
10085 for ( unsigned int i=0; i<samples; i++ ) {
\r
10086 // Swap 1st and 3rd bytes.
\r
10088 *(ptr) = *(ptr+2);
\r
10091 // Increment 2 more bytes.
\r
10095 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10096 for ( unsigned int i=0; i<samples; i++ ) {
\r
10097 // Swap 1st and 8th bytes
\r
10099 *(ptr) = *(ptr+7);
\r
10102 // Swap 2nd and 7th bytes
\r
10105 *(ptr) = *(ptr+5);
\r
10108 // Swap 3rd and 6th bytes
\r
10111 *(ptr) = *(ptr+3);
\r
10114 // Swap 4th and 5th bytes
\r
10117 *(ptr) = *(ptr+1);
\r
10120 // Increment 5 more bytes.
\r
10126 // Indentation settings for Vim and Emacs
\r
10128 // Local Variables:
\r
10129 // c-basic-offset: 2
\r
10130 // indent-tabs-mode: nil
\r
10133 // vim: et sts=2 sw=2
\r