1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
// Convert a NUL-terminated narrow string to std::string (trivial overload,
// paired with the wchar_t overload used when UNICODE Windows APIs are in play).
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
\r
69 static std::string convertCharPointerToStdString(const wchar_t *text)
\r
71 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
\r
72 std::string s( length-1, '\0' );
\r
73 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
\r
77 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
79 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
80 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
81 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
82 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
84 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
85 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
88 // *************************************************** //
\r
90 // RtAudio definitions.
\r
92 // *************************************************** //
\r
94 std::string RtAudio :: getVersion( void ) throw()
\r
96 return RTAUDIO_VERSION;
\r
99 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
103 // The order here will control the order of RtAudio's API search in
\r
104 // the constructor.
\r
105 #if defined(__UNIX_JACK__)
\r
106 apis.push_back( UNIX_JACK );
\r
108 #if defined(__LINUX_ALSA__)
\r
109 apis.push_back( LINUX_ALSA );
\r
111 #if defined(__LINUX_PULSE__)
\r
112 apis.push_back( LINUX_PULSE );
\r
114 #if defined(__LINUX_OSS__)
\r
115 apis.push_back( LINUX_OSS );
\r
117 #if defined(__WINDOWS_ASIO__)
\r
118 apis.push_back( WINDOWS_ASIO );
\r
120 #if defined(__WINDOWS_WASAPI__)
\r
121 apis.push_back( WINDOWS_WASAPI );
\r
123 #if defined(__WINDOWS_DS__)
\r
124 apis.push_back( WINDOWS_DS );
\r
126 #if defined(__MACOSX_CORE__)
\r
127 apis.push_back( MACOSX_CORE );
\r
129 #if defined(__RTAUDIO_DUMMY__)
\r
130 apis.push_back( RTAUDIO_DUMMY );
\r
134 void RtAudio :: openRtApi( RtAudio::Api api )
\r
140 #if defined(__UNIX_JACK__)
\r
141 if ( api == UNIX_JACK )
\r
142 rtapi_ = new RtApiJack();
\r
144 #if defined(__LINUX_ALSA__)
\r
145 if ( api == LINUX_ALSA )
\r
146 rtapi_ = new RtApiAlsa();
\r
148 #if defined(__LINUX_PULSE__)
\r
149 if ( api == LINUX_PULSE )
\r
150 rtapi_ = new RtApiPulse();
\r
152 #if defined(__LINUX_OSS__)
\r
153 if ( api == LINUX_OSS )
\r
154 rtapi_ = new RtApiOss();
\r
156 #if defined(__WINDOWS_ASIO__)
\r
157 if ( api == WINDOWS_ASIO )
\r
158 rtapi_ = new RtApiAsio();
\r
160 #if defined(__WINDOWS_WASAPI__)
\r
161 if ( api == WINDOWS_WASAPI )
\r
162 rtapi_ = new RtApiWasapi();
\r
164 #if defined(__WINDOWS_DS__)
\r
165 if ( api == WINDOWS_DS )
\r
166 rtapi_ = new RtApiDs();
\r
168 #if defined(__MACOSX_CORE__)
\r
169 if ( api == MACOSX_CORE )
\r
170 rtapi_ = new RtApiCore();
\r
172 #if defined(__RTAUDIO_DUMMY__)
\r
173 if ( api == RTAUDIO_DUMMY )
\r
174 rtapi_ = new RtApiDummy();
\r
178 RtAudio :: RtAudio( RtAudio::Api api )
\r
182 if ( api != UNSPECIFIED ) {
\r
183 // Attempt to open the specified API.
\r
185 if ( rtapi_ ) return;
\r
187 // No compiled support for specified API value. Issue a debug
\r
188 // warning and continue as if no API was specified.
\r
189 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
192 // Iterate through the compiled APIs and return as soon as we find
\r
193 // one with at least one device or we reach the end of the list.
\r
194 std::vector< RtAudio::Api > apis;
\r
195 getCompiledApi( apis );
\r
196 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
197 openRtApi( apis[i] );
\r
198 if ( rtapi_->getDeviceCount() ) break;
\r
201 if ( rtapi_ ) return;
\r
203 // It should not be possible to get here because the preprocessor
\r
204 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
205 // API-specific definitions are passed to the compiler. But just in
\r
206 // case something weird happens, we'll thow an error.
\r
207 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
208 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
211 RtAudio :: ~RtAudio() throw()
\r
217 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
218 RtAudio::StreamParameters *inputParameters,
\r
219 RtAudioFormat format, unsigned int sampleRate,
\r
220 unsigned int *bufferFrames,
\r
221 RtAudioCallback callback, void *userData,
\r
222 RtAudio::StreamOptions *options,
\r
223 RtAudioErrorCallback errorCallback )
\r
225 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
226 sampleRate, bufferFrames, callback,
\r
227 userData, options, errorCallback );
\r
230 // *************************************************** //
\r
232 // Public RtApi definitions (see end of file for
\r
233 // private or protected utility functions).
\r
235 // *************************************************** //
\r
239 stream_.state = STREAM_CLOSED;
\r
240 stream_.mode = UNINITIALIZED;
\r
241 stream_.apiHandle = 0;
\r
242 stream_.userBuffer[0] = 0;
\r
243 stream_.userBuffer[1] = 0;
\r
244 MUTEX_INITIALIZE( &stream_.mutex );
\r
245 showWarnings_ = true;
\r
246 firstErrorOccurred_ = false;
\r
251 MUTEX_DESTROY( &stream_.mutex );
\r
254 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
255 RtAudio::StreamParameters *iParams,
\r
256 RtAudioFormat format, unsigned int sampleRate,
\r
257 unsigned int *bufferFrames,
\r
258 RtAudioCallback callback, void *userData,
\r
259 RtAudio::StreamOptions *options,
\r
260 RtAudioErrorCallback errorCallback )
\r
262 if ( stream_.state != STREAM_CLOSED ) {
\r
263 errorText_ = "RtApi::openStream: a stream is already open!";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 // Clear stream information potentially left from a previously open stream.
\r
271 if ( oParams && oParams->nChannels < 1 ) {
\r
272 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
273 error( RtAudioError::INVALID_USE );
\r
277 if ( iParams && iParams->nChannels < 1 ) {
\r
278 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
279 error( RtAudioError::INVALID_USE );
\r
283 if ( oParams == NULL && iParams == NULL ) {
\r
284 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
285 error( RtAudioError::INVALID_USE );
\r
289 if ( formatBytes(format) == 0 ) {
\r
290 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
291 error( RtAudioError::INVALID_USE );
\r
295 unsigned int nDevices = getDeviceCount();
\r
296 unsigned int oChannels = 0;
\r
298 oChannels = oParams->nChannels;
\r
299 if ( oParams->deviceId >= nDevices ) {
\r
300 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
301 error( RtAudioError::INVALID_USE );
\r
306 unsigned int iChannels = 0;
\r
308 iChannels = iParams->nChannels;
\r
309 if ( iParams->deviceId >= nDevices ) {
\r
310 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
311 error( RtAudioError::INVALID_USE );
\r
318 if ( oChannels > 0 ) {
\r
320 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
321 sampleRate, format, bufferFrames, options );
\r
322 if ( result == false ) {
\r
323 error( RtAudioError::SYSTEM_ERROR );
\r
328 if ( iChannels > 0 ) {
\r
330 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
331 sampleRate, format, bufferFrames, options );
\r
332 if ( result == false ) {
\r
333 if ( oChannels > 0 ) closeStream();
\r
334 error( RtAudioError::SYSTEM_ERROR );
\r
339 stream_.callbackInfo.callback = (void *) callback;
\r
340 stream_.callbackInfo.userData = userData;
\r
341 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
343 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
344 stream_.state = STREAM_STOPPED;
\r
347 unsigned int RtApi :: getDefaultInputDevice( void )
\r
349 // Should be implemented in subclasses if possible.
\r
353 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
355 // Should be implemented in subclasses if possible.
\r
359 void RtApi :: closeStream( void )
\r
361 // MUST be implemented in subclasses!
\r
365 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
366 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
367 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
368 RtAudio::StreamOptions * /*options*/ )
\r
370 // MUST be implemented in subclasses!
\r
374 void RtApi :: tickStreamTime( void )
\r
376 // Subclasses that do not provide their own implementation of
\r
377 // getStreamTime should call this function once per buffer I/O to
\r
378 // provide basic stream time support.
\r
380 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
382 #if defined( HAVE_GETTIMEOFDAY )
\r
383 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
387 long RtApi :: getStreamLatency( void )
\r
391 long totalLatency = 0;
\r
392 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
393 totalLatency = stream_.latency[0];
\r
394 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
395 totalLatency += stream_.latency[1];
\r
397 return totalLatency;
\r
400 double RtApi :: getStreamTime( void )
\r
404 #if defined( HAVE_GETTIMEOFDAY )
\r
405 // Return a very accurate estimate of the stream time by
\r
406 // adding in the elapsed time since the last tick.
\r
407 struct timeval then;
\r
408 struct timeval now;
\r
410 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
411 return stream_.streamTime;
\r
413 gettimeofday( &now, NULL );
\r
414 then = stream_.lastTickTimestamp;
\r
415 return stream_.streamTime +
\r
416 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
417 (then.tv_sec + 0.000001 * then.tv_usec));
\r
419 return stream_.streamTime;
\r
423 void RtApi :: setStreamTime( double time )
\r
428 stream_.streamTime = time;
\r
431 unsigned int RtApi :: getStreamSampleRate( void )
\r
435 return stream_.sampleRate;
\r
439 // *************************************************** //
\r
441 // OS/API-specific methods.
\r
443 // *************************************************** //
\r
445 #if defined(__MACOSX_CORE__)
\r
447 // The OS X CoreAudio API is designed to use a separate callback
\r
448 // procedure for each of its audio devices. A single RtAudio duplex
\r
449 // stream using two different devices is supported here, though it
\r
450 // cannot be guaranteed to always behave correctly because we cannot
\r
451 // synchronize these two callbacks.
\r
453 // A property listener is installed for over/underrun information.
\r
454 // However, no functionality is currently provided to allow property
\r
455 // listeners to trigger user handlers because it is unclear what could
\r
456 // be done if a critical stream parameter (buffer size, sample rate,
\r
457 // device disconnect) notification arrived. The listeners entail
\r
458 // quite a bit of extra code and most likely, a user program wouldn't
\r
459 // be prepared for the result anyway. However, we do provide a flag
\r
460 // to the client callback function to inform of an over/underrun.
\r
462 // A structure to hold various information related to the CoreAudio API
\r
464 struct CoreHandle {
\r
465 AudioDeviceID id[2]; // device ids
\r
466 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
467 AudioDeviceIOProcID procId[2];
\r
469 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
470 UInt32 nStreams[2]; // number of streams to use
\r
472 char *deviceBuffer;
\r
473 pthread_cond_t condition;
\r
474 int drainCounter; // Tracks callback counts when draining
\r
475 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
478 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
481 RtApiCore:: RtApiCore()
\r
483 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
484 // This is a largely undocumented but absolutely necessary
\r
485 // requirement starting with OS-X 10.6. If not called, queries and
\r
486 // updates to various audio device properties are not handled
\r
488 CFRunLoopRef theRunLoop = NULL;
\r
489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
490 kAudioObjectPropertyScopeGlobal,
\r
491 kAudioObjectPropertyElementMaster };
\r
492 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
493 if ( result != noErr ) {
\r
494 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
495 error( RtAudioError::WARNING );
\r
500 RtApiCore :: ~RtApiCore()
\r
502 // The subclass destructor gets called before the base class
\r
503 // destructor, so close an existing stream before deallocating
\r
504 // apiDeviceId memory.
\r
505 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
508 unsigned int RtApiCore :: getDeviceCount( void )
\r
510 // Find out how many audio devices there are, if any.
\r
512 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
513 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
514 if ( result != noErr ) {
\r
515 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
516 error( RtAudioError::WARNING );
\r
520 return dataSize / sizeof( AudioDeviceID );
\r
523 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices <= 1 ) return 0;
\r
529 UInt32 dataSize = sizeof( AudioDeviceID );
\r
530 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
531 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
532 if ( result != noErr ) {
\r
533 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
534 error( RtAudioError::WARNING );
\r
538 dataSize *= nDevices;
\r
539 AudioDeviceID deviceList[ nDevices ];
\r
540 property.mSelector = kAudioHardwarePropertyDevices;
\r
541 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
542 if ( result != noErr ) {
\r
543 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
544 error( RtAudioError::WARNING );
\r
548 for ( unsigned int i=0; i<nDevices; i++ )
\r
549 if ( id == deviceList[i] ) return i;
\r
551 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
552 error( RtAudioError::WARNING );
\r
556 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
558 unsigned int nDevices = getDeviceCount();
\r
559 if ( nDevices <= 1 ) return 0;
\r
562 UInt32 dataSize = sizeof( AudioDeviceID );
\r
563 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
564 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
565 if ( result != noErr ) {
\r
566 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
567 error( RtAudioError::WARNING );
\r
571 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
572 AudioDeviceID deviceList[ nDevices ];
\r
573 property.mSelector = kAudioHardwarePropertyDevices;
\r
574 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
575 if ( result != noErr ) {
\r
576 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
577 error( RtAudioError::WARNING );
\r
581 for ( unsigned int i=0; i<nDevices; i++ )
\r
582 if ( id == deviceList[i] ) return i;
\r
584 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
585 error( RtAudioError::WARNING );
\r
589 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
591 RtAudio::DeviceInfo info;
\r
592 info.probed = false;
\r
595 unsigned int nDevices = getDeviceCount();
\r
596 if ( nDevices == 0 ) {
\r
597 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
598 error( RtAudioError::INVALID_USE );
\r
602 if ( device >= nDevices ) {
\r
603 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
604 error( RtAudioError::INVALID_USE );
\r
608 AudioDeviceID deviceList[ nDevices ];
\r
609 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
610 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
611 kAudioObjectPropertyScopeGlobal,
\r
612 kAudioObjectPropertyElementMaster };
\r
613 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
614 0, NULL, &dataSize, (void *) &deviceList );
\r
615 if ( result != noErr ) {
\r
616 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
617 error( RtAudioError::WARNING );
\r
621 AudioDeviceID id = deviceList[ device ];
\r
623 // Get the device name.
\r
625 CFStringRef cfname;
\r
626 dataSize = sizeof( CFStringRef );
\r
627 property.mSelector = kAudioObjectPropertyManufacturer;
\r
628 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
629 if ( result != noErr ) {
\r
630 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
631 errorText_ = errorStream_.str();
\r
632 error( RtAudioError::WARNING );
\r
636 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
637 int length = CFStringGetLength(cfname);
\r
638 char *mname = (char *)malloc(length * 3 + 1);
\r
639 #if defined( UNICODE ) || defined( _UNICODE )
\r
640 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
642 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
644 info.name.append( (const char *)mname, strlen(mname) );
\r
645 info.name.append( ": " );
\r
646 CFRelease( cfname );
\r
649 property.mSelector = kAudioObjectPropertyName;
\r
650 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
651 if ( result != noErr ) {
\r
652 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtAudioError::WARNING );
\r
658 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
659 length = CFStringGetLength(cfname);
\r
660 char *name = (char *)malloc(length * 3 + 1);
\r
661 #if defined( UNICODE ) || defined( _UNICODE )
\r
662 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
664 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
666 info.name.append( (const char *)name, strlen(name) );
\r
667 CFRelease( cfname );
\r
670 // Get the output stream "configuration".
\r
671 AudioBufferList *bufferList = nil;
\r
672 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
673 property.mScope = kAudioDevicePropertyScopeOutput;
\r
674 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
676 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
677 if ( result != noErr || dataSize == 0 ) {
\r
678 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
679 errorText_ = errorStream_.str();
\r
680 error( RtAudioError::WARNING );
\r
684 // Allocate the AudioBufferList.
\r
685 bufferList = (AudioBufferList *) malloc( dataSize );
\r
686 if ( bufferList == NULL ) {
\r
687 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
688 error( RtAudioError::WARNING );
\r
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
693 if ( result != noErr || dataSize == 0 ) {
\r
694 free( bufferList );
\r
695 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
696 errorText_ = errorStream_.str();
\r
697 error( RtAudioError::WARNING );
\r
701 // Get output channel information.
\r
702 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
703 for ( i=0; i<nStreams; i++ )
\r
704 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
705 free( bufferList );
\r
707 // Get the input stream "configuration".
\r
708 property.mScope = kAudioDevicePropertyScopeInput;
\r
709 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
710 if ( result != noErr || dataSize == 0 ) {
\r
711 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
712 errorText_ = errorStream_.str();
\r
713 error( RtAudioError::WARNING );
\r
717 // Allocate the AudioBufferList.
\r
718 bufferList = (AudioBufferList *) malloc( dataSize );
\r
719 if ( bufferList == NULL ) {
\r
720 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
721 error( RtAudioError::WARNING );
\r
725 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
726 if (result != noErr || dataSize == 0) {
\r
727 free( bufferList );
\r
728 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
729 errorText_ = errorStream_.str();
\r
730 error( RtAudioError::WARNING );
\r
734 // Get input channel information.
\r
735 nStreams = bufferList->mNumberBuffers;
\r
736 for ( i=0; i<nStreams; i++ )
\r
737 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
738 free( bufferList );
\r
740 // If device opens for both playback and capture, we determine the channels.
\r
741 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
742 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
744 // Probe the device sample rates.
\r
745 bool isInput = false;
\r
746 if ( info.outputChannels == 0 ) isInput = true;
\r
748 // Determine the supported sample rates.
\r
749 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
750 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
751 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
752 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
753 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
754 errorText_ = errorStream_.str();
\r
755 error( RtAudioError::WARNING );
\r
759 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
760 AudioValueRange rangeList[ nRanges ];
\r
761 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
762 if ( result != kAudioHardwareNoError ) {
\r
763 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
764 errorText_ = errorStream_.str();
\r
765 error( RtAudioError::WARNING );
\r
769 // The sample rate reporting mechanism is a bit of a mystery. It
\r
770 // seems that it can either return individual rates or a range of
\r
771 // rates. I assume that if the min / max range values are the same,
\r
772 // then that represents a single supported rate and if the min / max
\r
773 // range values are different, the device supports an arbitrary
\r
774 // range of values (though there might be multiple ranges, so we'll
\r
775 // use the most conservative range).
\r
776 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
777 bool haveValueRange = false;
\r
778 info.sampleRates.clear();
\r
779 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
780 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
781 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
782 info.sampleRates.push_back( tmpSr );
\r
784 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
785 info.preferredSampleRate = tmpSr;
\r
788 haveValueRange = true;
\r
789 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
790 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
794 if ( haveValueRange ) {
\r
795 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
796 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
797 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
799 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
800 info.preferredSampleRate = SAMPLE_RATES[k];
\r
805 // Sort and remove any redundant values
\r
806 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
807 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
809 if ( info.sampleRates.size() == 0 ) {
\r
810 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
811 errorText_ = errorStream_.str();
\r
812 error( RtAudioError::WARNING );
\r
816 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
817 // Thus, any other "physical" formats supported by the device are of
\r
818 // no interest to the client.
\r
819 info.nativeFormats = RTAUDIO_FLOAT32;
\r
821 if ( info.outputChannels > 0 )
\r
822 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
823 if ( info.inputChannels > 0 )
\r
824 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
826 info.probed = true;
\r
830 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
831 const AudioTimeStamp* /*inNow*/,
\r
832 const AudioBufferList* inInputData,
\r
833 const AudioTimeStamp* /*inInputTime*/,
\r
834 AudioBufferList* outOutputData,
\r
835 const AudioTimeStamp* /*inOutputTime*/,
\r
836 void* infoPointer )
\r
838 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
840 RtApiCore *object = (RtApiCore *) info->object;
\r
841 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
842 return kAudioHardwareUnspecifiedError;
\r
844 return kAudioHardwareNoError;
\r
847 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
849 const AudioObjectPropertyAddress properties[],
\r
850 void* handlePointer )
\r
852 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
853 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
854 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
855 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
856 handle->xrun[1] = true;
\r
858 handle->xrun[0] = true;
\r
862 return kAudioHardwareNoError;
\r
865 static OSStatus rateListener( AudioObjectID inDevice,
\r
866 UInt32 /*nAddresses*/,
\r
867 const AudioObjectPropertyAddress /*properties*/[],
\r
868 void* ratePointer )
\r
870 Float64 *rate = (Float64 *) ratePointer;
\r
871 UInt32 dataSize = sizeof( Float64 );
\r
872 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
873 kAudioObjectPropertyScopeGlobal,
\r
874 kAudioObjectPropertyElementMaster };
\r
875 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
876 return kAudioHardwareNoError;
\r
879 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
880 unsigned int firstChannel, unsigned int sampleRate,
\r
881 RtAudioFormat format, unsigned int *bufferSize,
\r
882 RtAudio::StreamOptions *options )
\r
885 unsigned int nDevices = getDeviceCount();
\r
886 if ( nDevices == 0 ) {
\r
887 // This should not happen because a check is made before this function is called.
\r
888 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
892 if ( device >= nDevices ) {
\r
893 // This should not happen because a check is made before this function is called.
\r
894 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
898 AudioDeviceID deviceList[ nDevices ];
\r
899 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
900 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
901 kAudioObjectPropertyScopeGlobal,
\r
902 kAudioObjectPropertyElementMaster };
\r
903 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
904 0, NULL, &dataSize, (void *) &deviceList );
\r
905 if ( result != noErr ) {
\r
906 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
910 AudioDeviceID id = deviceList[ device ];
\r
912 // Setup for stream mode.
\r
913 bool isInput = false;
\r
914 if ( mode == INPUT ) {
\r
916 property.mScope = kAudioDevicePropertyScopeInput;
\r
919 property.mScope = kAudioDevicePropertyScopeOutput;
\r
921 // Get the stream "configuration".
\r
922 AudioBufferList *bufferList = nil;
\r
924 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
925 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
926 if ( result != noErr || dataSize == 0 ) {
\r
927 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
928 errorText_ = errorStream_.str();
\r
932 // Allocate the AudioBufferList.
\r
933 bufferList = (AudioBufferList *) malloc( dataSize );
\r
934 if ( bufferList == NULL ) {
\r
935 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
939 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
940 if (result != noErr || dataSize == 0) {
\r
941 free( bufferList );
\r
942 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
943 errorText_ = errorStream_.str();
\r
947 // Search for one or more streams that contain the desired number of
\r
948 // channels. CoreAudio devices can have an arbitrary number of
\r
949 // streams and each stream can have an arbitrary number of channels.
\r
950 // For each stream, a single buffer of interleaved samples is
\r
951 // provided. RtAudio prefers the use of one stream of interleaved
\r
952 // data or multiple consecutive single-channel streams. However, we
\r
953 // now support multiple consecutive multi-channel streams of
\r
954 // interleaved data as well.
\r
955 UInt32 iStream, offsetCounter = firstChannel;
\r
956 UInt32 nStreams = bufferList->mNumberBuffers;
\r
957 bool monoMode = false;
\r
958 bool foundStream = false;
\r
960 // First check that the device supports the requested number of
\r
962 UInt32 deviceChannels = 0;
\r
963 for ( iStream=0; iStream<nStreams; iStream++ )
\r
964 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
966 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
967 free( bufferList );
\r
968 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
969 errorText_ = errorStream_.str();
\r
973 // Look for a single stream meeting our needs.
\r
974 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
975 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
976 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
977 if ( streamChannels >= channels + offsetCounter ) {
\r
978 firstStream = iStream;
\r
979 channelOffset = offsetCounter;
\r
980 foundStream = true;
\r
983 if ( streamChannels > offsetCounter ) break;
\r
984 offsetCounter -= streamChannels;
\r
987 // If we didn't find a single stream above, then we should be able
\r
988 // to meet the channel specification with multiple streams.
\r
989 if ( foundStream == false ) {
\r
991 offsetCounter = firstChannel;
\r
992 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
993 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
994 if ( streamChannels > offsetCounter ) break;
\r
995 offsetCounter -= streamChannels;
\r
998 firstStream = iStream;
\r
999 channelOffset = offsetCounter;
\r
1000 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1002 if ( streamChannels > 1 ) monoMode = false;
\r
1003 while ( channelCounter > 0 ) {
\r
1004 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1005 if ( streamChannels > 1 ) monoMode = false;
\r
1006 channelCounter -= streamChannels;
\r
1011 free( bufferList );
\r
1013 // Determine the buffer size.
\r
1014 AudioValueRange bufferRange;
\r
1015 dataSize = sizeof( AudioValueRange );
\r
1016 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1017 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1019 if ( result != noErr ) {
\r
1020 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1021 errorText_ = errorStream_.str();
\r
1025 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1026 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1027 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1029 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1030 // need to make this setting for the master channel.
\r
1031 UInt32 theSize = (UInt32) *bufferSize;
\r
1032 dataSize = sizeof( UInt32 );
\r
1033 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1034 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1038 errorText_ = errorStream_.str();
\r
1042 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1043 // MUST be the same in both directions!
\r
1044 *bufferSize = theSize;
\r
1045 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1046 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1047 errorText_ = errorStream_.str();
\r
1051 stream_.bufferSize = *bufferSize;
\r
1052 stream_.nBuffers = 1;
\r
1054 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1055 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1057 dataSize = sizeof( hog_pid );
\r
1058 property.mSelector = kAudioDevicePropertyHogMode;
\r
1059 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1060 if ( result != noErr ) {
\r
1061 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1062 errorText_ = errorStream_.str();
\r
1066 if ( hog_pid != getpid() ) {
\r
1067 hog_pid = getpid();
\r
1068 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1069 if ( result != noErr ) {
\r
1070 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1071 errorText_ = errorStream_.str();
\r
1077 // Check and if necessary, change the sample rate for the device.
\r
1078 Float64 nominalRate;
\r
1079 dataSize = sizeof( Float64 );
\r
1080 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1081 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1082 if ( result != noErr ) {
\r
1083 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1084 errorText_ = errorStream_.str();
\r
1088 // Only change the sample rate if off by more than 1 Hz.
\r
1089 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1091 // Set a property listener for the sample rate change
\r
1092 Float64 reportedRate = 0.0;
\r
1093 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1094 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1095 if ( result != noErr ) {
\r
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1097 errorText_ = errorStream_.str();
\r
1101 nominalRate = (Float64) sampleRate;
\r
1102 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1103 if ( result != noErr ) {
\r
1104 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1105 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1106 errorText_ = errorStream_.str();
\r
1110 // Now wait until the reported nominal rate is what we just set.
\r
1111 UInt32 microCounter = 0;
\r
1112 while ( reportedRate != nominalRate ) {
\r
1113 microCounter += 5000;
\r
1114 if ( microCounter > 5000000 ) break;
\r
1118 // Remove the property listener.
\r
1119 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1121 if ( microCounter > 5000000 ) {
\r
1122 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1123 errorText_ = errorStream_.str();
\r
1128 // Now set the stream format for all streams. Also, check the
\r
1129 // physical format of the device and change that if necessary.
\r
1130 AudioStreamBasicDescription description;
\r
1131 dataSize = sizeof( AudioStreamBasicDescription );
\r
1132 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1133 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1134 if ( result != noErr ) {
\r
1135 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1136 errorText_ = errorStream_.str();
\r
1140 // Set the sample rate and data format id. However, only make the
\r
1141 // change if the sample rate is not within 1.0 of the desired
\r
1142 // rate and the format is not linear pcm.
\r
1143 bool updateFormat = false;
\r
1144 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1145 description.mSampleRate = (Float64) sampleRate;
\r
1146 updateFormat = true;
\r
1149 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1150 description.mFormatID = kAudioFormatLinearPCM;
\r
1151 updateFormat = true;
\r
1154 if ( updateFormat ) {
\r
1155 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1156 if ( result != noErr ) {
\r
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1158 errorText_ = errorStream_.str();
\r
1163 // Now check the physical format.
\r
1164 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1165 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1166 if ( result != noErr ) {
\r
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1168 errorText_ = errorStream_.str();
\r
1172 //std::cout << "Current physical stream format:" << std::endl;
\r
1173 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1174 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1175 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1176 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1178 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1179 description.mFormatID = kAudioFormatLinearPCM;
\r
1180 //description.mSampleRate = (Float64) sampleRate;
\r
1181 AudioStreamBasicDescription testDescription = description;
\r
1182 UInt32 formatFlags;
\r
1184 // We'll try higher bit rates first and then work our way down.
\r
1185 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1186 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1187 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1188 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1189 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1191 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1193 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1195 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1196 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1199 bool setPhysicalFormat = false;
\r
1200 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1201 testDescription = description;
\r
1202 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1203 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1204 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1205 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1207 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1209 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1210 if ( result == noErr ) {
\r
1211 setPhysicalFormat = true;
\r
1212 //std::cout << "Updated physical stream format:" << std::endl;
\r
1213 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1214 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1215 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1216 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1221 if ( !setPhysicalFormat ) {
\r
1222 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1223 errorText_ = errorStream_.str();
\r
1226 } // done setting virtual/physical formats.
\r
1228 // Get the stream / device latency.
\r
1230 dataSize = sizeof( UInt32 );
\r
1231 property.mSelector = kAudioDevicePropertyLatency;
\r
1232 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1233 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1234 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1236 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1237 errorText_ = errorStream_.str();
\r
1238 error( RtAudioError::WARNING );
\r
1242 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1243 // always be presented in native-endian format, so we should never
\r
1244 // need to byte swap.
\r
1245 stream_.doByteSwap[mode] = false;
\r
1247 // From the CoreAudio documentation, PCM data must be supplied as
\r
1249 stream_.userFormat = format;
\r
1250 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1252 if ( streamCount == 1 )
\r
1253 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1254 else // multiple streams
\r
1255 stream_.nDeviceChannels[mode] = channels;
\r
1256 stream_.nUserChannels[mode] = channels;
\r
1257 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1258 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1259 else stream_.userInterleaved = true;
\r
1260 stream_.deviceInterleaved[mode] = true;
\r
1261 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1263 // Set flags for buffer conversion.
\r
1264 stream_.doConvertBuffer[mode] = false;
\r
1265 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1266 stream_.doConvertBuffer[mode] = true;
\r
1267 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1268 stream_.doConvertBuffer[mode] = true;
\r
1269 if ( streamCount == 1 ) {
\r
1270 if ( stream_.nUserChannels[mode] > 1 &&
\r
1271 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1272 stream_.doConvertBuffer[mode] = true;
\r
1274 else if ( monoMode && stream_.userInterleaved )
\r
1275 stream_.doConvertBuffer[mode] = true;
\r
1277 // Allocate our CoreHandle structure for the stream.
\r
1278 CoreHandle *handle = 0;
\r
1279 if ( stream_.apiHandle == 0 ) {
\r
1281 handle = new CoreHandle;
\r
1283 catch ( std::bad_alloc& ) {
\r
1284 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1288 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1289 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1292 stream_.apiHandle = (void *) handle;
\r
1295 handle = (CoreHandle *) stream_.apiHandle;
\r
1296 handle->iStream[mode] = firstStream;
\r
1297 handle->nStreams[mode] = streamCount;
\r
1298 handle->id[mode] = id;
\r
1300 // Allocate necessary internal buffers.
\r
1301 unsigned long bufferBytes;
\r
1302 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1303 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1304 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1305 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1306 if ( stream_.userBuffer[mode] == NULL ) {
\r
1307 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1311 // If possible, we will make use of the CoreAudio stream buffers as
\r
1312 // "device buffers". However, we can't do this if using multiple
\r
1314 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1316 bool makeBuffer = true;
\r
1317 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1318 if ( mode == INPUT ) {
\r
1319 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1320 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1321 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1325 if ( makeBuffer ) {
\r
1326 bufferBytes *= *bufferSize;
\r
1327 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1328 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1329 if ( stream_.deviceBuffer == NULL ) {
\r
1330 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1336 stream_.sampleRate = sampleRate;
\r
1337 stream_.device[mode] = device;
\r
1338 stream_.state = STREAM_STOPPED;
\r
1339 stream_.callbackInfo.object = (void *) this;
\r
1341 // Setup the buffer conversion information structure.
\r
1342 if ( stream_.doConvertBuffer[mode] ) {
\r
1343 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1344 else setConvertInfo( mode, channelOffset );
\r
1347 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1348 // Only one callback procedure per device.
\r
1349 stream_.mode = DUPLEX;
\r
1351 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1352 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1354 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1355 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1357 if ( result != noErr ) {
\r
1358 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1359 errorText_ = errorStream_.str();
\r
1362 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1363 stream_.mode = DUPLEX;
\r
1365 stream_.mode = mode;
\r
1368 // Setup the device property listener for over/underload.
\r
1369 property.mSelector = kAudioDeviceProcessorOverload;
\r
1370 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1371 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1377 pthread_cond_destroy( &handle->condition );
\r
1379 stream_.apiHandle = 0;
\r
1382 for ( int i=0; i<2; i++ ) {
\r
1383 if ( stream_.userBuffer[i] ) {
\r
1384 free( stream_.userBuffer[i] );
\r
1385 stream_.userBuffer[i] = 0;
\r
1389 if ( stream_.deviceBuffer ) {
\r
1390 free( stream_.deviceBuffer );
\r
1391 stream_.deviceBuffer = 0;
\r
1394 stream_.state = STREAM_CLOSED;
\r
1398 void RtApiCore :: closeStream( void )
\r
1400 if ( stream_.state == STREAM_CLOSED ) {
\r
1401 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1402 error( RtAudioError::WARNING );
\r
1406 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1407 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1408 if ( stream_.state == STREAM_RUNNING )
\r
1409 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1410 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1411 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1413 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1414 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1418 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1419 if ( stream_.state == STREAM_RUNNING )
\r
1420 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1421 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1422 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1424 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1425 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1429 for ( int i=0; i<2; i++ ) {
\r
1430 if ( stream_.userBuffer[i] ) {
\r
1431 free( stream_.userBuffer[i] );
\r
1432 stream_.userBuffer[i] = 0;
\r
1436 if ( stream_.deviceBuffer ) {
\r
1437 free( stream_.deviceBuffer );
\r
1438 stream_.deviceBuffer = 0;
\r
1441 // Destroy pthread condition variable.
\r
1442 pthread_cond_destroy( &handle->condition );
\r
1444 stream_.apiHandle = 0;
\r
1446 stream_.mode = UNINITIALIZED;
\r
1447 stream_.state = STREAM_CLOSED;
\r
1450 void RtApiCore :: startStream( void )
\r
1453 if ( stream_.state == STREAM_RUNNING ) {
\r
1454 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1455 error( RtAudioError::WARNING );
\r
1459 OSStatus result = noErr;
\r
1460 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1461 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1463 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1464 if ( result != noErr ) {
\r
1465 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1466 errorText_ = errorStream_.str();
\r
1471 if ( stream_.mode == INPUT ||
\r
1472 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1474 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1475 if ( result != noErr ) {
\r
1476 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1477 errorText_ = errorStream_.str();
\r
1482 handle->drainCounter = 0;
\r
1483 handle->internalDrain = false;
\r
1484 stream_.state = STREAM_RUNNING;
\r
1487 if ( result == noErr ) return;
\r
1488 error( RtAudioError::SYSTEM_ERROR );
\r
1491 void RtApiCore :: stopStream( void )
\r
1494 if ( stream_.state == STREAM_STOPPED ) {
\r
1495 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1496 error( RtAudioError::WARNING );
\r
1500 OSStatus result = noErr;
\r
1501 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1502 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1504 if ( handle->drainCounter == 0 ) {
\r
1505 handle->drainCounter = 2;
\r
1506 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1509 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1510 if ( result != noErr ) {
\r
1511 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1512 errorText_ = errorStream_.str();
\r
1517 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1519 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1520 if ( result != noErr ) {
\r
1521 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1522 errorText_ = errorStream_.str();
\r
1527 stream_.state = STREAM_STOPPED;
\r
1530 if ( result == noErr ) return;
\r
1531 error( RtAudioError::SYSTEM_ERROR );
\r
1534 void RtApiCore :: abortStream( void )
\r
1537 if ( stream_.state == STREAM_STOPPED ) {
\r
1538 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1539 error( RtAudioError::WARNING );
\r
1543 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 handle->drainCounter = 2;
\r
1549 // This function will be called by a spawned thread when the user
\r
1550 // callback function signals that the stream should be stopped or
\r
1551 // aborted. It is better to handle it this way because the
\r
1552 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1553 // function is called.
\r
1554 static void *coreStopStream( void *ptr )
\r
1556 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1557 RtApiCore *object = (RtApiCore *) info->object;
\r
1559 object->stopStream();
\r
1560 pthread_exit( NULL );
\r
1563 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1564 const AudioBufferList *inBufferList,
\r
1565 const AudioBufferList *outBufferList )
\r
1567 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1568 if ( stream_.state == STREAM_CLOSED ) {
\r
1569 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1570 error( RtAudioError::WARNING );
\r
1574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1575 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1577 // Check if we were draining the stream and signal is finished.
\r
1578 if ( handle->drainCounter > 3 ) {
\r
1579 ThreadHandle threadId;
\r
1581 stream_.state = STREAM_STOPPING;
\r
1582 if ( handle->internalDrain == true )
\r
1583 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1584 else // external call to stopStream()
\r
1585 pthread_cond_signal( &handle->condition );
\r
1589 AudioDeviceID outputDevice = handle->id[0];
\r
1591 // Invoke user callback to get fresh output data UNLESS we are
\r
1592 // draining stream or duplex mode AND the input/output devices are
\r
1593 // different AND this function is called for the input device.
\r
1594 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1595 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1596 double streamTime = getStreamTime();
\r
1597 RtAudioStreamStatus status = 0;
\r
1598 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1599 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1600 handle->xrun[0] = false;
\r
1602 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1603 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1604 handle->xrun[1] = false;
\r
1607 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1608 stream_.bufferSize, streamTime, status, info->userData );
\r
1609 if ( cbReturnValue == 2 ) {
\r
1610 stream_.state = STREAM_STOPPING;
\r
1611 handle->drainCounter = 2;
\r
1615 else if ( cbReturnValue == 1 ) {
\r
1616 handle->drainCounter = 1;
\r
1617 handle->internalDrain = true;
\r
1621 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1625 if ( handle->nStreams[0] == 1 ) {
\r
1626 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1628 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1630 else { // fill multiple streams with zeros
\r
1631 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1632 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1634 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1638 else if ( handle->nStreams[0] == 1 ) {
\r
1639 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1640 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1641 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1643 else { // copy from user buffer
\r
1644 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1645 stream_.userBuffer[0],
\r
1646 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1649 else { // fill multiple streams
\r
1650 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1651 if ( stream_.doConvertBuffer[0] ) {
\r
1652 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1653 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1656 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1657 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1658 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1659 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1660 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1663 else { // fill multiple multi-channel streams with interleaved data
\r
1664 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1665 Float32 *out, *in;
\r
1667 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1668 UInt32 inChannels = stream_.nUserChannels[0];
\r
1669 if ( stream_.doConvertBuffer[0] ) {
\r
1670 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1671 inChannels = stream_.nDeviceChannels[0];
\r
1674 if ( inInterleaved ) inOffset = 1;
\r
1675 else inOffset = stream_.bufferSize;
\r
1677 channelsLeft = inChannels;
\r
1678 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1680 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1681 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1684 // Account for possible channel offset in first stream
\r
1685 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1686 streamChannels -= stream_.channelOffset[0];
\r
1687 outJump = stream_.channelOffset[0];
\r
1691 // Account for possible unfilled channels at end of the last stream
\r
1692 if ( streamChannels > channelsLeft ) {
\r
1693 outJump = streamChannels - channelsLeft;
\r
1694 streamChannels = channelsLeft;
\r
1697 // Determine input buffer offsets and skips
\r
1698 if ( inInterleaved ) {
\r
1699 inJump = inChannels;
\r
1700 in += inChannels - channelsLeft;
\r
1704 in += (inChannels - channelsLeft) * inOffset;
\r
1707 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1708 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1709 *out++ = in[j*inOffset];
\r
1714 channelsLeft -= streamChannels;
\r
1720 // Don't bother draining input
\r
1721 if ( handle->drainCounter ) {
\r
1722 handle->drainCounter++;
\r
1726 AudioDeviceID inputDevice;
\r
1727 inputDevice = handle->id[1];
\r
1728 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1730 if ( handle->nStreams[1] == 1 ) {
\r
1731 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1732 convertBuffer( stream_.userBuffer[1],
\r
1733 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1734 stream_.convertInfo[1] );
\r
1736 else { // copy to user buffer
\r
1737 memcpy( stream_.userBuffer[1],
\r
1738 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1739 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1742 else { // read from multiple streams
\r
1743 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1744 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1746 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1747 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1748 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1749 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1750 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1753 else { // read from multiple multi-channel streams
\r
1754 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1755 Float32 *out, *in;
\r
1757 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1758 UInt32 outChannels = stream_.nUserChannels[1];
\r
1759 if ( stream_.doConvertBuffer[1] ) {
\r
1760 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1761 outChannels = stream_.nDeviceChannels[1];
\r
1764 if ( outInterleaved ) outOffset = 1;
\r
1765 else outOffset = stream_.bufferSize;
\r
1767 channelsLeft = outChannels;
\r
1768 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1770 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1771 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1774 // Account for possible channel offset in first stream
\r
1775 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1776 streamChannels -= stream_.channelOffset[1];
\r
1777 inJump = stream_.channelOffset[1];
\r
1781 // Account for possible unread channels at end of the last stream
\r
1782 if ( streamChannels > channelsLeft ) {
\r
1783 inJump = streamChannels - channelsLeft;
\r
1784 streamChannels = channelsLeft;
\r
1787 // Determine output buffer offsets and skips
\r
1788 if ( outInterleaved ) {
\r
1789 outJump = outChannels;
\r
1790 out += outChannels - channelsLeft;
\r
1794 out += (outChannels - channelsLeft) * outOffset;
\r
1797 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1798 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1799 out[j*outOffset] = *in++;
\r
1804 channelsLeft -= streamChannels;
\r
1808 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1809 convertBuffer( stream_.userBuffer[1],
\r
1810 stream_.deviceBuffer,
\r
1811 stream_.convertInfo[1] );
\r
1817 //MUTEX_UNLOCK( &stream_.mutex );
\r
1819 RtApi::tickStreamTime();
\r
1823 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1827 case kAudioHardwareNotRunningError:
\r
1828 return "kAudioHardwareNotRunningError";
\r
1830 case kAudioHardwareUnspecifiedError:
\r
1831 return "kAudioHardwareUnspecifiedError";
\r
1833 case kAudioHardwareUnknownPropertyError:
\r
1834 return "kAudioHardwareUnknownPropertyError";
\r
1836 case kAudioHardwareBadPropertySizeError:
\r
1837 return "kAudioHardwareBadPropertySizeError";
\r
1839 case kAudioHardwareIllegalOperationError:
\r
1840 return "kAudioHardwareIllegalOperationError";
\r
1842 case kAudioHardwareBadObjectError:
\r
1843 return "kAudioHardwareBadObjectError";
\r
1845 case kAudioHardwareBadDeviceError:
\r
1846 return "kAudioHardwareBadDeviceError";
\r
1848 case kAudioHardwareBadStreamError:
\r
1849 return "kAudioHardwareBadStreamError";
\r
1851 case kAudioHardwareUnsupportedOperationError:
\r
1852 return "kAudioHardwareUnsupportedOperationError";
\r
1854 case kAudioDeviceUnsupportedFormatError:
\r
1855 return "kAudioDeviceUnsupportedFormatError";
\r
1857 case kAudioDevicePermissionsError:
\r
1858 return "kAudioDevicePermissionsError";
\r
1861 return "CoreAudio unknown error";
\r
1865 //******************** End of __MACOSX_CORE__ *********************//
\r
1868 #if defined(__UNIX_JACK__)
\r
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X.  It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server.  The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl.  Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started.  In
// particular,
//
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4.  Once the server is running, it
// is not possible to override these values.  If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// RtApiJack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started.  When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
\r
1899 #include <jack/jack.h>
\r
1900 #include <unistd.h>
\r
1903 // A structure to hold various information related to the Jack API
\r
1904 // implementation.
\r
1905 struct JackHandle {
\r
1906 jack_client_t *client;
\r
1907 jack_port_t **ports[2];
\r
1908 std::string deviceName[2];
\r
1910 pthread_cond_t condition;
\r
1911 int drainCounter; // Tracks callback counts when draining
\r
1912 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1915 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler: installed via jack_set_error_function() in
// non-debug builds to silence JACK's internal error reporting.
static void jackSilentError( const char * ) {}
\r
1920 RtApiJack :: RtApiJack()
\r
1922 // Nothing to do here.
\r
1923 #if !defined(__RTAUDIO_DEBUG__)
\r
1924 // Turn off Jack's internal error reporting.
\r
1925 jack_set_error_function( &jackSilentError );
\r
1929 RtApiJack :: ~RtApiJack()
\r
1931 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1934 unsigned int RtApiJack :: getDeviceCount( void )
\r
1936 // See if we can become a jack client.
\r
1937 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1938 jack_status_t *status = NULL;
\r
1939 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1940 if ( client == 0 ) return 0;
\r
1942 const char **ports;
\r
1943 std::string port, previousPort;
\r
1944 unsigned int nChannels = 0, nDevices = 0;
\r
1945 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1947 // Parse the port names up to the first colon (:).
\r
1948 size_t iColon = 0;
\r
1950 port = (char *) ports[ nChannels ];
\r
1951 iColon = port.find(":");
\r
1952 if ( iColon != std::string::npos ) {
\r
1953 port = port.substr( 0, iColon + 1 );
\r
1954 if ( port != previousPort ) {
\r
1956 previousPort = port;
\r
1959 } while ( ports[++nChannels] );
\r
1963 jack_client_close( client );
\r
1967 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1969 RtAudio::DeviceInfo info;
\r
1970 info.probed = false;
\r
1972 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1973 jack_status_t *status = NULL;
\r
1974 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1975 if ( client == 0 ) {
\r
1976 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1977 error( RtAudioError::WARNING );
\r
1981 const char **ports;
\r
1982 std::string port, previousPort;
\r
1983 unsigned int nPorts = 0, nDevices = 0;
\r
1984 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1986 // Parse the port names up to the first colon (:).
\r
1987 size_t iColon = 0;
\r
1989 port = (char *) ports[ nPorts ];
\r
1990 iColon = port.find(":");
\r
1991 if ( iColon != std::string::npos ) {
\r
1992 port = port.substr( 0, iColon );
\r
1993 if ( port != previousPort ) {
\r
1994 if ( nDevices == device ) info.name = port;
\r
1996 previousPort = port;
\r
1999 } while ( ports[++nPorts] );
\r
2003 if ( device >= nDevices ) {
\r
2004 jack_client_close( client );
\r
2005 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2006 error( RtAudioError::INVALID_USE );
\r
2010 // Get the current jack server sample rate.
\r
2011 info.sampleRates.clear();
\r
2013 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2014 info.sampleRates.push_back( info.preferredSampleRate );
\r
2016 // Count the available ports containing the client name as device
\r
2017 // channels. Jack "input ports" equal RtAudio output channels.
\r
2018 unsigned int nChannels = 0;
\r
2019 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2021 while ( ports[ nChannels ] ) nChannels++;
\r
2023 info.outputChannels = nChannels;
\r
2026 // Jack "output ports" equal RtAudio input channels.
\r
2028 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2030 while ( ports[ nChannels ] ) nChannels++;
\r
2032 info.inputChannels = nChannels;
\r
2035 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2036 jack_client_close(client);
\r
2037 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2038 error( RtAudioError::WARNING );
\r
2042 // If device opens for both playback and capture, we determine the channels.
\r
2043 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2044 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2046 // Jack always uses 32-bit floats.
\r
2047 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2049 // Jack doesn't provide default devices so we'll use the first available one.
\r
2050 if ( device == 0 && info.outputChannels > 0 )
\r
2051 info.isDefaultOutput = true;
\r
2052 if ( device == 0 && info.inputChannels > 0 )
\r
2053 info.isDefaultInput = true;
\r
2055 jack_client_close(client);
\r
2056 info.probed = true;
\r
2060 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2062 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2064 RtApiJack *object = (RtApiJack *) info->object;
\r
2065 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2070 // This function will be called by a spawned thread when the Jack
\r
2071 // server signals that it is shutting down. It is necessary to handle
\r
2072 // it this way because the jackShutdown() function must return before
\r
2073 // the jack_deactivate() function (in closeStream()) will return.
\r
2074 static void *jackCloseStream( void *ptr )
\r
2076 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2077 RtApiJack *object = (RtApiJack *) info->object;
\r
2079 object->closeStream();
\r
2081 pthread_exit( NULL );
\r
2083 static void jackShutdown( void *infoPointer )
\r
2085 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2086 RtApiJack *object = (RtApiJack *) info->object;
\r
2088 // Check current stream state. If stopped, then we'll assume this
\r
2089 // was called as a result of a call to RtApiJack::stopStream (the
\r
2090 // deactivation of a client handle causes this function to be called).
\r
2091 // If not, we'll assume the Jack server is shutting down or some
\r
2092 // other problem occurred and we should close the stream.
\r
2093 if ( object->isStreamRunning() == false ) return;
\r
2095 ThreadHandle threadId;
\r
2096 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2097 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2100 static int jackXrun( void *infoPointer )
\r
2102 JackHandle *handle = (JackHandle *) infoPointer;
\r
2104 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2105 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2110 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2111 unsigned int firstChannel, unsigned int sampleRate,
\r
2112 RtAudioFormat format, unsigned int *bufferSize,
\r
2113 RtAudio::StreamOptions *options )
\r
2115 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2117 // Look for jack server and try to become a client (only do once per stream).
\r
2118 jack_client_t *client = 0;
\r
2119 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2120 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2121 jack_status_t *status = NULL;
\r
2122 if ( options && !options->streamName.empty() )
\r
2123 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2125 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2126 if ( client == 0 ) {
\r
2127 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2128 error( RtAudioError::WARNING );
\r
2133 // The handle must have been created on an earlier pass.
\r
2134 client = handle->client;
\r
2137 const char **ports;
\r
2138 std::string port, previousPort, deviceName;
\r
2139 unsigned int nPorts = 0, nDevices = 0;
\r
2140 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2142 // Parse the port names up to the first colon (:).
\r
2143 size_t iColon = 0;
\r
2145 port = (char *) ports[ nPorts ];
\r
2146 iColon = port.find(":");
\r
2147 if ( iColon != std::string::npos ) {
\r
2148 port = port.substr( 0, iColon );
\r
2149 if ( port != previousPort ) {
\r
2150 if ( nDevices == device ) deviceName = port;
\r
2152 previousPort = port;
\r
2155 } while ( ports[++nPorts] );
\r
2159 if ( device >= nDevices ) {
\r
2160 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2164 // Count the available ports containing the client name as device
\r
2165 // channels. Jack "input ports" equal RtAudio output channels.
\r
2166 unsigned int nChannels = 0;
\r
2167 unsigned long flag = JackPortIsInput;
\r
2168 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2169 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2171 while ( ports[ nChannels ] ) nChannels++;
\r
2175 // Compare the jack ports for specified client to the requested number of channels.
\r
2176 if ( nChannels < (channels + firstChannel) ) {
\r
2177 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2178 errorText_ = errorStream_.str();
\r
2182 // Check the jack server sample rate.
\r
2183 unsigned int jackRate = jack_get_sample_rate( client );
\r
2184 if ( sampleRate != jackRate ) {
\r
2185 jack_client_close( client );
\r
2186 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2187 errorText_ = errorStream_.str();
\r
2190 stream_.sampleRate = jackRate;
\r
2192 // Get the latency of the JACK port.
\r
2193 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2194 if ( ports[ firstChannel ] ) {
\r
2195 // Added by Ge Wang
\r
2196 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2197 // the range (usually the min and max are equal)
\r
2198 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2199 // get the latency range
\r
2200 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2201 // be optimistic, use the min!
\r
2202 stream_.latency[mode] = latrange.min;
\r
2203 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2207 // The jack server always uses 32-bit floating-point data.
\r
2208 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2209 stream_.userFormat = format;
\r
2211 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2212 else stream_.userInterleaved = true;
\r
2214 // Jack always uses non-interleaved buffers.
\r
2215 stream_.deviceInterleaved[mode] = false;
\r
2217 // Jack always provides host byte-ordered data.
\r
2218 stream_.doByteSwap[mode] = false;
\r
2220 // Get the buffer size. The buffer size and number of buffers
\r
2221 // (periods) is set when the jack server is started.
\r
2222 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2223 *bufferSize = stream_.bufferSize;
\r
2225 stream_.nDeviceChannels[mode] = channels;
\r
2226 stream_.nUserChannels[mode] = channels;
\r
2228 // Set flags for buffer conversion.
\r
2229 stream_.doConvertBuffer[mode] = false;
\r
2230 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2231 stream_.doConvertBuffer[mode] = true;
\r
2232 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2233 stream_.nUserChannels[mode] > 1 )
\r
2234 stream_.doConvertBuffer[mode] = true;
\r
2236 // Allocate our JackHandle structure for the stream.
\r
2237 if ( handle == 0 ) {
\r
2239 handle = new JackHandle;
\r
2241 catch ( std::bad_alloc& ) {
\r
2242 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2246 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2250 stream_.apiHandle = (void *) handle;
\r
2251 handle->client = client;
\r
2253 handle->deviceName[mode] = deviceName;
\r
2255 // Allocate necessary internal buffers.
\r
2256 unsigned long bufferBytes;
\r
2257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2259 if ( stream_.userBuffer[mode] == NULL ) {
\r
2260 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2264 if ( stream_.doConvertBuffer[mode] ) {
\r
2266 bool makeBuffer = true;
\r
2267 if ( mode == OUTPUT )
\r
2268 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2269 else { // mode == INPUT
\r
2270 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2271 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2272 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2273 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2277 if ( makeBuffer ) {
\r
2278 bufferBytes *= *bufferSize;
\r
2279 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2280 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2281 if ( stream_.deviceBuffer == NULL ) {
\r
2282 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2288 // Allocate memory for the Jack ports (channels) identifiers.
\r
2289 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2290 if ( handle->ports[mode] == NULL ) {
\r
2291 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2295 stream_.device[mode] = device;
\r
2296 stream_.channelOffset[mode] = firstChannel;
\r
2297 stream_.state = STREAM_STOPPED;
\r
2298 stream_.callbackInfo.object = (void *) this;
\r
2300 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2301 // We had already set up the stream for output.
\r
2302 stream_.mode = DUPLEX;
\r
2304 stream_.mode = mode;
\r
2305 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2306 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2307 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2310 // Register our ports.
\r
2312 if ( mode == OUTPUT ) {
\r
2313 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2314 snprintf( label, 64, "outport %d", i );
\r
2315 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2316 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2320 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2321 snprintf( label, 64, "inport %d", i );
\r
2322 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2323 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2327 // Setup the buffer conversion information structure. We don't use
\r
2328 // buffers to do channel offsets, so we override that parameter
\r
2330 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2336 pthread_cond_destroy( &handle->condition );
\r
2337 jack_client_close( handle->client );
\r
2339 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2340 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2343 stream_.apiHandle = 0;
\r
2346 for ( int i=0; i<2; i++ ) {
\r
2347 if ( stream_.userBuffer[i] ) {
\r
2348 free( stream_.userBuffer[i] );
\r
2349 stream_.userBuffer[i] = 0;
\r
2353 if ( stream_.deviceBuffer ) {
\r
2354 free( stream_.deviceBuffer );
\r
2355 stream_.deviceBuffer = 0;
\r
2361 void RtApiJack :: closeStream( void )
\r
2363 if ( stream_.state == STREAM_CLOSED ) {
\r
2364 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2365 error( RtAudioError::WARNING );
\r
2369 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2372 if ( stream_.state == STREAM_RUNNING )
\r
2373 jack_deactivate( handle->client );
\r
2375 jack_client_close( handle->client );
\r
2379 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2380 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2381 pthread_cond_destroy( &handle->condition );
\r
2383 stream_.apiHandle = 0;
\r
2386 for ( int i=0; i<2; i++ ) {
\r
2387 if ( stream_.userBuffer[i] ) {
\r
2388 free( stream_.userBuffer[i] );
\r
2389 stream_.userBuffer[i] = 0;
\r
2393 if ( stream_.deviceBuffer ) {
\r
2394 free( stream_.deviceBuffer );
\r
2395 stream_.deviceBuffer = 0;
\r
2398 stream_.mode = UNINITIALIZED;
\r
2399 stream_.state = STREAM_CLOSED;
\r
2402 void RtApiJack :: startStream( void )
\r
2405 if ( stream_.state == STREAM_RUNNING ) {
\r
2406 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2407 error( RtAudioError::WARNING );
\r
2411 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2412 int result = jack_activate( handle->client );
\r
2414 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2418 const char **ports;
\r
2420 // Get the list of available ports.
\r
2421 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2423 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2424 if ( ports == NULL) {
\r
2425 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2429 // Now make the port connections. Since RtAudio wasn't designed to
\r
2430 // allow the user to select particular channels of a device, we'll
\r
2431 // just open the first "nChannels" ports with offset.
\r
2432 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2434 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2435 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2438 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2445 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2447 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2448 if ( ports == NULL) {
\r
2449 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2453 // Now make the port connections. See note above.
\r
2454 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2456 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2457 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2460 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2467 handle->drainCounter = 0;
\r
2468 handle->internalDrain = false;
\r
2469 stream_.state = STREAM_RUNNING;
\r
2472 if ( result == 0 ) return;
\r
2473 error( RtAudioError::SYSTEM_ERROR );
\r
2476 void RtApiJack :: stopStream( void )
\r
2479 if ( stream_.state == STREAM_STOPPED ) {
\r
2480 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2481 error( RtAudioError::WARNING );
\r
2485 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2488 if ( handle->drainCounter == 0 ) {
\r
2489 handle->drainCounter = 2;
\r
2490 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2494 jack_deactivate( handle->client );
\r
2495 stream_.state = STREAM_STOPPED;
\r
2498 void RtApiJack :: abortStream( void )
\r
2501 if ( stream_.state == STREAM_STOPPED ) {
\r
2502 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2503 error( RtAudioError::WARNING );
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2508 handle->drainCounter = 2;
\r
2513 // This function will be called by a spawned thread when the user
\r
2514 // callback function signals that the stream should be stopped or
\r
2515 // aborted. It is necessary to handle it this way because the
\r
2516 // callbackEvent() function must return before the jack_deactivate()
\r
2517 // function will return.
\r
2518 static void *jackStopStream( void *ptr )
\r
2520 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2521 RtApiJack *object = (RtApiJack *) info->object;
\r
2523 object->stopStream();
\r
2524 pthread_exit( NULL );
\r
2527 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2529 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2530 if ( stream_.state == STREAM_CLOSED ) {
\r
2531 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2532 error( RtAudioError::WARNING );
\r
2535 if ( stream_.bufferSize != nframes ) {
\r
2536 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2537 error( RtAudioError::WARNING );
\r
2541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2544 // Check if we were draining the stream and signal is finished.
\r
2545 if ( handle->drainCounter > 3 ) {
\r
2546 ThreadHandle threadId;
\r
2548 stream_.state = STREAM_STOPPING;
\r
2549 if ( handle->internalDrain == true )
\r
2550 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2552 pthread_cond_signal( &handle->condition );
\r
2556 // Invoke user callback first, to get fresh output data.
\r
2557 if ( handle->drainCounter == 0 ) {
\r
2558 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2559 double streamTime = getStreamTime();
\r
2560 RtAudioStreamStatus status = 0;
\r
2561 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2562 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2563 handle->xrun[0] = false;
\r
2565 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2566 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2567 handle->xrun[1] = false;
\r
2569 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2570 stream_.bufferSize, streamTime, status, info->userData );
\r
2571 if ( cbReturnValue == 2 ) {
\r
2572 stream_.state = STREAM_STOPPING;
\r
2573 handle->drainCounter = 2;
\r
2575 pthread_create( &id, NULL, jackStopStream, info );
\r
2578 else if ( cbReturnValue == 1 ) {
\r
2579 handle->drainCounter = 1;
\r
2580 handle->internalDrain = true;
\r
2584 jack_default_audio_sample_t *jackbuffer;
\r
2585 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2586 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2588 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2590 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2591 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2592 memset( jackbuffer, 0, bufferBytes );
\r
2596 else if ( stream_.doConvertBuffer[0] ) {
\r
2598 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2600 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2601 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2602 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2605 else { // no buffer conversion
\r
2606 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2607 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2608 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2613 // Don't bother draining input
\r
2614 if ( handle->drainCounter ) {
\r
2615 handle->drainCounter++;
\r
2619 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2621 if ( stream_.doConvertBuffer[1] ) {
\r
2622 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2623 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2624 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2626 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2628 else { // no buffer conversion
\r
2629 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2630 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2631 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2637 RtApi::tickStreamTime();
\r
2640 //******************** End of __UNIX_JACK__ *********************//
\r
2643 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2661 #include "asiosys.h"
\r
2663 #include "iasiothiscallresolver.h"
\r
2664 #include "asiodrivers.h"
\r
2667 static AsioDrivers drivers;
\r
2668 static ASIOCallbacks asioCallbacks;
\r
2669 static ASIODriverInfo driverInfo;
\r
2670 static CallbackInfo *asioCallbackInfo;
\r
2671 static bool asioXRun;
\r
2673 struct AsioHandle {
\r
2674 int drainCounter; // Tracks callback counts when draining
\r
2675 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2676 ASIOBufferInfo *bufferInfos;
\r
2680 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2683 // Function declarations (definitions at end of section)
\r
2684 static const char* getAsioErrorString( ASIOError result );
\r
2685 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2686 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2688 RtApiAsio :: RtApiAsio()
\r
2690 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2691 // CoInitialize beforehand, but it must be for appartment threading
\r
2692 // (in which case, CoInitilialize will return S_FALSE here).
\r
2693 coInitialized_ = false;
\r
2694 HRESULT hr = CoInitialize( NULL );
\r
2695 if ( FAILED(hr) ) {
\r
2696 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2697 error( RtAudioError::WARNING );
\r
2699 coInitialized_ = true;
\r
2701 drivers.removeCurrentDriver();
\r
2702 driverInfo.asioVersion = 2;
\r
2704 // See note in DirectSound implementation about GetDesktopWindow().
\r
2705 driverInfo.sysRef = GetForegroundWindow();
\r
2708 RtApiAsio :: ~RtApiAsio()
\r
2710 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2711 if ( coInitialized_ ) CoUninitialize();
\r
2714 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2716 return (unsigned int) drivers.asioGetNumDev();
\r
2719 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2721 RtAudio::DeviceInfo info;
\r
2722 info.probed = false;
\r
2725 unsigned int nDevices = getDeviceCount();
\r
2726 if ( nDevices == 0 ) {
\r
2727 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2728 error( RtAudioError::INVALID_USE );
\r
2732 if ( device >= nDevices ) {
\r
2733 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2734 error( RtAudioError::INVALID_USE );
\r
2738 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2739 if ( stream_.state != STREAM_CLOSED ) {
\r
2740 if ( device >= devices_.size() ) {
\r
2741 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2742 error( RtAudioError::WARNING );
\r
2745 return devices_[ device ];
\r
2748 char driverName[32];
\r
2749 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2750 if ( result != ASE_OK ) {
\r
2751 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2752 errorText_ = errorStream_.str();
\r
2753 error( RtAudioError::WARNING );
\r
2757 info.name = driverName;
\r
2759 if ( !drivers.loadDriver( driverName ) ) {
\r
2760 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2761 errorText_ = errorStream_.str();
\r
2762 error( RtAudioError::WARNING );
\r
2766 result = ASIOInit( &driverInfo );
\r
2767 if ( result != ASE_OK ) {
\r
2768 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2769 errorText_ = errorStream_.str();
\r
2770 error( RtAudioError::WARNING );
\r
2774 // Determine the device channel information.
\r
2775 long inputChannels, outputChannels;
\r
2776 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2777 if ( result != ASE_OK ) {
\r
2778 drivers.removeCurrentDriver();
\r
2779 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2780 errorText_ = errorStream_.str();
\r
2781 error( RtAudioError::WARNING );
\r
2785 info.outputChannels = outputChannels;
\r
2786 info.inputChannels = inputChannels;
\r
2787 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2788 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2790 // Determine the supported sample rates.
\r
2791 info.sampleRates.clear();
\r
2792 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2793 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2794 if ( result == ASE_OK ) {
\r
2795 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2797 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2798 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2802 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2803 ASIOChannelInfo channelInfo;
\r
2804 channelInfo.channel = 0;
\r
2805 channelInfo.isInput = true;
\r
2806 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2807 result = ASIOGetChannelInfo( &channelInfo );
\r
2808 if ( result != ASE_OK ) {
\r
2809 drivers.removeCurrentDriver();
\r
2810 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2811 errorText_ = errorStream_.str();
\r
2812 error( RtAudioError::WARNING );
\r
2816 info.nativeFormats = 0;
\r
2817 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2818 info.nativeFormats |= RTAUDIO_SINT16;
\r
2819 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2820 info.nativeFormats |= RTAUDIO_SINT32;
\r
2821 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2822 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2823 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2824 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2825 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2826 info.nativeFormats |= RTAUDIO_SINT24;
\r
2828 if ( info.outputChannels > 0 )
\r
2829 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2830 if ( info.inputChannels > 0 )
\r
2831 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2833 info.probed = true;
\r
2834 drivers.removeCurrentDriver();
\r
2838 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2840 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2841 object->callbackEvent( index );
\r
2844 void RtApiAsio :: saveDeviceInfo( void )
\r
2848 unsigned int nDevices = getDeviceCount();
\r
2849 devices_.resize( nDevices );
\r
2850 for ( unsigned int i=0; i<nDevices; i++ )
\r
2851 devices_[i] = getDeviceInfo( i );
\r
2854 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2855 unsigned int firstChannel, unsigned int sampleRate,
\r
2856 RtAudioFormat format, unsigned int *bufferSize,
\r
2857 RtAudio::StreamOptions *options )
\r
2858 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2860 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2862 // For ASIO, a duplex stream MUST use the same driver.
\r
2863 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2864 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2868 char driverName[32];
\r
2869 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2870 if ( result != ASE_OK ) {
\r
2871 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2872 errorText_ = errorStream_.str();
\r
2876 // Only load the driver once for duplex stream.
\r
2877 if ( !isDuplexInput ) {
\r
2878 // The getDeviceInfo() function will not work when a stream is open
\r
2879 // because ASIO does not allow multiple devices to run at the same
\r
2880 // time. Thus, we'll probe the system before opening a stream and
\r
2881 // save the results for use by getDeviceInfo().
\r
2882 this->saveDeviceInfo();
\r
2884 if ( !drivers.loadDriver( driverName ) ) {
\r
2885 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2886 errorText_ = errorStream_.str();
\r
2890 result = ASIOInit( &driverInfo );
\r
2891 if ( result != ASE_OK ) {
\r
2892 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2893 errorText_ = errorStream_.str();
\r
2898 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2899 bool buffersAllocated = false;
\r
2900 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2901 unsigned int nChannels;
\r
2904 // Check the device channel count.
\r
2905 long inputChannels, outputChannels;
\r
2906 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2907 if ( result != ASE_OK ) {
\r
2908 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2909 errorText_ = errorStream_.str();
\r
2913 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2914 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2916 errorText_ = errorStream_.str();
\r
2919 stream_.nDeviceChannels[mode] = channels;
\r
2920 stream_.nUserChannels[mode] = channels;
\r
2921 stream_.channelOffset[mode] = firstChannel;
\r
2923 // Verify the sample rate is supported.
\r
2924 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2925 if ( result != ASE_OK ) {
\r
2926 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2927 errorText_ = errorStream_.str();
\r
2931 // Get the current sample rate
\r
2932 ASIOSampleRate currentRate;
\r
2933 result = ASIOGetSampleRate( ¤tRate );
\r
2934 if ( result != ASE_OK ) {
\r
2935 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2936 errorText_ = errorStream_.str();
\r
2940 // Set the sample rate only if necessary
\r
2941 if ( currentRate != sampleRate ) {
\r
2942 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2943 if ( result != ASE_OK ) {
\r
2944 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2945 errorText_ = errorStream_.str();
\r
2950 // Determine the driver data type.
\r
2951 ASIOChannelInfo channelInfo;
\r
2952 channelInfo.channel = 0;
\r
2953 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2954 else channelInfo.isInput = true;
\r
2955 result = ASIOGetChannelInfo( &channelInfo );
\r
2956 if ( result != ASE_OK ) {
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2958 errorText_ = errorStream_.str();
\r
2962 // Assuming WINDOWS host is always little-endian.
\r
2963 stream_.doByteSwap[mode] = false;
\r
2964 stream_.userFormat = format;
\r
2965 stream_.deviceFormat[mode] = 0;
\r
2966 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2967 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2968 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2970 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2971 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2972 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2974 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2975 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2976 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2978 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2979 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2980 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2982 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2983 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2984 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2987 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2988 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2989 errorText_ = errorStream_.str();
\r
2993 // Set the buffer size. For a duplex stream, this will end up
\r
2994 // setting the buffer size based on the input constraints, which
\r
2996 long minSize, maxSize, preferSize, granularity;
\r
2997 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2998 if ( result != ASE_OK ) {
\r
2999 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3000 errorText_ = errorStream_.str();
\r
3004 if ( isDuplexInput ) {
\r
3005 // When this is the duplex input (output was opened before), then we have to use the same
\r
3006 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3007 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3008 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3009 // to the "bufferSize" param as usual to set up processing buffers.
\r
3011 *bufferSize = stream_.bufferSize;
\r
3014 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3015 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3016 else if ( granularity == -1 ) {
\r
3017 // Make sure bufferSize is a power of two.
\r
3018 int log2_of_min_size = 0;
\r
3019 int log2_of_max_size = 0;
\r
3021 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3022 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3023 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3026 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3027 int min_delta_num = log2_of_min_size;
\r
3029 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3030 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3031 if (current_delta < min_delta) {
\r
3032 min_delta = current_delta;
\r
3033 min_delta_num = i;
\r
3037 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3038 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3039 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3041 else if ( granularity != 0 ) {
\r
3042 // Set to an even multiple of granularity, rounding up.
\r
3043 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3048 // we don't use it anymore, see above!
\r
3049 // Just left it here for the case...
\r
3050 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3051 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3056 stream_.bufferSize = *bufferSize;
\r
3057 stream_.nBuffers = 2;
\r
3059 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3060 else stream_.userInterleaved = true;
\r
3062 // ASIO always uses non-interleaved buffers.
\r
3063 stream_.deviceInterleaved[mode] = false;
\r
3065 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3066 if ( handle == 0 ) {
\r
3068 handle = new AsioHandle;
\r
3070 catch ( std::bad_alloc& ) {
\r
3071 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3074 handle->bufferInfos = 0;
\r
3076 // Create a manual-reset event.
\r
3077 handle->condition = CreateEvent( NULL, // no security
\r
3078 TRUE, // manual-reset
\r
3079 FALSE, // non-signaled initially
\r
3080 NULL ); // unnamed
\r
3081 stream_.apiHandle = (void *) handle;
\r
3084 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3085 // and output separately, we'll have to dispose of previously
\r
3086 // created output buffers for a duplex stream.
\r
3087 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3088 ASIODisposeBuffers();
\r
3089 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3092 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3094 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3095 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3096 if ( handle->bufferInfos == NULL ) {
\r
3097 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3098 errorText_ = errorStream_.str();
\r
3102 ASIOBufferInfo *infos;
\r
3103 infos = handle->bufferInfos;
\r
3104 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3105 infos->isInput = ASIOFalse;
\r
3106 infos->channelNum = i + stream_.channelOffset[0];
\r
3107 infos->buffers[0] = infos->buffers[1] = 0;
\r
3109 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3110 infos->isInput = ASIOTrue;
\r
3111 infos->channelNum = i + stream_.channelOffset[1];
\r
3112 infos->buffers[0] = infos->buffers[1] = 0;
\r
3115 // prepare for callbacks
\r
3116 stream_.sampleRate = sampleRate;
\r
3117 stream_.device[mode] = device;
\r
3118 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3120 // store this class instance before registering callbacks, that are going to use it
\r
3121 asioCallbackInfo = &stream_.callbackInfo;
\r
3122 stream_.callbackInfo.object = (void *) this;
\r
3124 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3125 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3126 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3127 asioCallbacks.asioMessage = &asioMessages;
\r
3128 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3129 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3130 if ( result != ASE_OK ) {
\r
3131 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3132 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3133 // in that case, let's be naïve and try that instead
\r
3134 *bufferSize = preferSize;
\r
3135 stream_.bufferSize = *bufferSize;
\r
3136 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3139 if ( result != ASE_OK ) {
\r
3140 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3141 errorText_ = errorStream_.str();
\r
3144 buffersAllocated = true;
\r
3145 stream_.state = STREAM_STOPPED;
\r
3147 // Set flags for buffer conversion.
\r
3148 stream_.doConvertBuffer[mode] = false;
\r
3149 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3150 stream_.doConvertBuffer[mode] = true;
\r
3151 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3152 stream_.nUserChannels[mode] > 1 )
\r
3153 stream_.doConvertBuffer[mode] = true;
\r
3155 // Allocate necessary internal buffers
\r
3156 unsigned long bufferBytes;
\r
3157 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3158 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3159 if ( stream_.userBuffer[mode] == NULL ) {
\r
3160 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3164 if ( stream_.doConvertBuffer[mode] ) {
\r
3166 bool makeBuffer = true;
\r
3167 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3168 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3169 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3170 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3173 if ( makeBuffer ) {
\r
3174 bufferBytes *= *bufferSize;
\r
3175 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3176 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3177 if ( stream_.deviceBuffer == NULL ) {
\r
3178 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3184 // Determine device latencies
\r
3185 long inputLatency, outputLatency;
\r
3186 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3187 if ( result != ASE_OK ) {
\r
3188 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3189 errorText_ = errorStream_.str();
\r
3190 error( RtAudioError::WARNING); // warn but don't fail
\r
3193 stream_.latency[0] = outputLatency;
\r
3194 stream_.latency[1] = inputLatency;
\r
3197 // Setup the buffer conversion information structure. We don't use
\r
3198 // buffers to do channel offsets, so we override that parameter
\r
3200 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3205 if ( !isDuplexInput ) {
\r
3206 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3207 // So we clean up for single channel only
\r
3209 if ( buffersAllocated )
\r
3210 ASIODisposeBuffers();
\r
3212 drivers.removeCurrentDriver();
\r
3215 CloseHandle( handle->condition );
\r
3216 if ( handle->bufferInfos )
\r
3217 free( handle->bufferInfos );
\r
3220 stream_.apiHandle = 0;
\r
3224 if ( stream_.userBuffer[mode] ) {
\r
3225 free( stream_.userBuffer[mode] );
\r
3226 stream_.userBuffer[mode] = 0;
\r
3229 if ( stream_.deviceBuffer ) {
\r
3230 free( stream_.deviceBuffer );
\r
3231 stream_.deviceBuffer = 0;
\r
3236 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3238 void RtApiAsio :: closeStream()
\r
3240 if ( stream_.state == STREAM_CLOSED ) {
\r
3241 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3242 error( RtAudioError::WARNING );
\r
3246 if ( stream_.state == STREAM_RUNNING ) {
\r
3247 stream_.state = STREAM_STOPPED;
\r
3250 ASIODisposeBuffers();
\r
3251 drivers.removeCurrentDriver();
\r
3253 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 CloseHandle( handle->condition );
\r
3256 if ( handle->bufferInfos )
\r
3257 free( handle->bufferInfos );
\r
3259 stream_.apiHandle = 0;
\r
3262 for ( int i=0; i<2; i++ ) {
\r
3263 if ( stream_.userBuffer[i] ) {
\r
3264 free( stream_.userBuffer[i] );
\r
3265 stream_.userBuffer[i] = 0;
\r
3269 if ( stream_.deviceBuffer ) {
\r
3270 free( stream_.deviceBuffer );
\r
3271 stream_.deviceBuffer = 0;
\r
3274 stream_.mode = UNINITIALIZED;
\r
3275 stream_.state = STREAM_CLOSED;
\r
// Guards against re-entering the stop path while a stop is in flight (reset in startStream()).
bool stopThreadCalled = false;
\r
3280 void RtApiAsio :: startStream()
\r
3283 if ( stream_.state == STREAM_RUNNING ) {
\r
3284 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3285 error( RtAudioError::WARNING );
\r
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3290 ASIOError result = ASIOStart();
\r
3291 if ( result != ASE_OK ) {
\r
3292 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3293 errorText_ = errorStream_.str();
\r
3297 handle->drainCounter = 0;
\r
3298 handle->internalDrain = false;
\r
3299 ResetEvent( handle->condition );
\r
3300 stream_.state = STREAM_RUNNING;
\r
3304 stopThreadCalled = false;
\r
3306 if ( result == ASE_OK ) return;
\r
3307 error( RtAudioError::SYSTEM_ERROR );
\r
3310 void RtApiAsio :: stopStream()
\r
3313 if ( stream_.state == STREAM_STOPPED ) {
\r
3314 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3315 error( RtAudioError::WARNING );
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3320 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3321 if ( handle->drainCounter == 0 ) {
\r
3322 handle->drainCounter = 2;
\r
3323 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3327 stream_.state = STREAM_STOPPED;
\r
3329 ASIOError result = ASIOStop();
\r
3330 if ( result != ASE_OK ) {
\r
3331 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3332 errorText_ = errorStream_.str();
\r
3335 if ( result == ASE_OK ) return;
\r
3336 error( RtAudioError::SYSTEM_ERROR );
\r
3339 void RtApiAsio :: abortStream()
\r
3342 if ( stream_.state == STREAM_STOPPED ) {
\r
3343 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3344 error( RtAudioError::WARNING );
\r
3348 // The following lines were commented-out because some behavior was
\r
3349 // noted where the device buffers need to be zeroed to avoid
\r
3350 // continuing sound, even when the device buffers are completely
\r
3351 // disposed. So now, calling abort is the same as calling stop.
\r
3352 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3353 // handle->drainCounter = 2;
\r
3357 // This function will be called by a spawned thread when the user
\r
3358 // callback function signals that the stream should be stopped or
\r
3359 // aborted. It is necessary to handle it this way because the
\r
3360 // callbackEvent() function must return before the ASIOStop()
\r
3361 // function will return.
\r
3362 static unsigned __stdcall asioStopStream( void *ptr )
\r
3364 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3365 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3367 object->stopStream();
\r
3368 _endthreadex( 0 );
\r
3372 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3374 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3375 if ( stream_.state == STREAM_CLOSED ) {
\r
3376 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3377 error( RtAudioError::WARNING );
\r
3381 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3382 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3384 // Check if we were draining the stream and signal if finished.
\r
3385 if ( handle->drainCounter > 3 ) {
\r
3387 stream_.state = STREAM_STOPPING;
\r
3388 if ( handle->internalDrain == false )
\r
3389 SetEvent( handle->condition );
\r
3390 else { // spawn a thread to stop the stream
\r
3391 unsigned threadId;
\r
3392 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3393 &stream_.callbackInfo, 0, &threadId );
\r
3398 // Invoke user callback to get fresh output data UNLESS we are
\r
3399 // draining stream.
\r
3400 if ( handle->drainCounter == 0 ) {
\r
3401 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3402 double streamTime = getStreamTime();
\r
3403 RtAudioStreamStatus status = 0;
\r
3404 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3405 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3408 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3409 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3412 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3413 stream_.bufferSize, streamTime, status, info->userData );
\r
3414 if ( cbReturnValue == 2 ) {
\r
3415 stream_.state = STREAM_STOPPING;
\r
3416 handle->drainCounter = 2;
\r
3417 unsigned threadId;
\r
3418 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3419 &stream_.callbackInfo, 0, &threadId );
\r
3422 else if ( cbReturnValue == 1 ) {
\r
3423 handle->drainCounter = 1;
\r
3424 handle->internalDrain = true;
\r
3428 unsigned int nChannels, bufferBytes, i, j;
\r
3429 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3430 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3432 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3434 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3436 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3437 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3438 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3442 else if ( stream_.doConvertBuffer[0] ) {
\r
3444 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3445 if ( stream_.doByteSwap[0] )
\r
3446 byteSwapBuffer( stream_.deviceBuffer,
\r
3447 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3448 stream_.deviceFormat[0] );
\r
3450 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3451 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3452 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3453 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3459 if ( stream_.doByteSwap[0] )
\r
3460 byteSwapBuffer( stream_.userBuffer[0],
\r
3461 stream_.bufferSize * stream_.nUserChannels[0],
\r
3462 stream_.userFormat );
\r
3464 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3465 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3466 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3467 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3473 // Don't bother draining input
\r
3474 if ( handle->drainCounter ) {
\r
3475 handle->drainCounter++;
\r
3479 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3481 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3483 if (stream_.doConvertBuffer[1]) {
\r
3485 // Always interleave ASIO input data.
\r
3486 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3487 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3488 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3489 handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 if ( stream_.doByteSwap[1] )
\r
3494 byteSwapBuffer( stream_.deviceBuffer,
\r
3495 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3496 stream_.deviceFormat[1] );
\r
3497 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3501 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3502 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3503 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3504 handle->bufferInfos[i].buffers[bufferIndex],
\r
3509 if ( stream_.doByteSwap[1] )
\r
3510 byteSwapBuffer( stream_.userBuffer[1],
\r
3511 stream_.bufferSize * stream_.nUserChannels[1],
\r
3512 stream_.userFormat );
\r
3517 // The following call was suggested by Malte Clasen. While the API
\r
3518 // documentation indicates it should not be required, some device
\r
3519 // drivers apparently do not function correctly without it.
\r
3520 ASIOOutputReady();
\r
3522 RtApi::tickStreamTime();
\r
3526 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3528 // The ASIO documentation says that this usually only happens during
\r
3529 // external sync. Audio processing is not stopped by the driver,
\r
3530 // actual sample rate might not have even changed, maybe only the
\r
3531 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3534 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3536 object->stopStream();
\r
3538 catch ( RtAudioError &exception ) {
\r
3539 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3543 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3546 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3550 switch( selector ) {
\r
3551 case kAsioSelectorSupported:
\r
3552 if ( value == kAsioResetRequest
\r
3553 || value == kAsioEngineVersion
\r
3554 || value == kAsioResyncRequest
\r
3555 || value == kAsioLatenciesChanged
\r
3556 // The following three were added for ASIO 2.0, you don't
\r
3557 // necessarily have to support them.
\r
3558 || value == kAsioSupportsTimeInfo
\r
3559 || value == kAsioSupportsTimeCode
\r
3560 || value == kAsioSupportsInputMonitor)
\r
3563 case kAsioResetRequest:
\r
3564 // Defer the task and perform the reset of the driver during the
\r
3565 // next "safe" situation. You cannot reset the driver right now,
\r
3566 // as this code is called from the driver. Reset the driver is
\r
3567 // done by completely destruct is. I.e. ASIOStop(),
\r
3568 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3570 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3573 case kAsioResyncRequest:
\r
3574 // This informs the application that the driver encountered some
\r
3575 // non-fatal data loss. It is used for synchronization purposes
\r
3576 // of different media. Added mainly to work around the Win16Mutex
\r
3577 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3578 // which could lose data because the Mutex was held too long by
\r
3579 // another thread. However a driver can issue it in other
\r
3580 // situations, too.
\r
3581 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3585 case kAsioLatenciesChanged:
\r
3586 // This will inform the host application that the drivers were
\r
3587 // latencies changed. Beware, it this does not mean that the
\r
3588 // buffer sizes have changed! You might need to update internal
\r
3590 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3593 case kAsioEngineVersion:
\r
3594 // Return the supported ASIO version of the host application. If
\r
3595 // a host application does not implement this selector, ASIO 1.0
\r
3596 // is assumed by the driver.
\r
3599 case kAsioSupportsTimeInfo:
\r
3600 // Informs the driver whether the
\r
3601 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3602 // For compatibility with ASIO 1.0 drivers the host application
\r
3603 // should always support the "old" bufferSwitch method, too.
\r
3606 case kAsioSupportsTimeCode:
\r
3607 // Informs the driver whether application is interested in time
\r
3608 // code info. If an application does not need to know about time
\r
3609 // code, the driver has less work to do.
\r
3616 static const char* getAsioErrorString( ASIOError result )
\r
3621 const char*message;
\r
3624 static const Messages m[] =
\r
3626 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3627 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3628 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3629 { ASE_InvalidMode, "Invalid mode." },
\r
3630 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3631 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3632 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3635 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3636 if ( m[i].value == result ) return m[i].message;
\r
3638 return "Unknown error.";
\r
3641 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3645 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3647 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3648 // - Introduces support for the Windows WASAPI API
\r
3649 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3650 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3651 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3656 #include <audioclient.h>
\r
3658 #include <mmdeviceapi.h>
\r
3659 #include <functiondiscoverykeys_devpkey.h>
\r
3661 //=============================================================================
\r
3663 #define SAFE_RELEASE( objectPtr )\
\r
3666 objectPtr->Release();\
\r
3667 objectPtr = NULL;\
\r
3670 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3672 //-----------------------------------------------------------------------------
\r
3674 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3675 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3676 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3677 // provide intermediate storage for read / write synchronization.
\r
3678 class WasapiBuffer
\r
3682 : buffer_( NULL ),
\r
3691 // sets the length of the internal ring buffer
\r
3692 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3695 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3697 bufferSize_ = bufferSize;
\r
3702 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3703 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3705 if ( !buffer || // incoming buffer is NULL
\r
3706 bufferSize == 0 || // incoming buffer has no data
\r
3707 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3712 unsigned int relOutIndex = outIndex_;
\r
3713 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3714 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3715 relOutIndex += bufferSize_;
\r
3718 // "in" index can end on the "out" index but cannot begin at it
\r
3719 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3720 return false; // not enough space between "in" index and "out" index
\r
3723 // copy buffer from external to internal
\r
3724 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3725 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3726 int fromInSize = bufferSize - fromZeroSize;
\r
3730 case RTAUDIO_SINT8:
\r
3731 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3732 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3734 case RTAUDIO_SINT16:
\r
3735 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3736 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3738 case RTAUDIO_SINT24:
\r
3739 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3740 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3742 case RTAUDIO_SINT32:
\r
3743 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3744 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3746 case RTAUDIO_FLOAT32:
\r
3747 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3748 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3750 case RTAUDIO_FLOAT64:
\r
3751 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3752 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3756 // update "in" index
\r
3757 inIndex_ += bufferSize;
\r
3758 inIndex_ %= bufferSize_;
\r
3763 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3764 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3766 if ( !buffer || // incoming buffer is NULL
\r
3767 bufferSize == 0 || // incoming buffer has no data
\r
3768 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3773 unsigned int relInIndex = inIndex_;
\r
3774 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3775 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3776 relInIndex += bufferSize_;
\r
3779 // "out" index can begin at and end on the "in" index
\r
3780 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3781 return false; // not enough space between "out" index and "in" index
\r
3784 // copy buffer from internal to external
\r
3785 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3786 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3787 int fromOutSize = bufferSize - fromZeroSize;
\r
3791 case RTAUDIO_SINT8:
\r
3792 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3793 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3795 case RTAUDIO_SINT16:
\r
3796 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3797 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3799 case RTAUDIO_SINT24:
\r
3800 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3801 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3803 case RTAUDIO_SINT32:
\r
3804 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3805 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3807 case RTAUDIO_FLOAT32:
\r
3808 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3809 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3811 case RTAUDIO_FLOAT64:
\r
3812 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3813 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3817 // update "out" index
\r
3818 outIndex_ += bufferSize;
\r
3819 outIndex_ %= bufferSize_;
\r
3826 unsigned int bufferSize_;
\r
3827 unsigned int inIndex_;
\r
3828 unsigned int outIndex_;
\r
3831 //-----------------------------------------------------------------------------
\r
3833 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3834 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3835 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3836 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3837 // one rate and its multiple.
\r
3838 void convertBufferWasapi( char* outBuffer,
\r
3839 const char* inBuffer,
\r
3840 const unsigned int& channelCount,
\r
3841 const unsigned int& inSampleRate,
\r
3842 const unsigned int& outSampleRate,
\r
3843 const unsigned int& inSampleCount,
\r
3844 unsigned int& outSampleCount,
\r
3845 const RtAudioFormat& format )
\r
3847 // calculate the new outSampleCount and relative sampleStep
\r
3848 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3849 float sampleStep = 1.0f / sampleRatio;
\r
3850 float inSampleFraction = 0.0f;
\r
3852 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3854 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3855 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3857 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3861 case RTAUDIO_SINT8:
\r
3862 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3864 case RTAUDIO_SINT16:
\r
3865 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3867 case RTAUDIO_SINT24:
\r
3868 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3870 case RTAUDIO_SINT32:
\r
3871 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3873 case RTAUDIO_FLOAT32:
\r
3874 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3876 case RTAUDIO_FLOAT64:
\r
3877 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3881 // jump to next in sample
\r
3882 inSampleFraction += sampleStep;
\r
3886 //-----------------------------------------------------------------------------
\r
3888 // A structure to hold various information related to the WASAPI implementation.
\r
3889 struct WasapiHandle
\r
3891 IAudioClient* captureAudioClient;
\r
3892 IAudioClient* renderAudioClient;
\r
3893 IAudioCaptureClient* captureClient;
\r
3894 IAudioRenderClient* renderClient;
\r
3895 HANDLE captureEvent;
\r
3896 HANDLE renderEvent;
\r
3899 : captureAudioClient( NULL ),
\r
3900 renderAudioClient( NULL ),
\r
3901 captureClient( NULL ),
\r
3902 renderClient( NULL ),
\r
3903 captureEvent( NULL ),
\r
3904 renderEvent( NULL ) {}
\r
3907 //=============================================================================
\r
3909 RtApiWasapi::RtApiWasapi()
\r
3910 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3912 // WASAPI can run either apartment or multi-threaded
\r
3913 HRESULT hr = CoInitialize( NULL );
\r
3914 if ( !FAILED( hr ) )
\r
3915 coInitialized_ = true;
\r
3917 // Instantiate device enumerator
\r
3918 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3919 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3920 ( void** ) &deviceEnumerator_ );
\r
3922 if ( FAILED( hr ) ) {
\r
3923 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3924 error( RtAudioError::DRIVER_ERROR );
\r
3928 //-----------------------------------------------------------------------------
\r
3930 RtApiWasapi::~RtApiWasapi()
\r
3932 if ( stream_.state != STREAM_CLOSED )
\r
3935 SAFE_RELEASE( deviceEnumerator_ );
\r
3937 // If this object previously called CoInitialize()
\r
3938 if ( coInitialized_ )
\r
3942 //=============================================================================
\r
3944 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3946 unsigned int captureDeviceCount = 0;
\r
3947 unsigned int renderDeviceCount = 0;
\r
3949 IMMDeviceCollection* captureDevices = NULL;
\r
3950 IMMDeviceCollection* renderDevices = NULL;
\r
3952 // Count capture devices
\r
3953 errorText_.clear();
\r
3954 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3955 if ( FAILED( hr ) ) {
\r
3956 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3960 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3961 if ( FAILED( hr ) ) {
\r
3962 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3966 // Count render devices
\r
3967 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3968 if ( FAILED( hr ) ) {
\r
3969 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3973 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3974 if ( FAILED( hr ) ) {
\r
3975 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3980 // release all references
\r
3981 SAFE_RELEASE( captureDevices );
\r
3982 SAFE_RELEASE( renderDevices );
\r
3984 if ( errorText_.empty() )
\r
3985 return captureDeviceCount + renderDeviceCount;
\r
3987 error( RtAudioError::DRIVER_ERROR );
\r
3991 //-----------------------------------------------------------------------------
\r
3993 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3995 RtAudio::DeviceInfo info;
\r
3996 unsigned int captureDeviceCount = 0;
\r
3997 unsigned int renderDeviceCount = 0;
\r
3998 std::string defaultDeviceName;
\r
3999 bool isCaptureDevice = false;
\r
4001 PROPVARIANT deviceNameProp;
\r
4002 PROPVARIANT defaultDeviceNameProp;
\r
4004 IMMDeviceCollection* captureDevices = NULL;
\r
4005 IMMDeviceCollection* renderDevices = NULL;
\r
4006 IMMDevice* devicePtr = NULL;
\r
4007 IMMDevice* defaultDevicePtr = NULL;
\r
4008 IAudioClient* audioClient = NULL;
\r
4009 IPropertyStore* devicePropStore = NULL;
\r
4010 IPropertyStore* defaultDevicePropStore = NULL;
\r
4012 WAVEFORMATEX* deviceFormat = NULL;
\r
4013 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4016 info.probed = false;
\r
4018 // Count capture devices
\r
4019 errorText_.clear();
\r
4020 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4021 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4022 if ( FAILED( hr ) ) {
\r
4023 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4027 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4028 if ( FAILED( hr ) ) {
\r
4029 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4033 // Count render devices
\r
4034 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4040 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4041 if ( FAILED( hr ) ) {
\r
4042 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4046 // validate device index
\r
4047 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4048 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4049 errorType = RtAudioError::INVALID_USE;
\r
4053 // determine whether index falls within capture or render devices
\r
4054 if ( device >= renderDeviceCount ) {
\r
4055 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4056 if ( FAILED( hr ) ) {
\r
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4060 isCaptureDevice = true;
\r
4063 hr = renderDevices->Item( device, &devicePtr );
\r
4064 if ( FAILED( hr ) ) {
\r
4065 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4068 isCaptureDevice = false;
\r
4071 // get default device name
\r
4072 if ( isCaptureDevice ) {
\r
4073 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4074 if ( FAILED( hr ) ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4080 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4081 if ( FAILED( hr ) ) {
\r
4082 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4087 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4088 if ( FAILED( hr ) ) {
\r
4089 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4092 PropVariantInit( &defaultDeviceNameProp );
\r
4094 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4095 if ( FAILED( hr ) ) {
\r
4096 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4100 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4103 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4104 if ( FAILED( hr ) ) {
\r
4105 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4109 PropVariantInit( &deviceNameProp );
\r
4111 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4112 if ( FAILED( hr ) ) {
\r
4113 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4117 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4120 if ( isCaptureDevice ) {
\r
4121 info.isDefaultInput = info.name == defaultDeviceName;
\r
4122 info.isDefaultOutput = false;
\r
4125 info.isDefaultInput = false;
\r
4126 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4130 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4131 if ( FAILED( hr ) ) {
\r
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4136 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4137 if ( FAILED( hr ) ) {
\r
4138 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4142 if ( isCaptureDevice ) {
\r
4143 info.inputChannels = deviceFormat->nChannels;
\r
4144 info.outputChannels = 0;
\r
4145 info.duplexChannels = 0;
\r
4148 info.inputChannels = 0;
\r
4149 info.outputChannels = deviceFormat->nChannels;
\r
4150 info.duplexChannels = 0;
\r
4154 info.sampleRates.clear();
\r
4156 // allow support for all sample rates as we have a built-in sample rate converter
\r
4157 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4158 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4160 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4163 info.nativeFormats = 0;
\r
4165 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4166 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4167 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4169 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4170 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4172 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4173 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4176 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4177 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4178 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4180 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4181 info.nativeFormats |= RTAUDIO_SINT8;
\r
4183 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4184 info.nativeFormats |= RTAUDIO_SINT16;
\r
4186 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4187 info.nativeFormats |= RTAUDIO_SINT24;
\r
4189 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4190 info.nativeFormats |= RTAUDIO_SINT32;
\r
4195 info.probed = true;
\r
4198 // release all references
\r
4199 PropVariantClear( &deviceNameProp );
\r
4200 PropVariantClear( &defaultDeviceNameProp );
\r
4202 SAFE_RELEASE( captureDevices );
\r
4203 SAFE_RELEASE( renderDevices );
\r
4204 SAFE_RELEASE( devicePtr );
\r
4205 SAFE_RELEASE( defaultDevicePtr );
\r
4206 SAFE_RELEASE( audioClient );
\r
4207 SAFE_RELEASE( devicePropStore );
\r
4208 SAFE_RELEASE( defaultDevicePropStore );
\r
4210 CoTaskMemFree( deviceFormat );
\r
4211 CoTaskMemFree( closestMatchFormat );
\r
4213 if ( !errorText_.empty() )
\r
4214 error( errorType );
\r
4218 //-----------------------------------------------------------------------------
\r
4220 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4222 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4223 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4231 //-----------------------------------------------------------------------------
\r
4233 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4235 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4236 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4244 //-----------------------------------------------------------------------------
\r
4246 void RtApiWasapi::closeStream( void )
\r
4248 if ( stream_.state == STREAM_CLOSED ) {
\r
4249 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4250 error( RtAudioError::WARNING );
\r
4254 if ( stream_.state != STREAM_STOPPED )
\r
4257 // clean up stream memory
\r
4258 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4259 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4261 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4262 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4264 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4265 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4267 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4268 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4270 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4271 stream_.apiHandle = NULL;
\r
4273 for ( int i = 0; i < 2; i++ ) {
\r
4274 if ( stream_.userBuffer[i] ) {
\r
4275 free( stream_.userBuffer[i] );
\r
4276 stream_.userBuffer[i] = 0;
\r
4280 if ( stream_.deviceBuffer ) {
\r
4281 free( stream_.deviceBuffer );
\r
4282 stream_.deviceBuffer = 0;
\r
4285 // update stream state
\r
4286 stream_.state = STREAM_CLOSED;
\r
4289 //-----------------------------------------------------------------------------
\r
4291 void RtApiWasapi::startStream( void )
\r
4295 if ( stream_.state == STREAM_RUNNING ) {
\r
4296 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4297 error( RtAudioError::WARNING );
\r
4301 // update stream state
\r
4302 stream_.state = STREAM_RUNNING;
\r
4304 // create WASAPI stream thread
\r
4305 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4307 if ( !stream_.callbackInfo.thread ) {
\r
4308 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4309 error( RtAudioError::THREAD_ERROR );
\r
4312 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4313 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4317 //-----------------------------------------------------------------------------
\r
4319 void RtApiWasapi::stopStream( void )
\r
4323 if ( stream_.state == STREAM_STOPPED ) {
\r
4324 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4325 error( RtAudioError::WARNING );
\r
4329 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4330 stream_.state = STREAM_STOPPING;
\r
4332 // wait until stream thread is stopped
\r
4333 while( stream_.state != STREAM_STOPPED ) {
\r
4337 // Wait for the last buffer to play before stopping.
\r
4338 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4340 // stop capture client if applicable
\r
4341 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4342 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4343 if ( FAILED( hr ) ) {
\r
4344 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4345 error( RtAudioError::DRIVER_ERROR );
\r
4350 // stop render client if applicable
\r
4351 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4352 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4353 if ( FAILED( hr ) ) {
\r
4354 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4355 error( RtAudioError::DRIVER_ERROR );
\r
4360 // close thread handle
\r
4361 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4362 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4363 error( RtAudioError::THREAD_ERROR );
\r
4367 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4370 //-----------------------------------------------------------------------------
\r
4372 void RtApiWasapi::abortStream( void )
\r
4376 if ( stream_.state == STREAM_STOPPED ) {
\r
4377 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4378 error( RtAudioError::WARNING );
\r
4382 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4383 stream_.state = STREAM_STOPPING;
\r
4385 // wait until stream thread is stopped
\r
4386 while ( stream_.state != STREAM_STOPPED ) {
\r
4390 // stop capture client if applicable
\r
4391 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4392 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4393 if ( FAILED( hr ) ) {
\r
4394 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4395 error( RtAudioError::DRIVER_ERROR );
\r
4400 // stop render client if applicable
\r
4401 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4402 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4403 if ( FAILED( hr ) ) {
\r
4404 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4405 error( RtAudioError::DRIVER_ERROR );
\r
4410 // close thread handle
\r
4411 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4412 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4413 error( RtAudioError::THREAD_ERROR );
\r
4417 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4420 //-----------------------------------------------------------------------------
\r
4422 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4423 unsigned int firstChannel, unsigned int sampleRate,
\r
4424 RtAudioFormat format, unsigned int* bufferSize,
\r
4425 RtAudio::StreamOptions* options )
\r
4427 bool methodResult = FAILURE;
\r
4428 unsigned int captureDeviceCount = 0;
\r
4429 unsigned int renderDeviceCount = 0;
\r
4431 IMMDeviceCollection* captureDevices = NULL;
\r
4432 IMMDeviceCollection* renderDevices = NULL;
\r
4433 IMMDevice* devicePtr = NULL;
\r
4434 WAVEFORMATEX* deviceFormat = NULL;
\r
4435 unsigned int bufferBytes;
\r
4436 stream_.state = STREAM_STOPPED;
\r
4438 // create API Handle if not already created
\r
4439 if ( !stream_.apiHandle )
\r
4440 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4442 // Count capture devices
\r
4443 errorText_.clear();
\r
4444 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4445 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4446 if ( FAILED( hr ) ) {
\r
4447 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4451 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4452 if ( FAILED( hr ) ) {
\r
4453 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4457 // Count render devices
\r
4458 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4459 if ( FAILED( hr ) ) {
\r
4460 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4464 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4465 if ( FAILED( hr ) ) {
\r
4466 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4470 // validate device index
\r
4471 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4472 errorType = RtAudioError::INVALID_USE;
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4477 // determine whether index falls within capture or render devices
\r
4478 if ( device >= renderDeviceCount ) {
\r
4479 if ( mode != INPUT ) {
\r
4480 errorType = RtAudioError::INVALID_USE;
\r
4481 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4485 // retrieve captureAudioClient from devicePtr
\r
4486 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4488 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4489 if ( FAILED( hr ) ) {
\r
4490 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4494 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4495 NULL, ( void** ) &captureAudioClient );
\r
4496 if ( FAILED( hr ) ) {
\r
4497 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4501 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4502 if ( FAILED( hr ) ) {
\r
4503 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4507 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4508 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4511 if ( mode != OUTPUT ) {
\r
4512 errorType = RtAudioError::INVALID_USE;
\r
4513 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4517 // retrieve renderAudioClient from devicePtr
\r
4518 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4520 hr = renderDevices->Item( device, &devicePtr );
\r
4521 if ( FAILED( hr ) ) {
\r
4522 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4526 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4527 NULL, ( void** ) &renderAudioClient );
\r
4528 if ( FAILED( hr ) ) {
\r
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4533 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4534 if ( FAILED( hr ) ) {
\r
4535 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4539 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4540 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4543 // fill stream data
\r
4544 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4545 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4546 stream_.mode = DUPLEX;
\r
4549 stream_.mode = mode;
\r
4552 stream_.device[mode] = device;
\r
4553 stream_.doByteSwap[mode] = false;
\r
4554 stream_.sampleRate = sampleRate;
\r
4555 stream_.bufferSize = *bufferSize;
\r
4556 stream_.nBuffers = 1;
\r
4557 stream_.nUserChannels[mode] = channels;
\r
4558 stream_.channelOffset[mode] = firstChannel;
\r
4559 stream_.userFormat = format;
\r
4560 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4562 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4563 stream_.userInterleaved = false;
\r
4565 stream_.userInterleaved = true;
\r
4566 stream_.deviceInterleaved[mode] = true;
\r
4568 // Set flags for buffer conversion.
\r
4569 stream_.doConvertBuffer[mode] = false;
\r
4570 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4571 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4572 stream_.doConvertBuffer[mode] = true;
\r
4573 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4574 stream_.nUserChannels[mode] > 1 )
\r
4575 stream_.doConvertBuffer[mode] = true;
\r
4577 if ( stream_.doConvertBuffer[mode] )
\r
4578 setConvertInfo( mode, 0 );
\r
4580 // Allocate necessary internal buffers
\r
4581 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4583 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4584 if ( !stream_.userBuffer[mode] ) {
\r
4585 errorType = RtAudioError::MEMORY_ERROR;
\r
4586 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4590 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4591 stream_.callbackInfo.priority = 15;
\r
4593 stream_.callbackInfo.priority = 0;
\r
4595 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4596 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4598 methodResult = SUCCESS;
\r
4602 SAFE_RELEASE( captureDevices );
\r
4603 SAFE_RELEASE( renderDevices );
\r
4604 SAFE_RELEASE( devicePtr );
\r
4605 CoTaskMemFree( deviceFormat );
\r
4607 // if method failed, close the stream
\r
4608 if ( methodResult == FAILURE )
\r
4611 if ( !errorText_.empty() )
\r
4612 error( errorType );
\r
4613 return methodResult;
\r
4616 //=============================================================================
\r
4618 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4621 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4626 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4629 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4634 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4637 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4642 //-----------------------------------------------------------------------------
\r
4644 void RtApiWasapi::wasapiThread()
\r
4646 // as this is a new thread, we must CoInitialize it
\r
4647 CoInitialize( NULL );
\r
4651 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4652 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4653 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4654 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4655 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4656 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4658 WAVEFORMATEX* captureFormat = NULL;
\r
4659 WAVEFORMATEX* renderFormat = NULL;
\r
4660 float captureSrRatio = 0.0f;
\r
4661 float renderSrRatio = 0.0f;
\r
4662 WasapiBuffer captureBuffer;
\r
4663 WasapiBuffer renderBuffer;
\r
4665 // declare local stream variables
\r
4666 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4667 BYTE* streamBuffer = NULL;
\r
4668 unsigned long captureFlags = 0;
\r
4669 unsigned int bufferFrameCount = 0;
\r
4670 unsigned int numFramesPadding = 0;
\r
4671 unsigned int convBufferSize = 0;
\r
4672 bool callbackPushed = false;
\r
4673 bool callbackPulled = false;
\r
4674 bool callbackStopped = false;
\r
4675 int callbackResult = 0;
\r
4677 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4678 char* convBuffer = NULL;
\r
4679 unsigned int convBuffSize = 0;
\r
4680 unsigned int deviceBuffSize = 0;
\r
4682 errorText_.clear();
\r
4683 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4685 // Attempt to assign "Pro Audio" characteristic to thread
\r
4686 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4688 DWORD taskIndex = 0;
\r
4689 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4690 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4691 FreeLibrary( AvrtDll );
\r
4694 // start capture stream if applicable
\r
4695 if ( captureAudioClient ) {
\r
4696 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4697 if ( FAILED( hr ) ) {
\r
4698 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4702 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4704 // initialize capture stream according to desire buffer size
\r
4705 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4706 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4708 if ( !captureClient ) {
\r
4709 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4710 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4711 desiredBufferPeriod,
\r
4712 desiredBufferPeriod,
\r
4715 if ( FAILED( hr ) ) {
\r
4716 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4720 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4721 ( void** ) &captureClient );
\r
4722 if ( FAILED( hr ) ) {
\r
4723 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4727 // configure captureEvent to trigger on every available capture buffer
\r
4728 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4729 if ( !captureEvent ) {
\r
4730 errorType = RtAudioError::SYSTEM_ERROR;
\r
4731 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4735 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4736 if ( FAILED( hr ) ) {
\r
4737 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4741 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4742 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4745 unsigned int inBufferSize = 0;
\r
4746 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4747 if ( FAILED( hr ) ) {
\r
4748 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4752 // scale outBufferSize according to stream->user sample rate ratio
\r
4753 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4754 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4756 // set captureBuffer size
\r
4757 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4759 // reset the capture stream
\r
4760 hr = captureAudioClient->Reset();
\r
4761 if ( FAILED( hr ) ) {
\r
4762 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4766 // start the capture stream
\r
4767 hr = captureAudioClient->Start();
\r
4768 if ( FAILED( hr ) ) {
\r
4769 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4774 // start render stream if applicable
\r
4775 if ( renderAudioClient ) {
\r
4776 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4777 if ( FAILED( hr ) ) {
\r
4778 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4782 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4784 // initialize render stream according to desire buffer size
\r
4785 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4786 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4788 if ( !renderClient ) {
\r
4789 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4790 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4791 desiredBufferPeriod,
\r
4792 desiredBufferPeriod,
\r
4795 if ( FAILED( hr ) ) {
\r
4796 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4800 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4801 ( void** ) &renderClient );
\r
4802 if ( FAILED( hr ) ) {
\r
4803 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4807 // configure renderEvent to trigger on every available render buffer
\r
4808 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4809 if ( !renderEvent ) {
\r
4810 errorType = RtAudioError::SYSTEM_ERROR;
\r
4811 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4815 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4816 if ( FAILED( hr ) ) {
\r
4817 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4821 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4822 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4825 unsigned int outBufferSize = 0;
\r
4826 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4827 if ( FAILED( hr ) ) {
\r
4828 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4832 // scale inBufferSize according to user->stream sample rate ratio
\r
4833 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4834 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4836 // set renderBuffer size
\r
4837 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4839 // reset the render stream
\r
4840 hr = renderAudioClient->Reset();
\r
4841 if ( FAILED( hr ) ) {
\r
4842 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4846 // start the render stream
\r
4847 hr = renderAudioClient->Start();
\r
4848 if ( FAILED( hr ) ) {
\r
4849 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4854 if ( stream_.mode == INPUT ) {
\r
4855 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4856 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4858 else if ( stream_.mode == OUTPUT ) {
\r
4859 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4860 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4862 else if ( stream_.mode == DUPLEX ) {
\r
4863 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4864 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4865 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4866 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4869 convBuffer = ( char* ) malloc( convBuffSize );
\r
4870 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4871 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4872 errorType = RtAudioError::MEMORY_ERROR;
\r
4873 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4877 // stream process loop
\r
4878 while ( stream_.state != STREAM_STOPPING ) {
\r
4879 if ( !callbackPulled ) {
\r
4882 // 1. Pull callback buffer from inputBuffer
\r
4883 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4884 // Convert callback buffer to user format
\r
4886 if ( captureAudioClient ) {
\r
4887 // Pull callback buffer from inputBuffer
\r
4888 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4889 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4890 stream_.deviceFormat[INPUT] );
\r
4892 if ( callbackPulled ) {
\r
4893 // Convert callback buffer to user sample rate
\r
4894 convertBufferWasapi( stream_.deviceBuffer,
\r
4896 stream_.nDeviceChannels[INPUT],
\r
4897 captureFormat->nSamplesPerSec,
\r
4898 stream_.sampleRate,
\r
4899 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4901 stream_.deviceFormat[INPUT] );
\r
4903 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4904 // Convert callback buffer to user format
\r
4905 convertBuffer( stream_.userBuffer[INPUT],
\r
4906 stream_.deviceBuffer,
\r
4907 stream_.convertInfo[INPUT] );
\r
4910 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4911 memcpy( stream_.userBuffer[INPUT],
\r
4912 stream_.deviceBuffer,
\r
4913 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4918 // if there is no capture stream, set callbackPulled flag
\r
4919 callbackPulled = true;
\r
4922 // Execute Callback
\r
4923 // ================
\r
4924 // 1. Execute user callback method
\r
4925 // 2. Handle return value from callback
\r
4927 // if callback has not requested the stream to stop
\r
4928 if ( callbackPulled && !callbackStopped ) {
\r
4929 // Execute user callback method
\r
4930 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4931 stream_.userBuffer[INPUT],
\r
4932 stream_.bufferSize,
\r
4934 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4935 stream_.callbackInfo.userData );
\r
4937 // Handle return value from callback
\r
4938 if ( callbackResult == 1 ) {
\r
4939 // instantiate a thread to stop this thread
\r
4940 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4941 if ( !threadHandle ) {
\r
4942 errorType = RtAudioError::THREAD_ERROR;
\r
4943 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4946 else if ( !CloseHandle( threadHandle ) ) {
\r
4947 errorType = RtAudioError::THREAD_ERROR;
\r
4948 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4952 callbackStopped = true;
\r
4954 else if ( callbackResult == 2 ) {
\r
4955 // instantiate a thread to stop this thread
\r
4956 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4957 if ( !threadHandle ) {
\r
4958 errorType = RtAudioError::THREAD_ERROR;
\r
4959 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4962 else if ( !CloseHandle( threadHandle ) ) {
\r
4963 errorType = RtAudioError::THREAD_ERROR;
\r
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4968 callbackStopped = true;
\r
4973 // Callback Output
\r
4974 // ===============
\r
4975 // 1. Convert callback buffer to stream format
\r
4976 // 2. Convert callback buffer to stream sample rate and channel count
\r
4977 // 3. Push callback buffer into outputBuffer
\r
4979 if ( renderAudioClient && callbackPulled ) {
\r
4980 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4981 // Convert callback buffer to stream format
\r
4982 convertBuffer( stream_.deviceBuffer,
\r
4983 stream_.userBuffer[OUTPUT],
\r
4984 stream_.convertInfo[OUTPUT] );
\r
4988 // Convert callback buffer to stream sample rate
\r
4989 convertBufferWasapi( convBuffer,
\r
4990 stream_.deviceBuffer,
\r
4991 stream_.nDeviceChannels[OUTPUT],
\r
4992 stream_.sampleRate,
\r
4993 renderFormat->nSamplesPerSec,
\r
4994 stream_.bufferSize,
\r
4996 stream_.deviceFormat[OUTPUT] );
\r
4998 // Push callback buffer into outputBuffer
\r
4999 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5000 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5001 stream_.deviceFormat[OUTPUT] );
\r
5004 // if there is no render stream, set callbackPushed flag
\r
5005 callbackPushed = true;
\r
5010 // 1. Get capture buffer from stream
\r
5011 // 2. Push capture buffer into inputBuffer
\r
5012 // 3. If 2. was successful: Release capture buffer
\r
5014 if ( captureAudioClient ) {
\r
5015 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5016 if ( !callbackPulled ) {
\r
5017 WaitForSingleObject( captureEvent, INFINITE );
\r
5020 // Get capture buffer from stream
\r
5021 hr = captureClient->GetBuffer( &streamBuffer,
\r
5022 &bufferFrameCount,
\r
5023 &captureFlags, NULL, NULL );
\r
5024 if ( FAILED( hr ) ) {
\r
5025 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5029 if ( bufferFrameCount != 0 ) {
\r
5030 // Push capture buffer into inputBuffer
\r
5031 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5032 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5033 stream_.deviceFormat[INPUT] ) )
\r
5035 // Release capture buffer
\r
5036 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5037 if ( FAILED( hr ) ) {
\r
5038 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5044 // Inform WASAPI that capture was unsuccessful
\r
5045 hr = captureClient->ReleaseBuffer( 0 );
\r
5046 if ( FAILED( hr ) ) {
\r
5047 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5054 // Inform WASAPI that capture was unsuccessful
\r
5055 hr = captureClient->ReleaseBuffer( 0 );
\r
5056 if ( FAILED( hr ) ) {
\r
5057 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5065 // 1. Get render buffer from stream
\r
5066 // 2. Pull next buffer from outputBuffer
\r
5067 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5068 // Release render buffer
\r
5070 if ( renderAudioClient ) {
\r
5071 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5072 if ( callbackPulled && !callbackPushed ) {
\r
5073 WaitForSingleObject( renderEvent, INFINITE );
\r
5076 // Get render buffer from stream
\r
5077 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5078 if ( FAILED( hr ) ) {
\r
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5083 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5084 if ( FAILED( hr ) ) {
\r
5085 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5089 bufferFrameCount -= numFramesPadding;
\r
5091 if ( bufferFrameCount != 0 ) {
\r
5092 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5093 if ( FAILED( hr ) ) {
\r
5094 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5098 // Pull next buffer from outputBuffer
\r
5099 // Fill render buffer with next buffer
\r
5100 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5101 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5102 stream_.deviceFormat[OUTPUT] ) )
\r
5104 // Release render buffer
\r
5105 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5106 if ( FAILED( hr ) ) {
\r
5107 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5113 // Inform WASAPI that render was unsuccessful
\r
5114 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5115 if ( FAILED( hr ) ) {
\r
5116 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5123 // Inform WASAPI that render was unsuccessful
\r
5124 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5125 if ( FAILED( hr ) ) {
\r
5126 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5132 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5133 if ( callbackPushed ) {
\r
5134 callbackPulled = false;
\r
5137 // tick stream time
\r
5138 RtApi::tickStreamTime();
\r
5143 CoTaskMemFree( captureFormat );
\r
5144 CoTaskMemFree( renderFormat );
\r
5146 free ( convBuffer );
\r
5150 // update stream state
\r
5151 stream_.state = STREAM_STOPPED;
\r
5153 if ( errorText_.empty() )
\r
5156 error( errorType );
\r
5159 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5163 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5165 // Modified by Robin Davies, October 2005
\r
5166 // - Improvements to DirectX pointer chasing.
\r
5167 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5168 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5169 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5170 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5172 #include <dsound.h>
\r
5173 #include <assert.h>
\r
5174 #include <algorithm>
\r
5176 #if defined(__MINGW32__)
\r
5177 // missing from latest mingw winapi
\r
5178 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5179 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5180 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5181 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5184 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5186 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5187 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5190 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5192 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5193 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5194 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5195 return pointer >= earlierPointer && pointer < laterPointer;
\r
5198 // A structure to hold various information related to the DirectSound
\r
5199 // API implementation.
\r
5201 unsigned int drainCounter; // Tracks callback counts when draining
\r
5202 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5206 UINT bufferPointer[2];
\r
5207 DWORD dsBufferSize[2];
\r
5208 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5212 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5215 // Declarations for utility functions, callbacks, and structures
\r
5216 // specific to the DirectSound implementation.
\r
5217 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5218 LPCTSTR description,
\r
5220 LPVOID lpContext );
\r
5222 static const char* getErrorString( int code );
\r
5224 static unsigned __stdcall callbackHandler( void *ptr );
\r
5233 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to deviceQueryCallback during device enumeration:
// selects input vs. output probing and points at the device list to fill.
struct DsProbeData {
  bool isInput;
  std::vector<struct DsDevice>* dsDevices;
};
5241 RtApiDs :: RtApiDs()
\r
5243 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5244 // accept whatever the mainline chose for a threading model.
\r
5245 coInitialized_ = false;
\r
5246 HRESULT hr = CoInitialize( NULL );
\r
5247 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5250 RtApiDs :: ~RtApiDs()
\r
5252 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5253 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5256 // The DirectSound default output is always the first device.
\r
5257 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5262 // The DirectSound default input is always the first input device,
\r
5263 // which is the first capture device enumerated.
\r
5264 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5269 unsigned int RtApiDs :: getDeviceCount( void )
\r
5271 // Set query flag for previously found devices to false, so that we
\r
5272 // can check for any devices that have disappeared.
\r
5273 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5274 dsDevices[i].found = false;
\r
5276 // Query DirectSound devices.
\r
5277 struct DsProbeData probeInfo;
\r
5278 probeInfo.isInput = false;
\r
5279 probeInfo.dsDevices = &dsDevices;
\r
5280 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5281 if ( FAILED( result ) ) {
\r
5282 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5283 errorText_ = errorStream_.str();
\r
5284 error( RtAudioError::WARNING );
\r
5287 // Query DirectSoundCapture devices.
\r
5288 probeInfo.isInput = true;
\r
5289 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5290 if ( FAILED( result ) ) {
\r
5291 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5292 errorText_ = errorStream_.str();
\r
5293 error( RtAudioError::WARNING );
\r
5296 // Clean out any devices that may have disappeared.
\r
5297 std::vector< int > indices;
\r
5298 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5299 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5300 //unsigned int nErased = 0;
\r
5301 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5302 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5303 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5305 return static_cast<unsigned int>(dsDevices.size());
\r
5308 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5310 RtAudio::DeviceInfo info;
\r
5311 info.probed = false;
\r
5313 if ( dsDevices.size() == 0 ) {
\r
5314 // Force a query of all devices
\r
5316 if ( dsDevices.size() == 0 ) {
\r
5317 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5318 error( RtAudioError::INVALID_USE );
\r
5323 if ( device >= dsDevices.size() ) {
\r
5324 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5325 error( RtAudioError::INVALID_USE );
\r
5330 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5332 LPDIRECTSOUND output;
\r
5334 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5335 if ( FAILED( result ) ) {
\r
5336 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5337 errorText_ = errorStream_.str();
\r
5338 error( RtAudioError::WARNING );
\r
5342 outCaps.dwSize = sizeof( outCaps );
\r
5343 result = output->GetCaps( &outCaps );
\r
5344 if ( FAILED( result ) ) {
\r
5345 output->Release();
\r
5346 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5347 errorText_ = errorStream_.str();
\r
5348 error( RtAudioError::WARNING );
\r
5352 // Get output channel information.
\r
5353 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5355 // Get sample rate information.
\r
5356 info.sampleRates.clear();
\r
5357 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5358 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5359 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5360 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5362 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5363 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5367 // Get format information.
\r
5368 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5369 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5371 output->Release();
\r
5373 if ( getDefaultOutputDevice() == device )
\r
5374 info.isDefaultOutput = true;
\r
5376 if ( dsDevices[ device ].validId[1] == false ) {
\r
5377 info.name = dsDevices[ device ].name;
\r
5378 info.probed = true;
\r
5384 LPDIRECTSOUNDCAPTURE input;
\r
5385 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5386 if ( FAILED( result ) ) {
\r
5387 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5388 errorText_ = errorStream_.str();
\r
5389 error( RtAudioError::WARNING );
\r
5394 inCaps.dwSize = sizeof( inCaps );
\r
5395 result = input->GetCaps( &inCaps );
\r
5396 if ( FAILED( result ) ) {
\r
5398 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5399 errorText_ = errorStream_.str();
\r
5400 error( RtAudioError::WARNING );
\r
5404 // Get input channel information.
\r
5405 info.inputChannels = inCaps.dwChannels;
\r
5407 // Get sample rate and format information.
\r
5408 std::vector<unsigned int> rates;
\r
5409 if ( inCaps.dwChannels >= 2 ) {
\r
5410 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5411 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5414 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5417 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5419 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5420 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5421 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5422 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5423 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5425 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5426 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5427 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5428 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5429 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5432 else if ( inCaps.dwChannels == 1 ) {
\r
5433 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5442 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5443 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5448 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5449 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5455 else info.inputChannels = 0; // technically, this would be an error
\r
5459 if ( info.inputChannels == 0 ) return info;
\r
5461 // Copy the supported rates to the info structure but avoid duplication.
\r
5463 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5465 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5466 if ( rates[i] == info.sampleRates[j] ) {
\r
5471 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5473 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5475 // If device opens for both playback and capture, we determine the channels.
\r
5476 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5477 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5479 if ( device == 0 ) info.isDefaultInput = true;
\r
5481 // Copy name and return.
\r
5482 info.name = dsDevices[ device ].name;
\r
5483 info.probed = true;
\r
// Open the given DirectSound device for one direction (OUTPUT or INPUT).
// Validates the request, creates the DS playback or capture object plus its
// secondary buffer, sizes/zeroes the device buffer, fills in stream_ state,
// allocates user/device conversion buffers and the DsHandle, and (on first
// open) spins up the callback thread. Returns via the usual SUCCESS/FAILURE
// paths (return statements fall outside this excerpt).
// NOTE(review): this excerpt is missing interleaved lines (closing braces and
// some local declarations, e.g. HRESULT result, DSCAPS outCaps, nBuffers,
// audioPtr/dataLen) — compare against the canonical RtAudio.cpp when editing.
5487 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5488 unsigned int firstChannel, unsigned int sampleRate,
\r
5489 RtAudioFormat format, unsigned int *bufferSize,
\r
5490 RtAudio::StreamOptions *options )
\r
// DirectSound buffers here are limited to mono/stereo per device.
5492 if ( channels + firstChannel > 2 ) {
\r
5493 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5497 size_t nDevices = dsDevices.size();
\r
5498 if ( nDevices == 0 ) {
\r
5499 // This should not happen because a check is made before this function is called.
\r
5500 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5504 if ( device >= nDevices ) {
\r
5505 // This should not happen because a check is made before this function is called.
\r
5506 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// validId[0] = playback capability, validId[1] = capture capability.
5510 if ( mode == OUTPUT ) {
\r
5511 if ( dsDevices[ device ].validId[0] == false ) {
\r
5512 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5513 errorText_ = errorStream_.str();
\r
5517 else { // mode == INPUT
\r
5518 if ( dsDevices[ device ].validId[1] == false ) {
\r
5519 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5520 errorText_ = errorStream_.str();
\r
5525 // According to a note in PortAudio, using GetDesktopWindow()
\r
5526 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5527 // that occur when the application's window is not the foreground
\r
5528 // window. Also, if the application window closes before the
\r
5529 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5530 // problems when using GetDesktopWindow() but it seems fine now
\r
5531 // (January 2010). I'll leave it commented here.
\r
5532 // HWND hWnd = GetForegroundWindow();
\r
5533 HWND hWnd = GetDesktopWindow();
\r
5535 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5536 // two. This is a judgement call and a value of two is probably too
\r
5537 // low for capture, but it should work for playback.
\r
5539 if ( options ) nBuffers = options->numberOfBuffers;
\r
// RTAUDIO_MINIMIZE_LATENCY forces the minimum (2); otherwise values < 2
// (including the unset case) fall back to the default of 3.
5540 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5541 if ( nBuffers < 2 ) nBuffers = 3;
\r
5543 // Check the lower range of the user-specified buffer size and set
\r
5544 // (arbitrarily) to a lower bound of 32.
\r
5545 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5547 // Create the wave format structure. The data format setting will
\r
5548 // be determined later.
\r
5549 WAVEFORMATEX waveFormat;
\r
5550 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5551 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
// The device is opened with firstChannel leading channels included so the
// user channels land at the requested offset.
5552 waveFormat.nChannels = channels + firstChannel;
\r
5553 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5555 // Determine the device buffer size. By default, we'll use the value
\r
5556 // defined above (32K), but we will grow it to make allowances for
\r
5557 // very large software buffer sizes.
\r
5558 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5559 DWORD dsPointerLeadTime = 0;
\r
// ohandle/bhandle temporarily hold the DS object and its buffer (as void*)
// until they are stored in the DsHandle below.
5561 void *ohandle = 0, *bhandle = 0;
\r
// ---------------- OUTPUT (playback) setup ----------------
5563 if ( mode == OUTPUT ) {
\r
5565 LPDIRECTSOUND output;
\r
5566 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5567 if ( FAILED( result ) ) {
\r
5568 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5569 errorText_ = errorStream_.str();
\r
5574 outCaps.dwSize = sizeof( outCaps );
\r
5575 result = output->GetCaps( &outCaps );
\r
5576 if ( FAILED( result ) ) {
\r
5577 output->Release();
\r
5578 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5579 errorText_ = errorStream_.str();
\r
5583 // Check channel information.
\r
5584 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5585 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5586 errorText_ = errorStream_.str();
\r
5590 // Check format information. Use 16-bit format unless not
\r
5591 // supported or user requests 8-bit.
\r
5592 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5593 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5594 waveFormat.wBitsPerSample = 16;
\r
5595 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5598 waveFormat.wBitsPerSample = 8;
\r
5599 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5601 stream_.userFormat = format;
\r
5603 // Update wave format structure and buffer information.
\r
5604 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5605 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
// Lead time in BYTES the write cursor keeps ahead of the play cursor:
// nBuffers software buffers' worth of user frames at the device sample width.
5606 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5608 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5609 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5610 dsBufferSize *= 2;
\r
5612 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5613 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5614 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5615 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5616 if ( FAILED( result ) ) {
\r
5617 output->Release();
\r
5618 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5619 errorText_ = errorStream_.str();
\r
5623 // Even though we will write to the secondary buffer, we need to
\r
5624 // access the primary buffer to set the correct output format
\r
5625 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5626 // buffer description.
\r
5627 DSBUFFERDESC bufferDescription;
\r
5628 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5629 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5630 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5632 // Obtain the primary buffer
\r
5633 LPDIRECTSOUNDBUFFER buffer;
\r
5634 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5635 if ( FAILED( result ) ) {
\r
5636 output->Release();
\r
5637 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5638 errorText_ = errorStream_.str();
\r
5642 // Set the primary DS buffer sound format.
\r
5643 result = buffer->SetFormat( &waveFormat );
\r
5644 if ( FAILED( result ) ) {
\r
5645 output->Release();
\r
5646 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5647 errorText_ = errorStream_.str();
\r
5651 // Setup the secondary DS buffer description.
\r
5652 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5653 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5654 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5655 DSBCAPS_GLOBALFOCUS |
\r
5656 DSBCAPS_GETCURRENTPOSITION2 |
\r
5657 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5658 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5659 bufferDescription.lpwfxFormat = &waveFormat;
\r
5661 // Try to create the secondary DS buffer. If that doesn't work,
\r
5662 // try to use software mixing. Otherwise, there's a problem.
\r
5663 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5664 if ( FAILED( result ) ) {
\r
// Hardware mixing failed — retry with software mixing before giving up.
5665 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5666 DSBCAPS_GLOBALFOCUS |
\r
5667 DSBCAPS_GETCURRENTPOSITION2 |
\r
5668 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5669 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5670 if ( FAILED( result ) ) {
\r
5671 output->Release();
\r
5672 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5673 errorText_ = errorStream_.str();
\r
5678 // Get the buffer size ... might be different from what we specified.
\r
5680 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5681 result = buffer->GetCaps( &dsbcaps );
\r
5682 if ( FAILED( result ) ) {
\r
5683 output->Release();
\r
5684 buffer->Release();
\r
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5686 errorText_ = errorStream_.str();
\r
// Use the device's actual buffer size from here on.
5690 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5692 // Lock the DS buffer
\r
5695 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5696 if ( FAILED( result ) ) {
\r
5697 output->Release();
\r
5698 buffer->Release();
\r
5699 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5700 errorText_ = errorStream_.str();
\r
5704 // Zero the DS buffer
\r
5705 ZeroMemory( audioPtr, dataLen );
\r
5707 // Unlock the DS buffer
\r
5708 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5709 if ( FAILED( result ) ) {
\r
5710 output->Release();
\r
5711 buffer->Release();
\r
5712 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5713 errorText_ = errorStream_.str();
\r
5717 ohandle = (void *) output;
\r
5718 bhandle = (void *) buffer;
\r
// ---------------- INPUT (capture) setup ----------------
5721 if ( mode == INPUT ) {
\r
5723 LPDIRECTSOUNDCAPTURE input;
\r
5724 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5725 if ( FAILED( result ) ) {
\r
5726 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5727 errorText_ = errorStream_.str();
\r
5732 inCaps.dwSize = sizeof( inCaps );
\r
5733 result = input->GetCaps( &inCaps );
\r
5734 if ( FAILED( result ) ) {
\r
5736 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5737 errorText_ = errorStream_.str();
\r
5741 // Check channel information.
\r
5742 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5743 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5747 // Check format information. Use 16-bit format unless user
\r
5748 // requests 8-bit.
\r
5749 DWORD deviceFormats;
\r
5750 if ( channels + firstChannel == 2 ) {
\r
// Stereo 8-bit capture formats at the four standard rates.
5751 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5752 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5753 waveFormat.wBitsPerSample = 8;
\r
5754 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5756 else { // assume 16-bit is supported
\r
5757 waveFormat.wBitsPerSample = 16;
\r
5758 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5761 else { // channel == 1
\r
// Mono 8-bit capture formats at the four standard rates.
5762 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5763 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5764 waveFormat.wBitsPerSample = 8;
\r
5765 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5767 else { // assume 16-bit is supported
\r
5768 waveFormat.wBitsPerSample = 16;
\r
5769 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5772 stream_.userFormat = format;
\r
5774 // Update wave format structure and buffer information.
\r
5775 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5776 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
// Same byte-lead calculation as the output path.
5777 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5779 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5780 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5781 dsBufferSize *= 2;
\r
5783 // Setup the secondary DS buffer description.
\r
5784 DSCBUFFERDESC bufferDescription;
\r
5785 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5786 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5787 bufferDescription.dwFlags = 0;
\r
5788 bufferDescription.dwReserved = 0;
\r
5789 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5790 bufferDescription.lpwfxFormat = &waveFormat;
\r
5792 // Create the capture buffer.
\r
5793 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5794 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5795 if ( FAILED( result ) ) {
\r
5797 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5798 errorText_ = errorStream_.str();
\r
5802 // Get the buffer size ... might be different from what we specified.
\r
5803 DSCBCAPS dscbcaps;
\r
5804 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5805 result = buffer->GetCaps( &dscbcaps );
\r
5806 if ( FAILED( result ) ) {
\r
5808 buffer->Release();
\r
5809 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5810 errorText_ = errorStream_.str();
\r
5814 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5816 // NOTE: We could have a problem here if this is a duplex stream
\r
5817 // and the play and capture hardware buffer sizes are different
\r
5818 // (I'm actually not sure if that is a problem or not).
\r
5819 // Currently, we are not verifying that.
\r
5821 // Lock the capture buffer
\r
5824 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5825 if ( FAILED( result ) ) {
\r
5827 buffer->Release();
\r
5828 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5829 errorText_ = errorStream_.str();
\r
5833 // Zero the buffer
\r
5834 ZeroMemory( audioPtr, dataLen );
\r
5836 // Unlock the buffer
\r
5837 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5838 if ( FAILED( result ) ) {
\r
5840 buffer->Release();
\r
5841 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5842 errorText_ = errorStream_.str();
\r
5846 ohandle = (void *) input;
\r
5847 bhandle = (void *) buffer;
\r
5850 // Set various stream parameters
\r
5851 DsHandle *handle = 0;
\r
5852 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5853 stream_.nUserChannels[mode] = channels;
\r
5854 stream_.bufferSize = *bufferSize;
\r
5855 stream_.channelOffset[mode] = firstChannel;
\r
// DirectSound buffers are always interleaved; the user layout may differ.
5856 stream_.deviceInterleaved[mode] = true;
\r
5857 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5858 else stream_.userInterleaved = true;
\r
5860 // Set flag for buffer conversion
\r
// Conversion is needed if channel count, sample format, or interleaving
// differs between the user and device sides.
5861 stream_.doConvertBuffer[mode] = false;
\r
5862 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5863 stream_.doConvertBuffer[mode] = true;
\r
5864 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5865 stream_.doConvertBuffer[mode] = true;
\r
5866 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5867 stream_.nUserChannels[mode] > 1 )
\r
5868 stream_.doConvertBuffer[mode] = true;
\r
5870 // Allocate necessary internal buffers
\r
5871 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5872 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5873 if ( stream_.userBuffer[mode] == NULL ) {
\r
5874 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5878 if ( stream_.doConvertBuffer[mode] ) {
\r
5880 bool makeBuffer = true;
\r
5881 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5882 if ( mode == INPUT ) {
\r
// For duplex, reuse the output-side device buffer if it is big enough.
5883 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5884 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5885 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5889 if ( makeBuffer ) {
\r
5890 bufferBytes *= *bufferSize;
\r
5891 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5892 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5893 if ( stream_.deviceBuffer == NULL ) {
\r
5894 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5900 // Allocate our DsHandle structures for the stream.
\r
5901 if ( stream_.apiHandle == 0 ) {
\r
5903 handle = new DsHandle;
\r
5905 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this is the DS backend —
// looks like a copy/paste slip; confirm against upstream before changing.
5906 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5910 // Create a manual-reset event.
\r
// Used by callbackEvent()/stopStream() to signal end-of-drain.
5911 handle->condition = CreateEvent( NULL, // no security
\r
5912 TRUE, // manual-reset
\r
5913 FALSE, // non-signaled initially
\r
5914 NULL ); // unnamed
\r
5915 stream_.apiHandle = (void *) handle;
\r
5918 handle = (DsHandle *) stream_.apiHandle;
\r
5919 handle->id[mode] = ohandle;
\r
5920 handle->buffer[mode] = bhandle;
\r
5921 handle->dsBufferSize[mode] = dsBufferSize;
\r
5922 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5924 stream_.device[mode] = device;
\r
5925 stream_.state = STREAM_STOPPED;
\r
5926 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5927 // We had already set up an output stream.
\r
5928 stream_.mode = DUPLEX;
\r
5930 stream_.mode = mode;
\r
5931 stream_.nBuffers = nBuffers;
\r
5932 stream_.sampleRate = sampleRate;
\r
5934 // Setup the buffer conversion information structure.
\r
5935 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5937 // Setup the callback thread.
\r
// Only on the first open — a duplex second open reuses the running thread.
5938 if ( stream_.callbackInfo.isRunning == false ) {
\r
5939 unsigned threadId;
\r
5940 stream_.callbackInfo.isRunning = true;
\r
5941 stream_.callbackInfo.object = (void *) this;
\r
5942 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5943 &stream_.callbackInfo, 0, &threadId );
\r
5944 if ( stream_.callbackInfo.thread == 0 ) {
\r
5945 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5949 // Boost DS thread priority
\r
5950 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// ---------------- error cleanup path ----------------
// Releases whatever DS objects were created before the failure, frees the
// user/device buffers, and marks the stream closed.
5956 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5957 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5958 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5959 if ( buffer ) buffer->Release();
\r
5960 object->Release();
\r
5962 if ( handle->buffer[1] ) {
\r
5963 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5964 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5965 if ( buffer ) buffer->Release();
\r
5966 object->Release();
\r
5968 CloseHandle( handle->condition );
\r
5970 stream_.apiHandle = 0;
\r
5973 for ( int i=0; i<2; i++ ) {
\r
5974 if ( stream_.userBuffer[i] ) {
\r
5975 free( stream_.userBuffer[i] );
\r
5976 stream_.userBuffer[i] = 0;
\r
5980 if ( stream_.deviceBuffer ) {
\r
5981 free( stream_.deviceBuffer );
\r
5982 stream_.deviceBuffer = 0;
\r
5985 stream_.state = STREAM_CLOSED;
\r
// Close an open stream: stop the callback thread, release the DirectSound
// playback/capture objects and buffers, free the internal user/device
// buffers, and reset the stream to UNINITIALIZED/STREAM_CLOSED.
// Closing an already-closed stream only raises a WARNING.
5989 void RtApiDs :: closeStream()
\r
5991 if ( stream_.state == STREAM_CLOSED ) {
\r
5992 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5993 error( RtAudioError::WARNING );
\r
5997 // Stop the callback thread.
\r
// callbackHandler polls isRunning; clearing it lets the thread exit, then
// we join it before tearing down the DS objects it uses.
5998 stream_.callbackInfo.isRunning = false;
\r
5999 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6000 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6002 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release playback buffer + device object, if opened.
6004 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6005 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6006 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6009 buffer->Release();
\r
6011 object->Release();
\r
// Release capture buffer + device object, if opened.
6013 if ( handle->buffer[1] ) {
\r
6014 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6015 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6018 buffer->Release();
\r
6020 object->Release();
\r
// The manual-reset drain event created in probeDeviceOpen.
6022 CloseHandle( handle->condition );
\r
6024 stream_.apiHandle = 0;
\r
// Free the per-direction user buffers and the shared device buffer.
6027 for ( int i=0; i<2; i++ ) {
\r
6028 if ( stream_.userBuffer[i] ) {
\r
6029 free( stream_.userBuffer[i] );
\r
6030 stream_.userBuffer[i] = 0;
\r
6034 if ( stream_.deviceBuffer ) {
\r
6035 free( stream_.deviceBuffer );
\r
6036 stream_.deviceBuffer = 0;
\r
6039 stream_.mode = UNINITIALIZED;
\r
6040 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: begin looping playback and/or capture on the DS
// buffers, reset the drain bookkeeping, and mark the stream RUNNING.
// Starting an already-running stream only raises a WARNING.
6043 void RtApiDs :: startStream()
\r
6046 if ( stream_.state == STREAM_RUNNING ) {
\r
6047 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6048 error( RtAudioError::WARNING );
\r
6052 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6054 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6055 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6056 // this is already in effect.
\r
// Matched by timeEndPeriod(1) in stopStream().
6057 timeBeginPeriod( 1 );
\r
6059 buffersRolling = false;
\r
6060 duplexPrerollBytes = 0;
\r
6062 if ( stream_.mode == DUPLEX ) {
\r
6063 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6064 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6067 HRESULT result = 0;
\r
// Start looping playback on the output buffer (OUTPUT or DUPLEX).
6068 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6070 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6071 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6072 if ( FAILED( result ) ) {
\r
6073 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6074 errorText_ = errorStream_.str();
\r
// Start looping capture on the input buffer (INPUT or DUPLEX).
6079 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6081 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6082 result = buffer->Start( DSCBSTART_LOOPING );
\r
6083 if ( FAILED( result ) ) {
\r
6084 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6085 errorText_ = errorStream_.str();
\r
// Reset drain state and the condition event used by stopStream().
6090 handle->drainCounter = 0;
\r
6091 handle->internalDrain = false;
\r
6092 ResetEvent( handle->condition );
\r
6093 stream_.state = STREAM_RUNNING;
\r
6096 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream. For output, waits for the callback thread to drain
// (drainCounter handshake via handle->condition), then stops each DS buffer,
// zeroes its memory so a restart doesn't replay stale audio, and resets the
// buffer pointers to the start. Stopping an already-stopped stream only
// raises a WARNING.
6099 void RtApiDs :: stopStream()
\r
6102 if ( stream_.state == STREAM_STOPPED ) {
\r
6103 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6104 error( RtAudioError::WARNING );
\r
6108 HRESULT result = 0;
\r
6111 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6112 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain in progress: request one (2) and block
// until callbackEvent() signals the condition event when drained.
6113 if ( handle->drainCounter == 0 ) {
\r
6114 handle->drainCounter = 2;
\r
6115 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6118 stream_.state = STREAM_STOPPED;
\r
6120 MUTEX_LOCK( &stream_.mutex );
\r
6122 // Stop the buffer and clear memory
\r
6123 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6124 result = buffer->Stop();
\r
6125 if ( FAILED( result ) ) {
\r
6126 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6127 errorText_ = errorStream_.str();
\r
6131 // Lock the buffer and clear it so that if we start to play again,
\r
6132 // we won't have old data playing.
\r
6133 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6134 if ( FAILED( result ) ) {
\r
6135 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6136 errorText_ = errorStream_.str();
\r
6140 // Zero the DS buffer
\r
6141 ZeroMemory( audioPtr, dataLen );
\r
6143 // Unlock the DS buffer
\r
6144 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6145 if ( FAILED( result ) ) {
\r
6146 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6147 errorText_ = errorStream_.str();
\r
6151 // If we start playing again, we must begin at beginning of buffer.
\r
6152 handle->bufferPointer[0] = 0;
\r
6155 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6156 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6160 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex was already locked by the output branch above.
6162 if ( stream_.mode != DUPLEX )
\r
6163 MUTEX_LOCK( &stream_.mutex );
\r
6165 result = buffer->Stop();
\r
6166 if ( FAILED( result ) ) {
\r
6167 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6168 errorText_ = errorStream_.str();
\r
6172 // Lock the buffer and clear it so that if we start to play again,
\r
6173 // we won't have old data playing.
\r
6174 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6175 if ( FAILED( result ) ) {
\r
6176 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6177 errorText_ = errorStream_.str();
\r
6181 // Zero the DS buffer
\r
6182 ZeroMemory( audioPtr, dataLen );
\r
6184 // Unlock the DS buffer
\r
6185 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6186 if ( FAILED( result ) ) {
\r
6187 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6188 errorText_ = errorStream_.str();
\r
6192 // If we start recording again, we must begin at beginning of buffer.
\r
6193 handle->bufferPointer[1] = 0;
\r
6197 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6198 MUTEX_UNLOCK( &stream_.mutex );
\r
6200 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream: skip the output drain by setting drainCounter
// directly (callbackEvent() treats a non-zero drainCounter as "stop asked"),
// then proceed to the normal stop path (continuation outside this excerpt).
// Aborting an already-stopped stream only raises a WARNING.
6203 void RtApiDs :: abortStream()
\r
6206 if ( stream_.state == STREAM_STOPPED ) {
\r
6207 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6208 error( RtAudioError::WARNING );
\r
6212 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Pre-set drainCounter so stopStream()'s "drainCounter == 0" branch is
// skipped and no blocking wait on the condition event occurs.
6213 handle->drainCounter = 2;
\r
6218 void RtApiDs :: callbackEvent()
\r
6220 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6221 Sleep( 50 ); // sleep 50 milliseconds
\r
6225 if ( stream_.state == STREAM_CLOSED ) {
\r
6226 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6227 error( RtAudioError::WARNING );
\r
6231 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6232 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6234 // Check if we were draining the stream and signal is finished.
\r
6235 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6237 stream_.state = STREAM_STOPPING;
\r
6238 if ( handle->internalDrain == false )
\r
6239 SetEvent( handle->condition );
\r
6245 // Invoke user callback to get fresh output data UNLESS we are
\r
6246 // draining stream.
\r
6247 if ( handle->drainCounter == 0 ) {
\r
6248 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6249 double streamTime = getStreamTime();
\r
6250 RtAudioStreamStatus status = 0;
\r
6251 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6252 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6253 handle->xrun[0] = false;
\r
6255 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6256 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6257 handle->xrun[1] = false;
\r
6259 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6260 stream_.bufferSize, streamTime, status, info->userData );
\r
6261 if ( cbReturnValue == 2 ) {
\r
6262 stream_.state = STREAM_STOPPING;
\r
6263 handle->drainCounter = 2;
\r
6267 else if ( cbReturnValue == 1 ) {
\r
6268 handle->drainCounter = 1;
\r
6269 handle->internalDrain = true;
\r
6274 DWORD currentWritePointer, safeWritePointer;
\r
6275 DWORD currentReadPointer, safeReadPointer;
\r
6276 UINT nextWritePointer;
\r
6278 LPVOID buffer1 = NULL;
\r
6279 LPVOID buffer2 = NULL;
\r
6280 DWORD bufferSize1 = 0;
\r
6281 DWORD bufferSize2 = 0;
\r
6286 MUTEX_LOCK( &stream_.mutex );
\r
6287 if ( stream_.state == STREAM_STOPPED ) {
\r
6288 MUTEX_UNLOCK( &stream_.mutex );
\r
6292 if ( buffersRolling == false ) {
\r
6293 if ( stream_.mode == DUPLEX ) {
\r
6294 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6296 // It takes a while for the devices to get rolling. As a result,
\r
6297 // there's no guarantee that the capture and write device pointers
\r
6298 // will move in lockstep. Wait here for both devices to start
\r
6299 // rolling, and then set our buffer pointers accordingly.
\r
6300 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6301 // bytes later than the write buffer.
\r
6303 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6304 // take place between the two GetCurrentPosition calls... but I'm
\r
6305 // really not sure how to solve the problem. Temporarily boost to
\r
6306 // Realtime priority, maybe; but I'm not sure what priority the
\r
6307 // DirectSound service threads run at. We *should* be roughly
\r
6308 // within a ms or so of correct.
\r
6310 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6311 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6313 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6315 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6316 if ( FAILED( result ) ) {
\r
6317 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6318 errorText_ = errorStream_.str();
\r
6319 error( RtAudioError::SYSTEM_ERROR );
\r
6322 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6323 if ( FAILED( result ) ) {
\r
6324 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6325 errorText_ = errorStream_.str();
\r
6326 error( RtAudioError::SYSTEM_ERROR );
\r
6330 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6331 if ( FAILED( result ) ) {
\r
6332 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6333 errorText_ = errorStream_.str();
\r
6334 error( RtAudioError::SYSTEM_ERROR );
\r
6337 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6338 if ( FAILED( result ) ) {
\r
6339 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6340 errorText_ = errorStream_.str();
\r
6341 error( RtAudioError::SYSTEM_ERROR );
\r
6344 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6348 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6350 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6351 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6352 handle->bufferPointer[1] = safeReadPointer;
\r
6354 else if ( stream_.mode == OUTPUT ) {
\r
6356 // Set the proper nextWritePosition after initial startup.
\r
6357 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6358     result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6359 if ( FAILED( result ) ) {
\r
6360 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6361 errorText_ = errorStream_.str();
\r
6362 error( RtAudioError::SYSTEM_ERROR );
\r
6365 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6366 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6369 buffersRolling = true;
\r
6372 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6374 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6376 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6377 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6378 bufferBytes *= formatBytes( stream_.userFormat );
\r
6379 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6382 // Setup parameters and do buffer conversion if necessary.
\r
6383 if ( stream_.doConvertBuffer[0] ) {
\r
6384 buffer = stream_.deviceBuffer;
\r
6385 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6386 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6387 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6390 buffer = stream_.userBuffer[0];
\r
6391 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6392 bufferBytes *= formatBytes( stream_.userFormat );
\r
6395 // No byte swapping necessary in DirectSound implementation.
\r
6397 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6398 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6400 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6401 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6403 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6404 nextWritePointer = handle->bufferPointer[0];
\r
6406 DWORD endWrite, leadPointer;
\r
6408 // Find out where the read and "safe write" pointers are.
\r
6409       result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6410 if ( FAILED( result ) ) {
\r
6411 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6412 errorText_ = errorStream_.str();
\r
6413 error( RtAudioError::SYSTEM_ERROR );
\r
6417 // We will copy our output buffer into the region between
\r
6418 // safeWritePointer and leadPointer. If leadPointer is not
\r
6419 // beyond the next endWrite position, wait until it is.
\r
6420 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6421 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6422 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6423 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6424 endWrite = nextWritePointer + bufferBytes;
\r
6426 // Check whether the entire write region is behind the play pointer.
\r
6427 if ( leadPointer >= endWrite ) break;
\r
6429 // If we are here, then we must wait until the leadPointer advances
\r
6430 // beyond the end of our next write region. We use the
\r
6431 // Sleep() function to suspend operation until that happens.
\r
6432 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6433 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6434 if ( millis < 1.0 ) millis = 1.0;
\r
6435 Sleep( (DWORD) millis );
\r
6438 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6439 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6440 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6441 handle->xrun[0] = true;
\r
6442 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6443 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6444 handle->bufferPointer[0] = nextWritePointer;
\r
6445 endWrite = nextWritePointer + bufferBytes;
\r
6448 // Lock free space in the buffer
\r
6449 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6450 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6451 if ( FAILED( result ) ) {
\r
6452 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6453 errorText_ = errorStream_.str();
\r
6454 error( RtAudioError::SYSTEM_ERROR );
\r
6458 // Copy our buffer into the DS buffer
\r
6459 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6460 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6462 // Update our buffer offset and unlock sound buffer
\r
6463 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6464 if ( FAILED( result ) ) {
\r
6465 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6466 errorText_ = errorStream_.str();
\r
6467 error( RtAudioError::SYSTEM_ERROR );
\r
6470 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6471 handle->bufferPointer[0] = nextWritePointer;
\r
6474 // Don't bother draining input
\r
6475 if ( handle->drainCounter ) {
\r
6476 handle->drainCounter++;
\r
6480 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6482 // Setup parameters.
\r
6483 if ( stream_.doConvertBuffer[1] ) {
\r
6484 buffer = stream_.deviceBuffer;
\r
6485 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6486 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6489 buffer = stream_.userBuffer[1];
\r
6490 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6491 bufferBytes *= formatBytes( stream_.userFormat );
\r
6494 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6495 long nextReadPointer = handle->bufferPointer[1];
\r
6496 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6498 // Find out where the write and "safe read" pointers are.
\r
6499     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6500 if ( FAILED( result ) ) {
\r
6501 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6502 errorText_ = errorStream_.str();
\r
6503 error( RtAudioError::SYSTEM_ERROR );
\r
6507 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6508 DWORD endRead = nextReadPointer + bufferBytes;
\r
6510 // Handling depends on whether we are INPUT or DUPLEX.
\r
6511 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6512 // then a wait here will drag the write pointers into the forbidden zone.
\r
6514 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6515 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6516 // practical way to sync up the read and write pointers reliably, given the
\r
6517 // the very complex relationship between phase and increment of the read and write
\r
6520 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6521 // provide a pre-roll period of 0.5 seconds in which we return
\r
6522 // zeros from the read buffer while the pointers sync up.
\r
6524 if ( stream_.mode == DUPLEX ) {
\r
6525 if ( safeReadPointer < endRead ) {
\r
6526 if ( duplexPrerollBytes <= 0 ) {
\r
6527           // Pre-roll time over. Be more aggressive.
\r
6528 int adjustment = endRead-safeReadPointer;
\r
6530 handle->xrun[1] = true;
\r
6532 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6533 // and perform fine adjustments later.
\r
6534 // - small adjustments: back off by twice as much.
\r
6535 if ( adjustment >= 2*bufferBytes )
\r
6536 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6538 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6540 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6544           // In pre-roll time. Just do it.
\r
6545 nextReadPointer = safeReadPointer - bufferBytes;
\r
6546 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6548 endRead = nextReadPointer + bufferBytes;
\r
6551 else { // mode == INPUT
\r
6552 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6553 // See comments for playback.
\r
6554 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6555 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6556 if ( millis < 1.0 ) millis = 1.0;
\r
6557 Sleep( (DWORD) millis );
\r
6559 // Wake up and find out where we are now.
\r
6560         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6561 if ( FAILED( result ) ) {
\r
6562 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6563 errorText_ = errorStream_.str();
\r
6564 error( RtAudioError::SYSTEM_ERROR );
\r
6568 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6572 // Lock free space in the buffer
\r
6573 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6574 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6575 if ( FAILED( result ) ) {
\r
6576 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6577 errorText_ = errorStream_.str();
\r
6578 error( RtAudioError::SYSTEM_ERROR );
\r
6582 if ( duplexPrerollBytes <= 0 ) {
\r
6583 // Copy our buffer into the DS buffer
\r
6584 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6585 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6588 memset( buffer, 0, bufferSize1 );
\r
6589 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6590 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6593 // Update our buffer offset and unlock sound buffer
\r
6594 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6595 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6596 if ( FAILED( result ) ) {
\r
6597 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6598 errorText_ = errorStream_.str();
\r
6599 error( RtAudioError::SYSTEM_ERROR );
\r
6602 handle->bufferPointer[1] = nextReadPointer;
\r
6604 // No byte swapping necessary in DirectSound implementation.
\r
6606 // If necessary, convert 8-bit data from unsigned to signed.
\r
6607 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6608 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6610 // Do buffer conversion if necessary.
\r
6611 if ( stream_.doConvertBuffer[1] )
\r
6612 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6616 MUTEX_UNLOCK( &stream_.mutex );
\r
6617 RtApi::tickStreamTime();
\r
6620 // Definitions for utility functions and callbacks
\r
6621 // specific to the DirectSound implementation.
\r
6623 static unsigned __stdcall callbackHandler( void *ptr )
\r
6625 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6626 RtApiDs *object = (RtApiDs *) info->object;
\r
6627 bool* isRunning = &info->isRunning;
\r
6629 while ( *isRunning == true ) {
\r
6630 object->callbackEvent();
\r
6633 _endthreadex( 0 );
\r
6637 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6638 LPCTSTR description,
\r
6639 LPCTSTR /*module*/,
\r
6640 LPVOID lpContext )
\r
6642 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6643 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6646 bool validDevice = false;
\r
6647 if ( probeInfo.isInput == true ) {
\r
6649 LPDIRECTSOUNDCAPTURE object;
\r
6651 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6652 if ( hr != DS_OK ) return TRUE;
\r
6654 caps.dwSize = sizeof(caps);
\r
6655 hr = object->GetCaps( &caps );
\r
6656 if ( hr == DS_OK ) {
\r
6657 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6658 validDevice = true;
\r
6660 object->Release();
\r
6664 LPDIRECTSOUND object;
\r
6665 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6666 if ( hr != DS_OK ) return TRUE;
\r
6668 caps.dwSize = sizeof(caps);
\r
6669 hr = object->GetCaps( &caps );
\r
6670 if ( hr == DS_OK ) {
\r
6671 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6672 validDevice = true;
\r
6674 object->Release();
\r
6677 // If good device, then save its name and guid.
\r
6678 std::string name = convertCharPointerToStdString( description );
\r
6679 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6680 if ( lpguid == NULL )
\r
6681 name = "Default Device";
\r
6682 if ( validDevice ) {
\r
6683 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6684 if ( dsDevices[i].name == name ) {
\r
6685 dsDevices[i].found = true;
\r
6686 if ( probeInfo.isInput ) {
\r
6687 dsDevices[i].id[1] = lpguid;
\r
6688 dsDevices[i].validId[1] = true;
\r
6691 dsDevices[i].id[0] = lpguid;
\r
6692 dsDevices[i].validId[0] = true;
\r
6699 device.name = name;
\r
6700 device.found = true;
\r
6701 if ( probeInfo.isInput ) {
\r
6702 device.id[1] = lpguid;
\r
6703 device.validId[1] = true;
\r
6706 device.id[0] = lpguid;
\r
6707 device.validId[0] = true;
\r
6709 dsDevices.push_back( device );
\r
6715 static const char* getErrorString( int code )
\r
6719 case DSERR_ALLOCATED:
\r
6720 return "Already allocated";
\r
6722 case DSERR_CONTROLUNAVAIL:
\r
6723 return "Control unavailable";
\r
6725 case DSERR_INVALIDPARAM:
\r
6726 return "Invalid parameter";
\r
6728 case DSERR_INVALIDCALL:
\r
6729 return "Invalid call";
\r
6731 case DSERR_GENERIC:
\r
6732 return "Generic error";
\r
6734 case DSERR_PRIOLEVELNEEDED:
\r
6735 return "Priority level needed";
\r
6737 case DSERR_OUTOFMEMORY:
\r
6738 return "Out of memory";
\r
6740 case DSERR_BADFORMAT:
\r
6741 return "The sample rate or the channel format is not supported";
\r
6743 case DSERR_UNSUPPORTED:
\r
6744 return "Not supported";
\r
6746 case DSERR_NODRIVER:
\r
6747 return "No driver";
\r
6749 case DSERR_ALREADYINITIALIZED:
\r
6750 return "Already initialized";
\r
6752 case DSERR_NOAGGREGATION:
\r
6753 return "No aggregation";
\r
6755 case DSERR_BUFFERLOST:
\r
6756 return "Buffer lost";
\r
6758 case DSERR_OTHERAPPHASPRIO:
\r
6759 return "Another application already has priority";
\r
6761 case DSERR_UNINITIALIZED:
\r
6762 return "Uninitialized";
\r
6765 return "DirectSound unknown error";
\r
6768 //******************** End of __WINDOWS_DS__ *********************//
\r
6772 #if defined(__LINUX_ALSA__)
\r
6774 #include <alsa/asoundlib.h>
\r
6775 #include <unistd.h>
\r
6777 // A structure to hold various information related to the ALSA API
\r
6778 // implementation.
\r
6779 struct AlsaHandle {
\r
6780 snd_pcm_t *handles[2];
\r
6781 bool synchronized;
\r
6783 pthread_cond_t runnable_cv;
\r
6787 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6790 static void *alsaCallbackHandler( void * ptr );
\r
6792 RtApiAlsa :: RtApiAlsa()
\r
6794 // Nothing to do here.
\r
6797 RtApiAlsa :: ~RtApiAlsa()
\r
6799 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6802 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6804 unsigned nDevices = 0;
\r
6805 int result, subdevice, card;
\r
6807 snd_ctl_t *handle;
\r
6809 // Count cards and devices
\r
6811 snd_card_next( &card );
\r
6812 while ( card >= 0 ) {
\r
6813 sprintf( name, "hw:%d", card );
\r
6814 result = snd_ctl_open( &handle, name, 0 );
\r
6815 if ( result < 0 ) {
\r
6816 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6817 errorText_ = errorStream_.str();
\r
6818 error( RtAudioError::WARNING );
\r
6823 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6824 if ( result < 0 ) {
\r
6825 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6826 errorText_ = errorStream_.str();
\r
6827 error( RtAudioError::WARNING );
\r
6830 if ( subdevice < 0 )
\r
6835 snd_ctl_close( handle );
\r
6836 snd_card_next( &card );
\r
6839 result = snd_ctl_open( &handle, "default", 0 );
\r
6840 if (result == 0) {
\r
6842 snd_ctl_close( handle );
\r
6848 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6850 RtAudio::DeviceInfo info;
\r
6851 info.probed = false;
\r
6853 unsigned nDevices = 0;
\r
6854 int result, subdevice, card;
\r
6856 snd_ctl_t *chandle;
\r
6858 // Count cards and devices
\r
6860 snd_card_next( &card );
\r
6861 while ( card >= 0 ) {
\r
6862 sprintf( name, "hw:%d", card );
\r
6863 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6864 if ( result < 0 ) {
\r
6865 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6866 errorText_ = errorStream_.str();
\r
6867 error( RtAudioError::WARNING );
\r
6872 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6873 if ( result < 0 ) {
\r
6874 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6875 errorText_ = errorStream_.str();
\r
6876 error( RtAudioError::WARNING );
\r
6879 if ( subdevice < 0 ) break;
\r
6880 if ( nDevices == device ) {
\r
6881 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6887 snd_ctl_close( chandle );
\r
6888 snd_card_next( &card );
\r
6891 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6892 if ( result == 0 ) {
\r
6893 if ( nDevices == device ) {
\r
6894 strcpy( name, "default" );
\r
6900 if ( nDevices == 0 ) {
\r
6901 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6902 error( RtAudioError::INVALID_USE );
\r
6906 if ( device >= nDevices ) {
\r
6907 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6908 error( RtAudioError::INVALID_USE );
\r
6914 // If a stream is already open, we cannot probe the stream devices.
\r
6915 // Thus, use the saved results.
\r
6916 if ( stream_.state != STREAM_CLOSED &&
\r
6917 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6918 snd_ctl_close( chandle );
\r
6919 if ( device >= devices_.size() ) {
\r
6920 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6921 error( RtAudioError::WARNING );
\r
6924 return devices_[ device ];
\r
6927 int openMode = SND_PCM_ASYNC;
\r
6928 snd_pcm_stream_t stream;
\r
6929 snd_pcm_info_t *pcminfo;
\r
6930 snd_pcm_info_alloca( &pcminfo );
\r
6931 snd_pcm_t *phandle;
\r
6932 snd_pcm_hw_params_t *params;
\r
6933 snd_pcm_hw_params_alloca( ¶ms );
\r
6935 // First try for playback unless default device (which has subdev -1)
\r
6936 stream = SND_PCM_STREAM_PLAYBACK;
\r
6937 snd_pcm_info_set_stream( pcminfo, stream );
\r
6938 if ( subdevice != -1 ) {
\r
6939 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6940 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6942 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6943 if ( result < 0 ) {
\r
6944 // Device probably doesn't support playback.
\r
6945 goto captureProbe;
\r
6949 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6950 if ( result < 0 ) {
\r
6951 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6952 errorText_ = errorStream_.str();
\r
6953 error( RtAudioError::WARNING );
\r
6954 goto captureProbe;
\r
6957 // The device is open ... fill the parameter structure.
\r
6958 result = snd_pcm_hw_params_any( phandle, params );
\r
6959 if ( result < 0 ) {
\r
6960 snd_pcm_close( phandle );
\r
6961 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6962 errorText_ = errorStream_.str();
\r
6963 error( RtAudioError::WARNING );
\r
6964 goto captureProbe;
\r
6967 // Get output channel information.
\r
6968 unsigned int value;
\r
6969 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6970 if ( result < 0 ) {
\r
6971 snd_pcm_close( phandle );
\r
6972 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6973 errorText_ = errorStream_.str();
\r
6974 error( RtAudioError::WARNING );
\r
6975 goto captureProbe;
\r
6977 info.outputChannels = value;
\r
6978 snd_pcm_close( phandle );
\r
6981 stream = SND_PCM_STREAM_CAPTURE;
\r
6982 snd_pcm_info_set_stream( pcminfo, stream );
\r
6984 // Now try for capture unless default device (with subdev = -1)
\r
6985 if ( subdevice != -1 ) {
\r
6986 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6987 snd_ctl_close( chandle );
\r
6988 if ( result < 0 ) {
\r
6989 // Device probably doesn't support capture.
\r
6990 if ( info.outputChannels == 0 ) return info;
\r
6991 goto probeParameters;
\r
6995 snd_ctl_close( chandle );
\r
6997 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6998 if ( result < 0 ) {
\r
6999 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7000 errorText_ = errorStream_.str();
\r
7001 error( RtAudioError::WARNING );
\r
7002 if ( info.outputChannels == 0 ) return info;
\r
7003 goto probeParameters;
\r
7006 // The device is open ... fill the parameter structure.
\r
7007 result = snd_pcm_hw_params_any( phandle, params );
\r
7008 if ( result < 0 ) {
\r
7009 snd_pcm_close( phandle );
\r
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7011 errorText_ = errorStream_.str();
\r
7012 error( RtAudioError::WARNING );
\r
7013 if ( info.outputChannels == 0 ) return info;
\r
7014 goto probeParameters;
\r
7017 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7018 if ( result < 0 ) {
\r
7019 snd_pcm_close( phandle );
\r
7020 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7021 errorText_ = errorStream_.str();
\r
7022 error( RtAudioError::WARNING );
\r
7023 if ( info.outputChannels == 0 ) return info;
\r
7024 goto probeParameters;
\r
7026 info.inputChannels = value;
\r
7027 snd_pcm_close( phandle );
\r
7029 // If device opens for both playback and capture, we determine the channels.
\r
7030 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7031 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7033 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7034 if ( device == 0 && info.outputChannels > 0 )
\r
7035 info.isDefaultOutput = true;
\r
7036 if ( device == 0 && info.inputChannels > 0 )
\r
7037 info.isDefaultInput = true;
\r
7040 // At this point, we just need to figure out the supported data
\r
7041 // formats and sample rates. We'll proceed by opening the device in
\r
7042 // the direction with the maximum number of channels, or playback if
\r
7043 // they are equal. This might limit our sample rate options, but so
\r
7046 if ( info.outputChannels >= info.inputChannels )
\r
7047 stream = SND_PCM_STREAM_PLAYBACK;
\r
7049 stream = SND_PCM_STREAM_CAPTURE;
\r
7050 snd_pcm_info_set_stream( pcminfo, stream );
\r
7052 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7053 if ( result < 0 ) {
\r
7054 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7055 errorText_ = errorStream_.str();
\r
7056 error( RtAudioError::WARNING );
\r
7060 // The device is open ... fill the parameter structure.
\r
7061 result = snd_pcm_hw_params_any( phandle, params );
\r
7062 if ( result < 0 ) {
\r
7063 snd_pcm_close( phandle );
\r
7064 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7065 errorText_ = errorStream_.str();
\r
7066 error( RtAudioError::WARNING );
\r
7070 // Test our discrete set of sample rate values.
\r
7071 info.sampleRates.clear();
\r
7072 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7073 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7074 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7076 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7077 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7080 if ( info.sampleRates.size() == 0 ) {
\r
7081 snd_pcm_close( phandle );
\r
7082 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7083 errorText_ = errorStream_.str();
\r
7084 error( RtAudioError::WARNING );
\r
7088 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7089 snd_pcm_format_t format;
\r
7090 info.nativeFormats = 0;
\r
7091 format = SND_PCM_FORMAT_S8;
\r
7092 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7093 info.nativeFormats |= RTAUDIO_SINT8;
\r
7094 format = SND_PCM_FORMAT_S16;
\r
7095 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7096 info.nativeFormats |= RTAUDIO_SINT16;
\r
7097 format = SND_PCM_FORMAT_S24;
\r
7098 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7099 info.nativeFormats |= RTAUDIO_SINT24;
\r
7100 format = SND_PCM_FORMAT_S32;
\r
7101 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7102 info.nativeFormats |= RTAUDIO_SINT32;
\r
7103 format = SND_PCM_FORMAT_FLOAT;
\r
7104 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7105 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7106 format = SND_PCM_FORMAT_FLOAT64;
\r
7107 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7108 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7110 // Check that we have at least one supported format
\r
7111 if ( info.nativeFormats == 0 ) {
\r
7112 snd_pcm_close( phandle );
\r
7113 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7114 errorText_ = errorStream_.str();
\r
7115 error( RtAudioError::WARNING );
\r
7119 // Get the device name
\r
7121 result = snd_card_get_name( card, &cardname );
\r
7122 if ( result >= 0 ) {
\r
7123 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7128 // That's all ... close the device and return
\r
7129 snd_pcm_close( phandle );
\r
7130 info.probed = true;
\r
7134 void RtApiAlsa :: saveDeviceInfo( void )
\r
7138 unsigned int nDevices = getDeviceCount();
\r
7139 devices_.resize( nDevices );
\r
7140 for ( unsigned int i=0; i<nDevices; i++ )
\r
7141 devices_[i] = getDeviceInfo( i );
\r
7144 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7145 unsigned int firstChannel, unsigned int sampleRate,
\r
7146 RtAudioFormat format, unsigned int *bufferSize,
\r
7147 RtAudio::StreamOptions *options )
\r
7150 #if defined(__RTAUDIO_DEBUG__)
\r
7151 snd_output_t *out;
\r
7152 snd_output_stdio_attach(&out, stderr, 0);
\r
7155 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7157 unsigned nDevices = 0;
\r
7158 int result, subdevice, card;
\r
7160 snd_ctl_t *chandle;
\r
7162 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7163 snprintf(name, sizeof(name), "%s", "default");
\r
7165 // Count cards and devices
\r
7167 snd_card_next( &card );
\r
7168 while ( card >= 0 ) {
\r
7169 sprintf( name, "hw:%d", card );
\r
7170 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7171 if ( result < 0 ) {
\r
7172 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7173 errorText_ = errorStream_.str();
\r
7178 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7179 if ( result < 0 ) break;
\r
7180 if ( subdevice < 0 ) break;
\r
7181 if ( nDevices == device ) {
\r
7182 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7183 snd_ctl_close( chandle );
\r
7188 snd_ctl_close( chandle );
\r
7189 snd_card_next( &card );
\r
7192 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7193 if ( result == 0 ) {
\r
7194 if ( nDevices == device ) {
\r
7195 strcpy( name, "default" );
\r
7201 if ( nDevices == 0 ) {
\r
7202 // This should not happen because a check is made before this function is called.
\r
7203 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7207 if ( device >= nDevices ) {
\r
7208 // This should not happen because a check is made before this function is called.
\r
7209 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7216 // The getDeviceInfo() function will not work for a device that is
\r
7217 // already open. Thus, we'll probe the system before opening a
\r
7218 // stream and save the results for use by getDeviceInfo().
\r
7219 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7220 this->saveDeviceInfo();
\r
7222 snd_pcm_stream_t stream;
\r
7223 if ( mode == OUTPUT )
\r
7224 stream = SND_PCM_STREAM_PLAYBACK;
\r
7226 stream = SND_PCM_STREAM_CAPTURE;
\r
7228 snd_pcm_t *phandle;
\r
7229 int openMode = SND_PCM_ASYNC;
\r
7230 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7231 if ( result < 0 ) {
\r
7232 if ( mode == OUTPUT )
\r
7233 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7235 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7236 errorText_ = errorStream_.str();
\r
7240 // Fill the parameter structure.
\r
7241 snd_pcm_hw_params_t *hw_params;
\r
7242 snd_pcm_hw_params_alloca( &hw_params );
\r
7243 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7244 if ( result < 0 ) {
\r
7245 snd_pcm_close( phandle );
\r
7246 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7247 errorText_ = errorStream_.str();
\r
7251 #if defined(__RTAUDIO_DEBUG__)
\r
7252 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7253 snd_pcm_hw_params_dump( hw_params, out );
\r
7256 // Set access ... check user preference.
\r
7257 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7258 stream_.userInterleaved = false;
\r
7259 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7260 if ( result < 0 ) {
\r
7261 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7262 stream_.deviceInterleaved[mode] = true;
\r
7265 stream_.deviceInterleaved[mode] = false;
\r
7268 stream_.userInterleaved = true;
\r
7269 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7270 if ( result < 0 ) {
\r
7271 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7272 stream_.deviceInterleaved[mode] = false;
\r
7275 stream_.deviceInterleaved[mode] = true;
\r
7278 if ( result < 0 ) {
\r
7279 snd_pcm_close( phandle );
\r
7280 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7281 errorText_ = errorStream_.str();
\r
7285 // Determine how to set the device format.
\r
7286 stream_.userFormat = format;
\r
7287 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7289 if ( format == RTAUDIO_SINT8 )
\r
7290 deviceFormat = SND_PCM_FORMAT_S8;
\r
7291 else if ( format == RTAUDIO_SINT16 )
\r
7292 deviceFormat = SND_PCM_FORMAT_S16;
\r
7293 else if ( format == RTAUDIO_SINT24 )
\r
7294 deviceFormat = SND_PCM_FORMAT_S24;
\r
7295 else if ( format == RTAUDIO_SINT32 )
\r
7296 deviceFormat = SND_PCM_FORMAT_S32;
\r
7297 else if ( format == RTAUDIO_FLOAT32 )
\r
7298 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7299 else if ( format == RTAUDIO_FLOAT64 )
\r
7300 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7302 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7303 stream_.deviceFormat[mode] = format;
\r
7307 // The user requested format is not natively supported by the device.
\r
7308 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7309 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7310 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7314 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7315 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7316 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7320 deviceFormat = SND_PCM_FORMAT_S32;
\r
7321 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7322 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7326 deviceFormat = SND_PCM_FORMAT_S24;
\r
7327 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7328 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7332 deviceFormat = SND_PCM_FORMAT_S16;
\r
7333 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7334 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7338 deviceFormat = SND_PCM_FORMAT_S8;
\r
7339 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7340 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7344 // If we get here, no supported format was found.
\r
7345 snd_pcm_close( phandle );
\r
7346 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7347 errorText_ = errorStream_.str();
\r
7351 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7352 if ( result < 0 ) {
\r
7353 snd_pcm_close( phandle );
\r
7354 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7355 errorText_ = errorStream_.str();
\r
7359 // Determine whether byte-swaping is necessary.
\r
7360 stream_.doByteSwap[mode] = false;
\r
7361 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7362 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7363 if ( result == 0 )
\r
7364 stream_.doByteSwap[mode] = true;
\r
7365 else if (result < 0) {
\r
7366 snd_pcm_close( phandle );
\r
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7368 errorText_ = errorStream_.str();
\r
7373 // Set the sample rate.
\r
7374 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7375 if ( result < 0 ) {
\r
7376 snd_pcm_close( phandle );
\r
7377 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7378 errorText_ = errorStream_.str();
\r
7382 // Determine the number of channels for this device. We support a possible
\r
7383 // minimum device channel number > than the value requested by the user.
\r
7384 stream_.nUserChannels[mode] = channels;
\r
7385 unsigned int value;
\r
7386 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7387 unsigned int deviceChannels = value;
\r
7388 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7391 errorText_ = errorStream_.str();
\r
7395 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7396 if ( result < 0 ) {
\r
7397 snd_pcm_close( phandle );
\r
7398 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7399 errorText_ = errorStream_.str();
\r
7402 deviceChannels = value;
\r
7403 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7404 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7406 // Set the device channels.
\r
7407 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7408 if ( result < 0 ) {
\r
7409 snd_pcm_close( phandle );
\r
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7411 errorText_ = errorStream_.str();
\r
7415 // Set the buffer (or period) size.
\r
7417 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7418 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7419 if ( result < 0 ) {
\r
7420 snd_pcm_close( phandle );
\r
7421 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7422 errorText_ = errorStream_.str();
\r
7425 *bufferSize = periodSize;
\r
7427 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7428 unsigned int periods = 0;
\r
7429 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7430 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7431 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7432 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7433 if ( result < 0 ) {
\r
7434 snd_pcm_close( phandle );
\r
7435 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7436 errorText_ = errorStream_.str();
\r
7440 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7441 // MUST be the same in both directions!
\r
7442 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7443 snd_pcm_close( phandle );
\r
7444 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7445 errorText_ = errorStream_.str();
\r
7449 stream_.bufferSize = *bufferSize;
\r
7451 // Install the hardware configuration
\r
7452 result = snd_pcm_hw_params( phandle, hw_params );
\r
7453 if ( result < 0 ) {
\r
7454 snd_pcm_close( phandle );
\r
7455 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7456 errorText_ = errorStream_.str();
\r
7460 #if defined(__RTAUDIO_DEBUG__)
\r
7461 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7462 snd_pcm_hw_params_dump( hw_params, out );
\r
7465 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7466 snd_pcm_sw_params_t *sw_params = NULL;
\r
7467 snd_pcm_sw_params_alloca( &sw_params );
\r
7468 snd_pcm_sw_params_current( phandle, sw_params );
\r
7469 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7470 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7471 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7473 // The following two settings were suggested by Theo Veenker
\r
7474 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7475 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7477 // here are two options for a fix
\r
7478 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7479 snd_pcm_uframes_t val;
\r
7480 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7481 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7483 result = snd_pcm_sw_params( phandle, sw_params );
\r
7484 if ( result < 0 ) {
\r
7485 snd_pcm_close( phandle );
\r
7486 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7487 errorText_ = errorStream_.str();
\r
7491 #if defined(__RTAUDIO_DEBUG__)
\r
7492 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7493 snd_pcm_sw_params_dump( sw_params, out );
\r
7496 // Set flags for buffer conversion
\r
7497 stream_.doConvertBuffer[mode] = false;
\r
7498 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7499 stream_.doConvertBuffer[mode] = true;
\r
7500 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7501 stream_.doConvertBuffer[mode] = true;
\r
7502 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7503 stream_.nUserChannels[mode] > 1 )
\r
7504 stream_.doConvertBuffer[mode] = true;
\r
7506 // Allocate the ApiHandle if necessary and then save.
\r
7507 AlsaHandle *apiInfo = 0;
\r
7508 if ( stream_.apiHandle == 0 ) {
\r
7510 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7512 catch ( std::bad_alloc& ) {
\r
7513 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7517 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7518 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7522 stream_.apiHandle = (void *) apiInfo;
\r
7523 apiInfo->handles[0] = 0;
\r
7524 apiInfo->handles[1] = 0;
\r
7527 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7529 apiInfo->handles[mode] = phandle;
\r
7532 // Allocate necessary internal buffers.
\r
7533 unsigned long bufferBytes;
\r
7534 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7535 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7536 if ( stream_.userBuffer[mode] == NULL ) {
\r
7537 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7541 if ( stream_.doConvertBuffer[mode] ) {
\r
7543 bool makeBuffer = true;
\r
7544 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7545 if ( mode == INPUT ) {
\r
7546 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7547 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7548 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7552 if ( makeBuffer ) {
\r
7553 bufferBytes *= *bufferSize;
\r
7554 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7555 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7556 if ( stream_.deviceBuffer == NULL ) {
\r
7557 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7563 stream_.sampleRate = sampleRate;
\r
7564 stream_.nBuffers = periods;
\r
7565 stream_.device[mode] = device;
\r
7566 stream_.state = STREAM_STOPPED;
\r
7568 // Setup the buffer conversion information structure.
\r
7569 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7571 // Setup thread if necessary.
\r
7572 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7573 // We had already set up an output stream.
\r
7574 stream_.mode = DUPLEX;
\r
7575 // Link the streams if possible.
\r
7576 apiInfo->synchronized = false;
\r
7577 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7578 apiInfo->synchronized = true;
\r
7580 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7581 error( RtAudioError::WARNING );
\r
7585 stream_.mode = mode;
\r
7587 // Setup callback thread.
\r
7588 stream_.callbackInfo.object = (void *) this;
\r
7590 // Set the thread attributes for joinable and realtime scheduling
\r
7591 // priority (optional). The higher priority will only take affect
\r
7592 // if the program is run as root or suid. Note, under Linux
\r
7593 // processes with CAP_SYS_NICE privilege, a user can change
\r
7594 // scheduling policy and priority (thus need not be root). See
\r
7595 // POSIX "capabilities".
\r
7596 pthread_attr_t attr;
\r
7597 pthread_attr_init( &attr );
\r
7598 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7600 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7601 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7602 // We previously attempted to increase the audio callback priority
\r
7603 // to SCHED_RR here via the attributes. However, while no errors
\r
7604 // were reported in doing so, it did not work. So, now this is
\r
7605 // done in the alsaCallbackHandler function.
\r
7606 stream_.callbackInfo.doRealtime = true;
\r
7607 int priority = options->priority;
\r
7608 int min = sched_get_priority_min( SCHED_RR );
\r
7609 int max = sched_get_priority_max( SCHED_RR );
\r
7610 if ( priority < min ) priority = min;
\r
7611 else if ( priority > max ) priority = max;
\r
7612 stream_.callbackInfo.priority = priority;
\r
7616 stream_.callbackInfo.isRunning = true;
\r
7617 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7618 pthread_attr_destroy( &attr );
\r
7620 stream_.callbackInfo.isRunning = false;
\r
7621 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7630 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7631 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7632 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7634 stream_.apiHandle = 0;
\r
7637 if ( phandle) snd_pcm_close( phandle );
\r
7639 for ( int i=0; i<2; i++ ) {
\r
7640 if ( stream_.userBuffer[i] ) {
\r
7641 free( stream_.userBuffer[i] );
\r
7642 stream_.userBuffer[i] = 0;
\r
7646 if ( stream_.deviceBuffer ) {
\r
7647 free( stream_.deviceBuffer );
\r
7648 stream_.deviceBuffer = 0;
\r
7651 stream_.state = STREAM_CLOSED;
\r
7655 void RtApiAlsa :: closeStream()
\r
7657 if ( stream_.state == STREAM_CLOSED ) {
\r
7658 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7659 error( RtAudioError::WARNING );
\r
7663 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7664 stream_.callbackInfo.isRunning = false;
\r
7665 MUTEX_LOCK( &stream_.mutex );
\r
7666 if ( stream_.state == STREAM_STOPPED ) {
\r
7667 apiInfo->runnable = true;
\r
7668 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7670 MUTEX_UNLOCK( &stream_.mutex );
\r
7671 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7673 if ( stream_.state == STREAM_RUNNING ) {
\r
7674 stream_.state = STREAM_STOPPED;
\r
7675 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7676 snd_pcm_drop( apiInfo->handles[0] );
\r
7677 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7678 snd_pcm_drop( apiInfo->handles[1] );
\r
7682 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7683 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7684 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7686 stream_.apiHandle = 0;
\r
7689 for ( int i=0; i<2; i++ ) {
\r
7690 if ( stream_.userBuffer[i] ) {
\r
7691 free( stream_.userBuffer[i] );
\r
7692 stream_.userBuffer[i] = 0;
\r
7696 if ( stream_.deviceBuffer ) {
\r
7697 free( stream_.deviceBuffer );
\r
7698 stream_.deviceBuffer = 0;
\r
7701 stream_.mode = UNINITIALIZED;
\r
7702 stream_.state = STREAM_CLOSED;
\r
7705 void RtApiAlsa :: startStream()
\r
7707 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7710 if ( stream_.state == STREAM_RUNNING ) {
\r
7711 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7712 error( RtAudioError::WARNING );
\r
7716 MUTEX_LOCK( &stream_.mutex );
\r
7719 snd_pcm_state_t state;
\r
7720 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7721 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7722 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7723 state = snd_pcm_state( handle[0] );
\r
7724 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7725 result = snd_pcm_prepare( handle[0] );
\r
7726 if ( result < 0 ) {
\r
7727 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7728 errorText_ = errorStream_.str();
\r
7734 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7735 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7736 state = snd_pcm_state( handle[1] );
\r
7737 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7738 result = snd_pcm_prepare( handle[1] );
\r
7739 if ( result < 0 ) {
\r
7740 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7741 errorText_ = errorStream_.str();
\r
7747 stream_.state = STREAM_RUNNING;
\r
7750 apiInfo->runnable = true;
\r
7751 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7752 MUTEX_UNLOCK( &stream_.mutex );
\r
7754 if ( result >= 0 ) return;
\r
7755 error( RtAudioError::SYSTEM_ERROR );
\r
7758 void RtApiAlsa :: stopStream()
\r
7761 if ( stream_.state == STREAM_STOPPED ) {
\r
7762 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7763 error( RtAudioError::WARNING );
\r
7767 stream_.state = STREAM_STOPPED;
\r
7768 MUTEX_LOCK( &stream_.mutex );
\r
7771 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7772 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7773 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7774 if ( apiInfo->synchronized )
\r
7775 result = snd_pcm_drop( handle[0] );
\r
7777 result = snd_pcm_drain( handle[0] );
\r
7778 if ( result < 0 ) {
\r
7779 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7780 errorText_ = errorStream_.str();
\r
7785 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7786 result = snd_pcm_drop( handle[1] );
\r
7787 if ( result < 0 ) {
\r
7788 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7789 errorText_ = errorStream_.str();
\r
7795 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7796 MUTEX_UNLOCK( &stream_.mutex );
\r
7798 if ( result >= 0 ) return;
\r
7799 error( RtAudioError::SYSTEM_ERROR );
\r
7802 void RtApiAlsa :: abortStream()
\r
7805 if ( stream_.state == STREAM_STOPPED ) {
\r
7806 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7807 error( RtAudioError::WARNING );
\r
7811 stream_.state = STREAM_STOPPED;
\r
7812 MUTEX_LOCK( &stream_.mutex );
\r
7815 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7816 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7817 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7818 result = snd_pcm_drop( handle[0] );
\r
7819 if ( result < 0 ) {
\r
7820 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7821 errorText_ = errorStream_.str();
\r
7826 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7827 result = snd_pcm_drop( handle[1] );
\r
7828 if ( result < 0 ) {
\r
7829 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7830 errorText_ = errorStream_.str();
\r
7836 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7837 MUTEX_UNLOCK( &stream_.mutex );
\r
7839 if ( result >= 0 ) return;
\r
7840 error( RtAudioError::SYSTEM_ERROR );
\r
7843 void RtApiAlsa :: callbackEvent()
\r
7845 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7846 if ( stream_.state == STREAM_STOPPED ) {
\r
7847 MUTEX_LOCK( &stream_.mutex );
\r
7848 while ( !apiInfo->runnable )
\r
7849 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7851 if ( stream_.state != STREAM_RUNNING ) {
\r
7852 MUTEX_UNLOCK( &stream_.mutex );
\r
7855 MUTEX_UNLOCK( &stream_.mutex );
\r
7858 if ( stream_.state == STREAM_CLOSED ) {
\r
7859 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7860 error( RtAudioError::WARNING );
\r
7864 int doStopStream = 0;
\r
7865 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7866 double streamTime = getStreamTime();
\r
7867 RtAudioStreamStatus status = 0;
\r
7868 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7869 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7870 apiInfo->xrun[0] = false;
\r
7872 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7873 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7874 apiInfo->xrun[1] = false;
\r
7876 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7877 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7879 if ( doStopStream == 2 ) {
\r
7884 MUTEX_LOCK( &stream_.mutex );
\r
7886 // The state might change while waiting on a mutex.
\r
7887 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7892 snd_pcm_t **handle;
\r
7893 snd_pcm_sframes_t frames;
\r
7894 RtAudioFormat format;
\r
7895 handle = (snd_pcm_t **) apiInfo->handles;
\r
7897 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7899 // Setup parameters.
\r
7900 if ( stream_.doConvertBuffer[1] ) {
\r
7901 buffer = stream_.deviceBuffer;
\r
7902 channels = stream_.nDeviceChannels[1];
\r
7903 format = stream_.deviceFormat[1];
\r
7906 buffer = stream_.userBuffer[1];
\r
7907 channels = stream_.nUserChannels[1];
\r
7908 format = stream_.userFormat;
\r
7911 // Read samples from device in interleaved/non-interleaved format.
\r
7912 if ( stream_.deviceInterleaved[1] )
\r
7913 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7915 void *bufs[channels];
\r
7916 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7917 for ( int i=0; i<channels; i++ )
\r
7918 bufs[i] = (void *) (buffer + (i * offset));
\r
7919 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7922 if ( result < (int) stream_.bufferSize ) {
\r
7923 // Either an error or overrun occured.
\r
7924 if ( result == -EPIPE ) {
\r
7925 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7926 if ( state == SND_PCM_STATE_XRUN ) {
\r
7927 apiInfo->xrun[1] = true;
\r
7928 result = snd_pcm_prepare( handle[1] );
\r
7929 if ( result < 0 ) {
\r
7930 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7931 errorText_ = errorStream_.str();
\r
7935 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7936 errorText_ = errorStream_.str();
\r
7940 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7941 errorText_ = errorStream_.str();
\r
7943 error( RtAudioError::WARNING );
\r
7947 // Do byte swapping if necessary.
\r
7948 if ( stream_.doByteSwap[1] )
\r
7949 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7951 // Do buffer conversion if necessary.
\r
7952 if ( stream_.doConvertBuffer[1] )
\r
7953 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7955 // Check stream latency
\r
7956 result = snd_pcm_delay( handle[1], &frames );
\r
7957 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7962 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7964 // Setup parameters and do buffer conversion if necessary.
\r
7965 if ( stream_.doConvertBuffer[0] ) {
\r
7966 buffer = stream_.deviceBuffer;
\r
7967 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7968 channels = stream_.nDeviceChannels[0];
\r
7969 format = stream_.deviceFormat[0];
\r
7972 buffer = stream_.userBuffer[0];
\r
7973 channels = stream_.nUserChannels[0];
\r
7974 format = stream_.userFormat;
\r
7977 // Do byte swapping if necessary.
\r
7978 if ( stream_.doByteSwap[0] )
\r
7979 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7981 // Write samples to device in interleaved/non-interleaved format.
\r
7982 if ( stream_.deviceInterleaved[0] )
\r
7983 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7985 void *bufs[channels];
\r
7986 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7987 for ( int i=0; i<channels; i++ )
\r
7988 bufs[i] = (void *) (buffer + (i * offset));
\r
7989 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7992 if ( result < (int) stream_.bufferSize ) {
\r
7993 // Either an error or underrun occured.
\r
7994 if ( result == -EPIPE ) {
\r
7995 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7996 if ( state == SND_PCM_STATE_XRUN ) {
\r
7997 apiInfo->xrun[0] = true;
\r
7998 result = snd_pcm_prepare( handle[0] );
\r
7999 if ( result < 0 ) {
\r
8000 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8001 errorText_ = errorStream_.str();
\r
8005 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8006 errorText_ = errorStream_.str();
\r
8010 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8011 errorText_ = errorStream_.str();
\r
8013 error( RtAudioError::WARNING );
\r
8017 // Check stream latency
\r
8018 result = snd_pcm_delay( handle[0], &frames );
\r
8019 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8023 MUTEX_UNLOCK( &stream_.mutex );
\r
8025 RtApi::tickStreamTime();
\r
8026 if ( doStopStream == 1 ) this->stopStream();
\r
8029 static void *alsaCallbackHandler( void *ptr )
\r
8031 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8032 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8033 bool *isRunning = &info->isRunning;
\r
8035 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8036 if ( &info->doRealtime ) {
\r
8037 pthread_t tID = pthread_self(); // ID of this thread
\r
8038 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8039 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8043 while ( *isRunning == true ) {
\r
8044 pthread_testcancel();
\r
8045 object->callbackEvent();
\r
8048 pthread_exit( NULL );
\r
8051 //******************** End of __LINUX_ALSA__ *********************//
\r
8054 #if defined(__LINUX_PULSE__)
\r
8056 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8057 // and Tristan Matthews.
\r
8059 #include <pulse/error.h>
\r
8060 #include <pulse/simple.h>
\r
8063 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8064 44100, 48000, 96000, 0};
\r
8066 struct rtaudio_pa_format_mapping_t {
\r
8067 RtAudioFormat rtaudio_format;
\r
8068 pa_sample_format_t pa_format;
\r
8071 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8072 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8073 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8074 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8075 {0, PA_SAMPLE_INVALID}};
\r
8077 struct PulseAudioHandle {
\r
8078 pa_simple *s_play;
\r
8081 pthread_cond_t runnable_cv;
\r
8083 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8086 RtApiPulse::~RtApiPulse()
\r
8088 if ( stream_.state != STREAM_CLOSED )
\r
8092 unsigned int RtApiPulse::getDeviceCount( void )
\r
8097 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8099 RtAudio::DeviceInfo info;
\r
8100 info.probed = true;
\r
8101 info.name = "PulseAudio";
\r
8102 info.outputChannels = 2;
\r
8103 info.inputChannels = 2;
\r
8104 info.duplexChannels = 2;
\r
8105 info.isDefaultOutput = true;
\r
8106 info.isDefaultInput = true;
\r
8108 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8109 info.sampleRates.push_back( *sr );
\r
8111 info.preferredSampleRate = 48000;
\r
8112 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8117 static void *pulseaudio_callback( void * user )
\r
8119 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8120 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8121 volatile bool *isRunning = &cbi->isRunning;
\r
8123 while ( *isRunning ) {
\r
8124 pthread_testcancel();
\r
8125 context->callbackEvent();
\r
8128 pthread_exit( NULL );
\r
8131 void RtApiPulse::closeStream( void )
\r
8133 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8135 stream_.callbackInfo.isRunning = false;
\r
8137 MUTEX_LOCK( &stream_.mutex );
\r
8138 if ( stream_.state == STREAM_STOPPED ) {
\r
8139 pah->runnable = true;
\r
8140 pthread_cond_signal( &pah->runnable_cv );
\r
8142 MUTEX_UNLOCK( &stream_.mutex );
\r
8144 pthread_join( pah->thread, 0 );
\r
8145 if ( pah->s_play ) {
\r
8146 pa_simple_flush( pah->s_play, NULL );
\r
8147 pa_simple_free( pah->s_play );
\r
8150 pa_simple_free( pah->s_rec );
\r
8152 pthread_cond_destroy( &pah->runnable_cv );
\r
8154 stream_.apiHandle = 0;
\r
8157 if ( stream_.userBuffer[0] ) {
\r
8158 free( stream_.userBuffer[0] );
\r
8159 stream_.userBuffer[0] = 0;
\r
8161 if ( stream_.userBuffer[1] ) {
\r
8162 free( stream_.userBuffer[1] );
\r
8163 stream_.userBuffer[1] = 0;
\r
8166 stream_.state = STREAM_CLOSED;
\r
8167 stream_.mode = UNINITIALIZED;
\r
8170 void RtApiPulse::callbackEvent( void )
\r
8172 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8174 if ( stream_.state == STREAM_STOPPED ) {
\r
8175 MUTEX_LOCK( &stream_.mutex );
\r
8176 while ( !pah->runnable )
\r
8177 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8179 if ( stream_.state != STREAM_RUNNING ) {
\r
8180 MUTEX_UNLOCK( &stream_.mutex );
\r
8183 MUTEX_UNLOCK( &stream_.mutex );
\r
8186 if ( stream_.state == STREAM_CLOSED ) {
\r
8187 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8188 "this shouldn't happen!";
\r
8189 error( RtAudioError::WARNING );
\r
8193 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8194 double streamTime = getStreamTime();
\r
8195 RtAudioStreamStatus status = 0;
\r
8196 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8197 stream_.bufferSize, streamTime, status,
\r
8198 stream_.callbackInfo.userData );
\r
8200 if ( doStopStream == 2 ) {
\r
8205 MUTEX_LOCK( &stream_.mutex );
\r
8206 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8207 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8209 if ( stream_.state != STREAM_RUNNING )
\r
8214 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8215 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8216 convertBuffer( stream_.deviceBuffer,
\r
8217 stream_.userBuffer[OUTPUT],
\r
8218 stream_.convertInfo[OUTPUT] );
\r
8219 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8220 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8222 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8223 formatBytes( stream_.userFormat );
\r
8225 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8226 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8227 pa_strerror( pa_error ) << ".";
\r
8228 errorText_ = errorStream_.str();
\r
8229 error( RtAudioError::WARNING );
\r
8233 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8234 if ( stream_.doConvertBuffer[INPUT] )
\r
8235 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8236 formatBytes( stream_.deviceFormat[INPUT] );
\r
8238 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8239 formatBytes( stream_.userFormat );
\r
8241 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8242 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8243 pa_strerror( pa_error ) << ".";
\r
8244 errorText_ = errorStream_.str();
\r
8245 error( RtAudioError::WARNING );
\r
8247 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8248 convertBuffer( stream_.userBuffer[INPUT],
\r
8249 stream_.deviceBuffer,
\r
8250 stream_.convertInfo[INPUT] );
\r
8255 MUTEX_UNLOCK( &stream_.mutex );
\r
8256 RtApi::tickStreamTime();
\r
8258 if ( doStopStream == 1 )
\r
8262 void RtApiPulse::startStream( void )
\r
8264 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8266 if ( stream_.state == STREAM_CLOSED ) {
\r
8267 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8268 error( RtAudioError::INVALID_USE );
\r
8271 if ( stream_.state == STREAM_RUNNING ) {
\r
8272 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8273 error( RtAudioError::WARNING );
\r
8277 MUTEX_LOCK( &stream_.mutex );
\r
8279 stream_.state = STREAM_RUNNING;
\r
8281 pah->runnable = true;
\r
8282 pthread_cond_signal( &pah->runnable_cv );
\r
8283 MUTEX_UNLOCK( &stream_.mutex );
\r
8286 void RtApiPulse::stopStream( void )
\r
8288 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8290 if ( stream_.state == STREAM_CLOSED ) {
\r
8291 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8292 error( RtAudioError::INVALID_USE );
\r
8295 if ( stream_.state == STREAM_STOPPED ) {
\r
8296 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8297 error( RtAudioError::WARNING );
\r
8301 stream_.state = STREAM_STOPPED;
\r
8302 MUTEX_LOCK( &stream_.mutex );
\r
8304 if ( pah && pah->s_play ) {
\r
8306 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8307 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8308 pa_strerror( pa_error ) << ".";
\r
8309 errorText_ = errorStream_.str();
\r
8310 MUTEX_UNLOCK( &stream_.mutex );
\r
8311 error( RtAudioError::SYSTEM_ERROR );
\r
8316 stream_.state = STREAM_STOPPED;
\r
8317 MUTEX_UNLOCK( &stream_.mutex );
\r
8320 void RtApiPulse::abortStream( void )
\r
8322 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8324 if ( stream_.state == STREAM_CLOSED ) {
\r
8325 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8326 error( RtAudioError::INVALID_USE );
\r
8329 if ( stream_.state == STREAM_STOPPED ) {
\r
8330 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8331 error( RtAudioError::WARNING );
\r
8335 stream_.state = STREAM_STOPPED;
\r
8336 MUTEX_LOCK( &stream_.mutex );
\r
8338 if ( pah && pah->s_play ) {
\r
8340 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8341 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8342 pa_strerror( pa_error ) << ".";
\r
8343 errorText_ = errorStream_.str();
\r
8344 MUTEX_UNLOCK( &stream_.mutex );
\r
8345 error( RtAudioError::SYSTEM_ERROR );
\r
8350 stream_.state = STREAM_STOPPED;
\r
8351 MUTEX_UNLOCK( &stream_.mutex );
\r
8354 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8355 unsigned int channels, unsigned int firstChannel,
\r
8356 unsigned int sampleRate, RtAudioFormat format,
\r
8357 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8359 PulseAudioHandle *pah = 0;
\r
8360 unsigned long bufferBytes = 0;
\r
8361 pa_sample_spec ss;
\r
8363 if ( device != 0 ) return false;
\r
8364 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8365 if ( channels != 1 && channels != 2 ) {
\r
8366 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8369 ss.channels = channels;
\r
8371 if ( firstChannel != 0 ) return false;
\r
8373 bool sr_found = false;
\r
8374 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8375 if ( sampleRate == *sr ) {
\r
8377 stream_.sampleRate = sampleRate;
\r
8378 ss.rate = sampleRate;
\r
8382 if ( !sr_found ) {
\r
8383 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8387 bool sf_found = 0;
\r
8388 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8389 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8390 if ( format == sf->rtaudio_format ) {
\r
8392 stream_.userFormat = sf->rtaudio_format;
\r
8393 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8394 ss.format = sf->pa_format;
\r
8398 if ( !sf_found ) { // Use internal data format conversion.
\r
8399 stream_.userFormat = format;
\r
8400 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8401 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8404 // Set other stream parameters.
\r
8405 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8406 else stream_.userInterleaved = true;
\r
8407 stream_.deviceInterleaved[mode] = true;
\r
8408 stream_.nBuffers = 1;
\r
8409 stream_.doByteSwap[mode] = false;
\r
8410 stream_.nUserChannels[mode] = channels;
\r
8411 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8412 stream_.channelOffset[mode] = 0;
\r
8413 std::string streamName = "RtAudio";
\r
8415 // Set flags for buffer conversion.
\r
8416 stream_.doConvertBuffer[mode] = false;
\r
8417 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8418 stream_.doConvertBuffer[mode] = true;
\r
8419 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8420 stream_.doConvertBuffer[mode] = true;
\r
8422 // Allocate necessary internal buffers.
\r
8423 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8424 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8425 if ( stream_.userBuffer[mode] == NULL ) {
\r
8426 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8429 stream_.bufferSize = *bufferSize;
\r
8431 if ( stream_.doConvertBuffer[mode] ) {
\r
8433 bool makeBuffer = true;
\r
8434 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8435 if ( mode == INPUT ) {
\r
8436 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8437 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8438 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8442 if ( makeBuffer ) {
\r
8443 bufferBytes *= *bufferSize;
\r
8444 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8445 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8446 if ( stream_.deviceBuffer == NULL ) {
\r
8447 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8453 stream_.device[mode] = device;
\r
8455 // Setup the buffer conversion information structure.
\r
8456 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8458 if ( !stream_.apiHandle ) {
\r
8459 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8461 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8465 stream_.apiHandle = pah;
\r
8466 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8467 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8471 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8474 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8477 pa_buffer_attr buffer_attr;
\r
8478 buffer_attr.fragsize = bufferBytes;
\r
8479 buffer_attr.maxlength = -1;
\r
8481 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8482 if ( !pah->s_rec ) {
\r
8483 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8488 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8489 if ( !pah->s_play ) {
\r
8490 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8498 if ( stream_.mode == UNINITIALIZED )
\r
8499 stream_.mode = mode;
\r
8500 else if ( stream_.mode == mode )
\r
8503 stream_.mode = DUPLEX;
\r
8505 if ( !stream_.callbackInfo.isRunning ) {
\r
8506 stream_.callbackInfo.object = this;
\r
8507 stream_.callbackInfo.isRunning = true;
\r
8508 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8509 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8514 stream_.state = STREAM_STOPPED;
\r
8518 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8519 pthread_cond_destroy( &pah->runnable_cv );
\r
8521 stream_.apiHandle = 0;
\r
8524 for ( int i=0; i<2; i++ ) {
\r
8525 if ( stream_.userBuffer[i] ) {
\r
8526 free( stream_.userBuffer[i] );
\r
8527 stream_.userBuffer[i] = 0;
\r
8531 if ( stream_.deviceBuffer ) {
\r
8532 free( stream_.deviceBuffer );
\r
8533 stream_.deviceBuffer = 0;
\r
8539 //******************** End of __LINUX_PULSE__ *********************//
\r
8542 #if defined(__LINUX_OSS__)
\r
8544 #include <unistd.h>
\r
8545 #include <sys/ioctl.h>
\r
8546 #include <unistd.h>
\r
8547 #include <fcntl.h>
\r
8548 #include <sys/soundcard.h>
\r
8549 #include <errno.h>
\r
8552 static void *ossCallbackHandler(void * ptr);
\r
8554 // A structure to hold various information related to the OSS API
\r
8555 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];              // device ids (playback, record)
  bool xrun[2];           // over/underrun flags per direction
  bool triggered;         // output has been primed/triggered
  pthread_cond_t runnable;  // signaled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8566 RtApiOss :: RtApiOss()
\r
8568 // Nothing to do here.
\r
8571 RtApiOss :: ~RtApiOss()
\r
8573 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8576 unsigned int RtApiOss :: getDeviceCount( void )
\r
8578 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8579 if ( mixerfd == -1 ) {
\r
8580 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8581 error( RtAudioError::WARNING );
\r
8585 oss_sysinfo sysinfo;
\r
8586 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8588 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8589 error( RtAudioError::WARNING );
\r
8594 return sysinfo.numaudios;
\r
8597 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8599 RtAudio::DeviceInfo info;
\r
8600 info.probed = false;
\r
8602 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8603 if ( mixerfd == -1 ) {
\r
8604 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8605 error( RtAudioError::WARNING );
\r
8609 oss_sysinfo sysinfo;
\r
8610 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8611 if ( result == -1 ) {
\r
8613 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8614 error( RtAudioError::WARNING );
\r
8618 unsigned nDevices = sysinfo.numaudios;
\r
8619 if ( nDevices == 0 ) {
\r
8621 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8622 error( RtAudioError::INVALID_USE );
\r
8626 if ( device >= nDevices ) {
\r
8628 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8629 error( RtAudioError::INVALID_USE );
\r
8633 oss_audioinfo ainfo;
\r
8634 ainfo.dev = device;
\r
8635 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8637 if ( result == -1 ) {
\r
8638 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8639 errorText_ = errorStream_.str();
\r
8640 error( RtAudioError::WARNING );
\r
8645 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8646 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8647 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8648 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8649 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8652 // Probe data formats ... do for input
\r
8653 unsigned long mask = ainfo.iformats;
\r
8654 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8655 info.nativeFormats |= RTAUDIO_SINT16;
\r
8656 if ( mask & AFMT_S8 )
\r
8657 info.nativeFormats |= RTAUDIO_SINT8;
\r
8658 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8659 info.nativeFormats |= RTAUDIO_SINT32;
\r
8660 if ( mask & AFMT_FLOAT )
\r
8661 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8662 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8663 info.nativeFormats |= RTAUDIO_SINT24;
\r
8665 // Check that we have at least one supported format
\r
8666 if ( info.nativeFormats == 0 ) {
\r
8667 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8668 errorText_ = errorStream_.str();
\r
8669 error( RtAudioError::WARNING );
\r
8673 // Probe the supported sample rates.
\r
8674 info.sampleRates.clear();
\r
8675 if ( ainfo.nrates ) {
\r
8676 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8677 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8678 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8679 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8681 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8682 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8690 // Check min and max rate values;
\r
8691 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8692 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8693 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8695 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8696 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8701 if ( info.sampleRates.size() == 0 ) {
\r
8702 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8703 errorText_ = errorStream_.str();
\r
8704 error( RtAudioError::WARNING );
\r
8707 info.probed = true;
\r
8708 info.name = ainfo.name;
\r
8715 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8716 unsigned int firstChannel, unsigned int sampleRate,
\r
8717 RtAudioFormat format, unsigned int *bufferSize,
\r
8718 RtAudio::StreamOptions *options )
\r
8720 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8721 if ( mixerfd == -1 ) {
\r
8722 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8726 oss_sysinfo sysinfo;
\r
8727 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8728 if ( result == -1 ) {
\r
8730 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8734 unsigned nDevices = sysinfo.numaudios;
\r
8735 if ( nDevices == 0 ) {
\r
8736 // This should not happen because a check is made before this function is called.
\r
8738 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8742 if ( device >= nDevices ) {
\r
8743 // This should not happen because a check is made before this function is called.
\r
8745 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8749 oss_audioinfo ainfo;
\r
8750 ainfo.dev = device;
\r
8751 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8753 if ( result == -1 ) {
\r
8754 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8755 errorText_ = errorStream_.str();
\r
8759 // Check if device supports input or output
\r
8760 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8761 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8762 if ( mode == OUTPUT )
\r
8763 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8765 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8766 errorText_ = errorStream_.str();
\r
8771 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8772 if ( mode == OUTPUT )
\r
8773 flags |= O_WRONLY;
\r
8774 else { // mode == INPUT
\r
8775 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8776 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8777 close( handle->id[0] );
\r
8778 handle->id[0] = 0;
\r
8779 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8780 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8781 errorText_ = errorStream_.str();
\r
8784 // Check that the number previously set channels is the same.
\r
8785 if ( stream_.nUserChannels[0] != channels ) {
\r
8786 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8787 errorText_ = errorStream_.str();
\r
8793 flags |= O_RDONLY;
\r
8796 // Set exclusive access if specified.
\r
8797 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8799 // Try to open the device.
\r
8801 fd = open( ainfo.devnode, flags, 0 );
\r
8803 if ( errno == EBUSY )
\r
8804 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8806 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8807 errorText_ = errorStream_.str();
\r
8811 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8813 if ( flags | O_RDWR ) {
\r
8814 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8815 if ( result == -1) {
\r
8816 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8817 errorText_ = errorStream_.str();
\r
8823 // Check the device channel support.
\r
8824 stream_.nUserChannels[mode] = channels;
\r
8825 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8827 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8828 errorText_ = errorStream_.str();
\r
8832 // Set the number of channels.
\r
8833 int deviceChannels = channels + firstChannel;
\r
8834 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8835 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8837 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8838 errorText_ = errorStream_.str();
\r
8841 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8843 // Get the data format mask
\r
8845 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8846 if ( result == -1 ) {
\r
8848 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8849 errorText_ = errorStream_.str();
\r
8853 // Determine how to set the device format.
\r
8854 stream_.userFormat = format;
\r
8855 int deviceFormat = -1;
\r
8856 stream_.doByteSwap[mode] = false;
\r
8857 if ( format == RTAUDIO_SINT8 ) {
\r
8858 if ( mask & AFMT_S8 ) {
\r
8859 deviceFormat = AFMT_S8;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8863 else if ( format == RTAUDIO_SINT16 ) {
\r
8864 if ( mask & AFMT_S16_NE ) {
\r
8865 deviceFormat = AFMT_S16_NE;
\r
8866 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8868 else if ( mask & AFMT_S16_OE ) {
\r
8869 deviceFormat = AFMT_S16_OE;
\r
8870 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8871 stream_.doByteSwap[mode] = true;
\r
8874 else if ( format == RTAUDIO_SINT24 ) {
\r
8875 if ( mask & AFMT_S24_NE ) {
\r
8876 deviceFormat = AFMT_S24_NE;
\r
8877 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8879 else if ( mask & AFMT_S24_OE ) {
\r
8880 deviceFormat = AFMT_S24_OE;
\r
8881 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8882 stream_.doByteSwap[mode] = true;
\r
8885 else if ( format == RTAUDIO_SINT32 ) {
\r
8886 if ( mask & AFMT_S32_NE ) {
\r
8887 deviceFormat = AFMT_S32_NE;
\r
8888 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8890 else if ( mask & AFMT_S32_OE ) {
\r
8891 deviceFormat = AFMT_S32_OE;
\r
8892 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8893 stream_.doByteSwap[mode] = true;
\r
8897 if ( deviceFormat == -1 ) {
\r
8898 // The user requested format is not natively supported by the device.
\r
8899 if ( mask & AFMT_S16_NE ) {
\r
8900 deviceFormat = AFMT_S16_NE;
\r
8901 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8903 else if ( mask & AFMT_S32_NE ) {
\r
8904 deviceFormat = AFMT_S32_NE;
\r
8905 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8907 else if ( mask & AFMT_S24_NE ) {
\r
8908 deviceFormat = AFMT_S24_NE;
\r
8909 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8911 else if ( mask & AFMT_S16_OE ) {
\r
8912 deviceFormat = AFMT_S16_OE;
\r
8913 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8914 stream_.doByteSwap[mode] = true;
\r
8916 else if ( mask & AFMT_S32_OE ) {
\r
8917 deviceFormat = AFMT_S32_OE;
\r
8918 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8919 stream_.doByteSwap[mode] = true;
\r
8921 else if ( mask & AFMT_S24_OE ) {
\r
8922 deviceFormat = AFMT_S24_OE;
\r
8923 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8924 stream_.doByteSwap[mode] = true;
\r
8926 else if ( mask & AFMT_S8) {
\r
8927 deviceFormat = AFMT_S8;
\r
8928 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8932 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8933 // This really shouldn't happen ...
\r
8935 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8936 errorText_ = errorStream_.str();
\r
8940 // Set the data format.
\r
8941 int temp = deviceFormat;
\r
8942 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8943 if ( result == -1 || deviceFormat != temp ) {
\r
8945 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8946 errorText_ = errorStream_.str();
\r
8950 // Attempt to set the buffer size. According to OSS, the minimum
\r
8951 // number of buffers is two. The supposed minimum buffer size is 16
\r
8952 // bytes, so that will be our lower bound. The argument to this
\r
8953 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8954 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8955 // We'll check the actual value used near the end of the setup
\r
8957 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8958 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8960 if ( options ) buffers = options->numberOfBuffers;
\r
8961 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8962 if ( buffers < 2 ) buffers = 3;
\r
8963 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8964 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8965 if ( result == -1 ) {
\r
8967 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8968 errorText_ = errorStream_.str();
\r
8971 stream_.nBuffers = buffers;
\r
8973 // Save buffer size (in sample frames).
\r
8974 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8975 stream_.bufferSize = *bufferSize;
\r
8977 // Set the sample rate.
\r
8978 int srate = sampleRate;
\r
8979 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8980 if ( result == -1 ) {
\r
8982 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8983 errorText_ = errorStream_.str();
\r
8987 // Verify the sample rate setup worked.
\r
8988 if ( abs( srate - sampleRate ) > 100 ) {
\r
8990 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8991 errorText_ = errorStream_.str();
\r
8994 stream_.sampleRate = sampleRate;
\r
8996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8997 // We're doing duplex setup here.
\r
8998 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8999 stream_.nDeviceChannels[0] = deviceChannels;
\r
9002 // Set interleaving parameters.
\r
9003 stream_.userInterleaved = true;
\r
9004 stream_.deviceInterleaved[mode] = true;
\r
9005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9006 stream_.userInterleaved = false;
\r
9008 // Set flags for buffer conversion
\r
9009 stream_.doConvertBuffer[mode] = false;
\r
9010 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9011 stream_.doConvertBuffer[mode] = true;
\r
9012 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9013 stream_.doConvertBuffer[mode] = true;
\r
9014 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9015 stream_.nUserChannels[mode] > 1 )
\r
9016 stream_.doConvertBuffer[mode] = true;
\r
9018 // Allocate the stream handles if necessary and then save.
\r
9019 if ( stream_.apiHandle == 0 ) {
\r
9021 handle = new OssHandle;
\r
9023 catch ( std::bad_alloc& ) {
\r
9024 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9028 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9029 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9033 stream_.apiHandle = (void *) handle;
\r
9036 handle = (OssHandle *) stream_.apiHandle;
\r
9038 handle->id[mode] = fd;
\r
9040 // Allocate necessary internal buffers.
\r
9041 unsigned long bufferBytes;
\r
9042 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9043 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9044 if ( stream_.userBuffer[mode] == NULL ) {
\r
9045 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9049 if ( stream_.doConvertBuffer[mode] ) {
\r
9051 bool makeBuffer = true;
\r
9052 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9053 if ( mode == INPUT ) {
\r
9054 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9055 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9056 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9060 if ( makeBuffer ) {
\r
9061 bufferBytes *= *bufferSize;
\r
9062 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9063 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9064 if ( stream_.deviceBuffer == NULL ) {
\r
9065 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9071 stream_.device[mode] = device;
\r
9072 stream_.state = STREAM_STOPPED;
\r
9074 // Setup the buffer conversion information structure.
\r
9075 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9077 // Setup thread if necessary.
\r
9078 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9079 // We had already set up an output stream.
\r
9080 stream_.mode = DUPLEX;
\r
9081 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9084 stream_.mode = mode;
\r
9086 // Setup callback thread.
\r
9087 stream_.callbackInfo.object = (void *) this;
\r
9089 // Set the thread attributes for joinable and realtime scheduling
\r
9090 // priority. The higher priority will only take affect if the
\r
9091 // program is run as root or suid.
\r
9092 pthread_attr_t attr;
\r
9093 pthread_attr_init( &attr );
\r
9094 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9095 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9096 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9097 struct sched_param param;
\r
9098 int priority = options->priority;
\r
9099 int min = sched_get_priority_min( SCHED_RR );
\r
9100 int max = sched_get_priority_max( SCHED_RR );
\r
9101 if ( priority < min ) priority = min;
\r
9102 else if ( priority > max ) priority = max;
\r
9103 param.sched_priority = priority;
\r
9104 pthread_attr_setschedparam( &attr, ¶m );
\r
9105 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9108 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9110 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9113 stream_.callbackInfo.isRunning = true;
\r
9114 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9115 pthread_attr_destroy( &attr );
\r
9117 stream_.callbackInfo.isRunning = false;
\r
9118 errorText_ = "RtApiOss::error creating callback thread!";
\r
9127 pthread_cond_destroy( &handle->runnable );
\r
9128 if ( handle->id[0] ) close( handle->id[0] );
\r
9129 if ( handle->id[1] ) close( handle->id[1] );
\r
9131 stream_.apiHandle = 0;
\r
9134 for ( int i=0; i<2; i++ ) {
\r
9135 if ( stream_.userBuffer[i] ) {
\r
9136 free( stream_.userBuffer[i] );
\r
9137 stream_.userBuffer[i] = 0;
\r
9141 if ( stream_.deviceBuffer ) {
\r
9142 free( stream_.deviceBuffer );
\r
9143 stream_.deviceBuffer = 0;
\r
9149 void RtApiOss :: closeStream()
\r
9151 if ( stream_.state == STREAM_CLOSED ) {
\r
9152 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9153 error( RtAudioError::WARNING );
\r
9157 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9158 stream_.callbackInfo.isRunning = false;
\r
9159 MUTEX_LOCK( &stream_.mutex );
\r
9160 if ( stream_.state == STREAM_STOPPED )
\r
9161 pthread_cond_signal( &handle->runnable );
\r
9162 MUTEX_UNLOCK( &stream_.mutex );
\r
9163 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9165 if ( stream_.state == STREAM_RUNNING ) {
\r
9166 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9167 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9169 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9170 stream_.state = STREAM_STOPPED;
\r
9174 pthread_cond_destroy( &handle->runnable );
\r
9175 if ( handle->id[0] ) close( handle->id[0] );
\r
9176 if ( handle->id[1] ) close( handle->id[1] );
\r
9178 stream_.apiHandle = 0;
\r
9181 for ( int i=0; i<2; i++ ) {
\r
9182 if ( stream_.userBuffer[i] ) {
\r
9183 free( stream_.userBuffer[i] );
\r
9184 stream_.userBuffer[i] = 0;
\r
9188 if ( stream_.deviceBuffer ) {
\r
9189 free( stream_.deviceBuffer );
\r
9190 stream_.deviceBuffer = 0;
\r
9193 stream_.mode = UNINITIALIZED;
\r
9194 stream_.state = STREAM_CLOSED;
\r
9197 void RtApiOss :: startStream()
\r
9200 if ( stream_.state == STREAM_RUNNING ) {
\r
9201 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9202 error( RtAudioError::WARNING );
\r
9206 MUTEX_LOCK( &stream_.mutex );
\r
9208 stream_.state = STREAM_RUNNING;
\r
9210 // No need to do anything else here ... OSS automatically starts
\r
9211 // when fed samples.
\r
9213 MUTEX_UNLOCK( &stream_.mutex );
\r
9215 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9216 pthread_cond_signal( &handle->runnable );
\r
9219 void RtApiOss :: stopStream()
\r
9222 if ( stream_.state == STREAM_STOPPED ) {
\r
9223 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9224 error( RtAudioError::WARNING );
\r
9228 MUTEX_LOCK( &stream_.mutex );
\r
9230 // The state might change while waiting on a mutex.
\r
9231 if ( stream_.state == STREAM_STOPPED ) {
\r
9232 MUTEX_UNLOCK( &stream_.mutex );
\r
9237 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9238 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9240 // Flush the output with zeros a few times.
\r
9243 RtAudioFormat format;
\r
9245 if ( stream_.doConvertBuffer[0] ) {
\r
9246 buffer = stream_.deviceBuffer;
\r
9247 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9248 format = stream_.deviceFormat[0];
\r
9251 buffer = stream_.userBuffer[0];
\r
9252 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9253 format = stream_.userFormat;
\r
9256 memset( buffer, 0, samples * formatBytes(format) );
\r
9257 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9258 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9259 if ( result == -1 ) {
\r
9260 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9261 error( RtAudioError::WARNING );
\r
9265 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9266 if ( result == -1 ) {
\r
9267 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9268 errorText_ = errorStream_.str();
\r
9271 handle->triggered = false;
\r
9274 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9275 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9276 if ( result == -1 ) {
\r
9277 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9278 errorText_ = errorStream_.str();
\r
9284 stream_.state = STREAM_STOPPED;
\r
9285 MUTEX_UNLOCK( &stream_.mutex );
\r
9287 if ( result != -1 ) return;
\r
9288 error( RtAudioError::SYSTEM_ERROR );
\r
9291 void RtApiOss :: abortStream()
\r
9294 if ( stream_.state == STREAM_STOPPED ) {
\r
9295 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9296 error( RtAudioError::WARNING );
\r
9300 MUTEX_LOCK( &stream_.mutex );
\r
9302 // The state might change while waiting on a mutex.
\r
9303 if ( stream_.state == STREAM_STOPPED ) {
\r
9304 MUTEX_UNLOCK( &stream_.mutex );
\r
9309 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9310 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9311 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9312 if ( result == -1 ) {
\r
9313 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9314 errorText_ = errorStream_.str();
\r
9317 handle->triggered = false;
\r
9320 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9321 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9322 if ( result == -1 ) {
\r
9323 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9324 errorText_ = errorStream_.str();
\r
9330 stream_.state = STREAM_STOPPED;
\r
9331 MUTEX_UNLOCK( &stream_.mutex );
\r
9333 if ( result != -1 ) return;
\r
9334 error( RtAudioError::SYSTEM_ERROR );
\r
9337 void RtApiOss :: callbackEvent()
\r
9339 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9340 if ( stream_.state == STREAM_STOPPED ) {
\r
9341 MUTEX_LOCK( &stream_.mutex );
\r
9342 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9343 if ( stream_.state != STREAM_RUNNING ) {
\r
9344 MUTEX_UNLOCK( &stream_.mutex );
\r
9347 MUTEX_UNLOCK( &stream_.mutex );
\r
9350 if ( stream_.state == STREAM_CLOSED ) {
\r
9351 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9352 error( RtAudioError::WARNING );
\r
9356 // Invoke user callback to get fresh output data.
\r
9357 int doStopStream = 0;
\r
9358 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9359 double streamTime = getStreamTime();
\r
9360 RtAudioStreamStatus status = 0;
\r
9361 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9362 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9363 handle->xrun[0] = false;
\r
9365 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9366 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9367 handle->xrun[1] = false;
\r
9369 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9370 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9371 if ( doStopStream == 2 ) {
\r
9372 this->abortStream();
\r
9376 MUTEX_LOCK( &stream_.mutex );
\r
9378 // The state might change while waiting on a mutex.
\r
9379 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9384 RtAudioFormat format;
\r
9386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9388 // Setup parameters and do buffer conversion if necessary.
\r
9389 if ( stream_.doConvertBuffer[0] ) {
\r
9390 buffer = stream_.deviceBuffer;
\r
9391 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9392 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9393 format = stream_.deviceFormat[0];
\r
9396 buffer = stream_.userBuffer[0];
\r
9397 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9398 format = stream_.userFormat;
\r
9401 // Do byte swapping if necessary.
\r
9402 if ( stream_.doByteSwap[0] )
\r
9403 byteSwapBuffer( buffer, samples, format );
\r
9405 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9407 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9408 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9409 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9410 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9411 handle->triggered = true;
\r
9414 // Write samples to device.
\r
9415 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9417 if ( result == -1 ) {
\r
9418 // We'll assume this is an underrun, though there isn't a
\r
9419 // specific means for determining that.
\r
9420 handle->xrun[0] = true;
\r
9421 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9422 error( RtAudioError::WARNING );
\r
9423 // Continue on to input section.
\r
9427 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9429 // Setup parameters.
\r
9430 if ( stream_.doConvertBuffer[1] ) {
\r
9431 buffer = stream_.deviceBuffer;
\r
9432 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9433 format = stream_.deviceFormat[1];
\r
9436 buffer = stream_.userBuffer[1];
\r
9437 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9438 format = stream_.userFormat;
\r
9441 // Read samples from device.
\r
9442 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9444 if ( result == -1 ) {
\r
9445 // We'll assume this is an overrun, though there isn't a
\r
9446 // specific means for determining that.
\r
9447 handle->xrun[1] = true;
\r
9448 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9449 error( RtAudioError::WARNING );
\r
9453 // Do byte swapping if necessary.
\r
9454 if ( stream_.doByteSwap[1] )
\r
9455 byteSwapBuffer( buffer, samples, format );
\r
9457 // Do buffer conversion if necessary.
\r
9458 if ( stream_.doConvertBuffer[1] )
\r
9459 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9463 MUTEX_UNLOCK( &stream_.mutex );
\r
9465 RtApi::tickStreamTime();
\r
9466 if ( doStopStream == 1 ) this->stopStream();
\r
9469 static void *ossCallbackHandler( void *ptr )
\r
9471 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9472 RtApiOss *object = (RtApiOss *) info->object;
\r
9473 bool *isRunning = &info->isRunning;
\r
9475 while ( *isRunning == true ) {
\r
9476 pthread_testcancel();
\r
9477 object->callbackEvent();
\r
9480 pthread_exit( NULL );
\r
9483 //******************** End of __LINUX_OSS__ *********************//
\r
9487 // *************************************************** //
\r
9489 // Protected common (OS-independent) RtAudio methods.
\r
9491 // *************************************************** //
\r
9493 // This method can be modified to control the behavior of error
\r
9494 // message printing.
\r
9495 void RtApi :: error( RtAudioError::Type type )
\r
9497 errorStream_.str(""); // clear the ostringstream
\r
9499 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9500 if ( errorCallback ) {
\r
9501 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9503 if ( firstErrorOccurred_ )
\r
9506 firstErrorOccurred_ = true;
\r
9507 const std::string errorMessage = errorText_;
\r
9509 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9510 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9514 errorCallback( type, errorMessage );
\r
9515 firstErrorOccurred_ = false;
\r
9519 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9520 std::cerr << '\n' << errorText_ << "\n\n";
\r
9521 else if ( type != RtAudioError::WARNING )
\r
9522 throw( RtAudioError( errorText_, type ) );
\r
9525 void RtApi :: verifyStream()
\r
9527 if ( stream_.state == STREAM_CLOSED ) {
\r
9528 errorText_ = "RtApi:: a stream is not open!";
\r
9529 error( RtAudioError::INVALID_USE );
\r
9533 void RtApi :: clearStreamInfo()
\r
9535 stream_.mode = UNINITIALIZED;
\r
9536 stream_.state = STREAM_CLOSED;
\r
9537 stream_.sampleRate = 0;
\r
9538 stream_.bufferSize = 0;
\r
9539 stream_.nBuffers = 0;
\r
9540 stream_.userFormat = 0;
\r
9541 stream_.userInterleaved = true;
\r
9542 stream_.streamTime = 0.0;
\r
9543 stream_.apiHandle = 0;
\r
9544 stream_.deviceBuffer = 0;
\r
9545 stream_.callbackInfo.callback = 0;
\r
9546 stream_.callbackInfo.userData = 0;
\r
9547 stream_.callbackInfo.isRunning = false;
\r
9548 stream_.callbackInfo.errorCallback = 0;
\r
9549 for ( int i=0; i<2; i++ ) {
\r
9550 stream_.device[i] = 11111;
\r
9551 stream_.doConvertBuffer[i] = false;
\r
9552 stream_.deviceInterleaved[i] = true;
\r
9553 stream_.doByteSwap[i] = false;
\r
9554 stream_.nUserChannels[i] = 0;
\r
9555 stream_.nDeviceChannels[i] = 0;
\r
9556 stream_.channelOffset[i] = 0;
\r
9557 stream_.deviceFormat[i] = 0;
\r
9558 stream_.latency[i] = 0;
\r
9559 stream_.userBuffer[i] = 0;
\r
9560 stream_.convertInfo[i].channels = 0;
\r
9561 stream_.convertInfo[i].inJump = 0;
\r
9562 stream_.convertInfo[i].outJump = 0;
\r
9563 stream_.convertInfo[i].inFormat = 0;
\r
9564 stream_.convertInfo[i].outFormat = 0;
\r
9565 stream_.convertInfo[i].inOffset.clear();
\r
9566 stream_.convertInfo[i].outOffset.clear();
\r
9570 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9572 if ( format == RTAUDIO_SINT16 )
\r
9574 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9576 else if ( format == RTAUDIO_FLOAT64 )
\r
9578 else if ( format == RTAUDIO_SINT24 )
\r
9580 else if ( format == RTAUDIO_SINT8 )
\r
9583 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9584 error( RtAudioError::WARNING );
\r
9589 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9591 if ( mode == INPUT ) { // convert device to user buffer
\r
9592 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9593 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9594 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9595 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9597 else { // convert user to device buffer
\r
9598 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9599 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9600 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9601 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9604 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9605 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9607 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9609 // Set up the interleave/deinterleave offsets.
\r
9610 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9611 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9612 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9613 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9614 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9615 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9616 stream_.convertInfo[mode].inJump = 1;
\r
9620 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9621 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9622 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9623 stream_.convertInfo[mode].outJump = 1;
\r
9627 else { // no (de)interleaving
\r
9628 if ( stream_.userInterleaved ) {
\r
9629 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9630 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9631 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9635 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9636 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9637 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9638 stream_.convertInfo[mode].inJump = 1;
\r
9639 stream_.convertInfo[mode].outJump = 1;
\r
9644 // Add channel offset.
\r
9645 if ( firstChannel > 0 ) {
\r
9646 if ( stream_.deviceInterleaved[mode] ) {
\r
9647 if ( mode == OUTPUT ) {
\r
9648 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9649 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9652 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9653 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9657 if ( mode == OUTPUT ) {
\r
9658 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9659 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9662 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9663 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9669 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9671 // This function does format conversion, input/output channel compensation, and
\r
9672 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9673 // the lower three bytes of a 32-bit integer.
\r
9675 // Clear our device buffer when in/out duplex device channels are different
\r
9676 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9677 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9678 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9681 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9683 Float64 *out = (Float64 *)outBuffer;
\r
9685 if (info.inFormat == RTAUDIO_SINT8) {
\r
9686 signed char *in = (signed char *)inBuffer;
\r
9687 scale = 1.0 / 127.5;
\r
9688 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9689 for (j=0; j<info.channels; j++) {
\r
9690 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9691 out[info.outOffset[j]] += 0.5;
\r
9692 out[info.outOffset[j]] *= scale;
\r
9694 in += info.inJump;
\r
9695 out += info.outJump;
\r
9698 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9699 Int16 *in = (Int16 *)inBuffer;
\r
9700 scale = 1.0 / 32767.5;
\r
9701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9702 for (j=0; j<info.channels; j++) {
\r
9703 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9704 out[info.outOffset[j]] += 0.5;
\r
9705 out[info.outOffset[j]] *= scale;
\r
9707 in += info.inJump;
\r
9708 out += info.outJump;
\r
9711 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9712 Int24 *in = (Int24 *)inBuffer;
\r
9713 scale = 1.0 / 8388607.5;
\r
9714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9715 for (j=0; j<info.channels; j++) {
\r
9716 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9717 out[info.outOffset[j]] += 0.5;
\r
9718 out[info.outOffset[j]] *= scale;
\r
9720 in += info.inJump;
\r
9721 out += info.outJump;
\r
9724 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9725 Int32 *in = (Int32 *)inBuffer;
\r
9726 scale = 1.0 / 2147483647.5;
\r
9727 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9728 for (j=0; j<info.channels; j++) {
\r
9729 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9730 out[info.outOffset[j]] += 0.5;
\r
9731 out[info.outOffset[j]] *= scale;
\r
9733 in += info.inJump;
\r
9734 out += info.outJump;
\r
9737 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9738 Float32 *in = (Float32 *)inBuffer;
\r
9739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9740 for (j=0; j<info.channels; j++) {
\r
9741 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9743 in += info.inJump;
\r
9744 out += info.outJump;
\r
9747 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9748 // Channel compensation and/or (de)interleaving only.
\r
9749 Float64 *in = (Float64 *)inBuffer;
\r
9750 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9751 for (j=0; j<info.channels; j++) {
\r
9752 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9754 in += info.inJump;
\r
9755 out += info.outJump;
\r
9759 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9761 Float32 *out = (Float32 *)outBuffer;
\r
9763 if (info.inFormat == RTAUDIO_SINT8) {
\r
9764 signed char *in = (signed char *)inBuffer;
\r
9765 scale = (Float32) ( 1.0 / 127.5 );
\r
9766 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9767 for (j=0; j<info.channels; j++) {
\r
9768 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9769 out[info.outOffset[j]] += 0.5;
\r
9770 out[info.outOffset[j]] *= scale;
\r
9772 in += info.inJump;
\r
9773 out += info.outJump;
\r
9776 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9777 Int16 *in = (Int16 *)inBuffer;
\r
9778 scale = (Float32) ( 1.0 / 32767.5 );
\r
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9780 for (j=0; j<info.channels; j++) {
\r
9781 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9782 out[info.outOffset[j]] += 0.5;
\r
9783 out[info.outOffset[j]] *= scale;
\r
9785 in += info.inJump;
\r
9786 out += info.outJump;
\r
9789 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9790 Int24 *in = (Int24 *)inBuffer;
\r
9791 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9793 for (j=0; j<info.channels; j++) {
\r
9794 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9795 out[info.outOffset[j]] += 0.5;
\r
9796 out[info.outOffset[j]] *= scale;
\r
9798 in += info.inJump;
\r
9799 out += info.outJump;
\r
9802 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9803 Int32 *in = (Int32 *)inBuffer;
\r
9804 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9805 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9806 for (j=0; j<info.channels; j++) {
\r
9807 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9808 out[info.outOffset[j]] += 0.5;
\r
9809 out[info.outOffset[j]] *= scale;
\r
9811 in += info.inJump;
\r
9812 out += info.outJump;
\r
9815 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9816 // Channel compensation and/or (de)interleaving only.
\r
9817 Float32 *in = (Float32 *)inBuffer;
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9827 Float64 *in = (Float64 *)inBuffer;
\r
9828 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9829 for (j=0; j<info.channels; j++) {
\r
9830 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9832 in += info.inJump;
\r
9833 out += info.outJump;
\r
9837 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9838 Int32 *out = (Int32 *)outBuffer;
\r
9839 if (info.inFormat == RTAUDIO_SINT8) {
\r
9840 signed char *in = (signed char *)inBuffer;
\r
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9842 for (j=0; j<info.channels; j++) {
\r
9843 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9844 out[info.outOffset[j]] <<= 24;
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9850 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9851 Int16 *in = (Int16 *)inBuffer;
\r
9852 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9853 for (j=0; j<info.channels; j++) {
\r
9854 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9855 out[info.outOffset[j]] <<= 16;
\r
9857 in += info.inJump;
\r
9858 out += info.outJump;
\r
9861 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9862 Int24 *in = (Int24 *)inBuffer;
\r
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9864 for (j=0; j<info.channels; j++) {
\r
9865 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9866 out[info.outOffset[j]] <<= 8;
\r
9868 in += info.inJump;
\r
9869 out += info.outJump;
\r
9872 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9873 // Channel compensation and/or (de)interleaving only.
\r
9874 Int32 *in = (Int32 *)inBuffer;
\r
9875 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9876 for (j=0; j<info.channels; j++) {
\r
9877 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9879 in += info.inJump;
\r
9880 out += info.outJump;
\r
9883 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9884 Float32 *in = (Float32 *)inBuffer;
\r
9885 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9886 for (j=0; j<info.channels; j++) {
\r
9887 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9889 in += info.inJump;
\r
9890 out += info.outJump;
\r
9893 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9894 Float64 *in = (Float64 *)inBuffer;
\r
9895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9896 for (j=0; j<info.channels; j++) {
\r
9897 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9899 in += info.inJump;
\r
9900 out += info.outJump;
\r
9904 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9905 Int24 *out = (Int24 *)outBuffer;
\r
9906 if (info.inFormat == RTAUDIO_SINT8) {
\r
9907 signed char *in = (signed char *)inBuffer;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9911 //out[info.outOffset[j]] <<= 16;
\r
9913 in += info.inJump;
\r
9914 out += info.outJump;
\r
9917 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9918 Int16 *in = (Int16 *)inBuffer;
\r
9919 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9920 for (j=0; j<info.channels; j++) {
\r
9921 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9922 //out[info.outOffset[j]] <<= 8;
\r
9924 in += info.inJump;
\r
9925 out += info.outJump;
\r
9928 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9929 // Channel compensation and/or (de)interleaving only.
\r
9930 Int24 *in = (Int24 *)inBuffer;
\r
9931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9932 for (j=0; j<info.channels; j++) {
\r
9933 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9935 in += info.inJump;
\r
9936 out += info.outJump;
\r
9939 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9940 Int32 *in = (Int32 *)inBuffer;
\r
9941 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9942 for (j=0; j<info.channels; j++) {
\r
9943 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9944 //out[info.outOffset[j]] >>= 8;
\r
9946 in += info.inJump;
\r
9947 out += info.outJump;
\r
9950 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9951 Float32 *in = (Float32 *)inBuffer;
\r
9952 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9953 for (j=0; j<info.channels; j++) {
\r
9954 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9956 in += info.inJump;
\r
9957 out += info.outJump;
\r
9960 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9961 Float64 *in = (Float64 *)inBuffer;
\r
9962 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9963 for (j=0; j<info.channels; j++) {
\r
9964 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9966 in += info.inJump;
\r
9967 out += info.outJump;
\r
9971 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9972 Int16 *out = (Int16 *)outBuffer;
\r
9973 if (info.inFormat == RTAUDIO_SINT8) {
\r
9974 signed char *in = (signed char *)inBuffer;
\r
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9976 for (j=0; j<info.channels; j++) {
\r
9977 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9978 out[info.outOffset[j]] <<= 8;
\r
9980 in += info.inJump;
\r
9981 out += info.outJump;
\r
9984 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9985 // Channel compensation and/or (de)interleaving only.
\r
9986 Int16 *in = (Int16 *)inBuffer;
\r
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9988 for (j=0; j<info.channels; j++) {
\r
9989 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9996 Int24 *in = (Int24 *)inBuffer;
\r
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9998 for (j=0; j<info.channels; j++) {
\r
9999 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10001 in += info.inJump;
\r
10002 out += info.outJump;
\r
10005 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10006 Int32 *in = (Int32 *)inBuffer;
\r
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10008 for (j=0; j<info.channels; j++) {
\r
10009 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10011 in += info.inJump;
\r
10012 out += info.outJump;
\r
10015 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10016 Float32 *in = (Float32 *)inBuffer;
\r
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10018 for (j=0; j<info.channels; j++) {
\r
10019 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10021 in += info.inJump;
\r
10022 out += info.outJump;
\r
10025 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10026 Float64 *in = (Float64 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10036 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10037 signed char *out = (signed char *)outBuffer;
\r
10038 if (info.inFormat == RTAUDIO_SINT8) {
\r
10039 // Channel compensation and/or (de)interleaving only.
\r
10040 signed char *in = (signed char *)inBuffer;
\r
10041 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10042 for (j=0; j<info.channels; j++) {
\r
10043 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10045 in += info.inJump;
\r
10046 out += info.outJump;
\r
10049 if (info.inFormat == RTAUDIO_SINT16) {
\r
10050 Int16 *in = (Int16 *)inBuffer;
\r
10051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10052 for (j=0; j<info.channels; j++) {
\r
10053 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10055 in += info.inJump;
\r
10056 out += info.outJump;
\r
10059 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10060 Int24 *in = (Int24 *)inBuffer;
\r
10061 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10062 for (j=0; j<info.channels; j++) {
\r
10063 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10065 in += info.inJump;
\r
10066 out += info.outJump;
\r
10069 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10070 Int32 *in = (Int32 *)inBuffer;
\r
10071 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10072 for (j=0; j<info.channels; j++) {
\r
10073 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10075 in += info.inJump;
\r
10076 out += info.outJump;
\r
10079 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10080 Float32 *in = (Float32 *)inBuffer;
\r
10081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10082 for (j=0; j<info.channels; j++) {
\r
10083 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10085 in += info.inJump;
\r
10086 out += info.outJump;
\r
10089 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10090 Float64 *in = (Float64 *)inBuffer;
\r
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10092 for (j=0; j<info.channels; j++) {
\r
10093 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10095 in += info.inJump;
\r
10096 out += info.outJump;
\r
10102 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10103 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10104 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10106 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10108 register char val;
\r
10109 register char *ptr;
\r
10112 if ( format == RTAUDIO_SINT16 ) {
\r
10113 for ( unsigned int i=0; i<samples; i++ ) {
\r
10114 // Swap 1st and 2nd bytes.
\r
10116 *(ptr) = *(ptr+1);
\r
10119 // Increment 2 bytes.
\r
10123 else if ( format == RTAUDIO_SINT32 ||
\r
10124 format == RTAUDIO_FLOAT32 ) {
\r
10125 for ( unsigned int i=0; i<samples; i++ ) {
\r
10126 // Swap 1st and 4th bytes.
\r
10128 *(ptr) = *(ptr+3);
\r
10131 // Swap 2nd and 3rd bytes.
\r
10134 *(ptr) = *(ptr+1);
\r
10137 // Increment 3 more bytes.
\r
10141 else if ( format == RTAUDIO_SINT24 ) {
\r
10142 for ( unsigned int i=0; i<samples; i++ ) {
\r
10143 // Swap 1st and 3rd bytes.
\r
10145 *(ptr) = *(ptr+2);
\r
10148 // Increment 2 more bytes.
\r
10152 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10153 for ( unsigned int i=0; i<samples; i++ ) {
\r
10154 // Swap 1st and 8th bytes
\r
10156 *(ptr) = *(ptr+7);
\r
10159 // Swap 2nd and 7th bytes
\r
10162 *(ptr) = *(ptr+5);
\r
10165 // Swap 3rd and 6th bytes
\r
10168 *(ptr) = *(ptr+3);
\r
10171 // Swap 4th and 5th bytes
\r
10174 *(ptr) = *(ptr+1);
\r
10177 // Increment 5 more bytes.
\r
10183 // Indentation settings for Vim and Emacs
\r
10185 // Local Variables:
\r
10186 // c-basic-offset: 2
\r
10187 // indent-tabs-mode: nil
\r
10190 // vim: et sts=2 sw=2
\r