/************************************************************************/
/*! \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound and ASIO) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2013 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no realtime API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // no API compiled in, so no need for real mutexes
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_DS__)
\r
105 apis.push_back( WINDOWS_DS );
\r
107 #if defined(__MACOSX_CORE__)
\r
108 apis.push_back( MACOSX_CORE );
\r
110 #if defined(__RTAUDIO_DUMMY__)
\r
111 apis.push_back( RTAUDIO_DUMMY );
\r
115 void RtAudio :: openRtApi( RtAudio::Api api )
\r
121 #if defined(__UNIX_JACK__)
\r
122 if ( api == UNIX_JACK )
\r
123 rtapi_ = new RtApiJack();
\r
125 #if defined(__LINUX_ALSA__)
\r
126 if ( api == LINUX_ALSA )
\r
127 rtapi_ = new RtApiAlsa();
\r
129 #if defined(__LINUX_PULSE__)
\r
130 if ( api == LINUX_PULSE )
\r
131 rtapi_ = new RtApiPulse();
\r
133 #if defined(__LINUX_OSS__)
\r
134 if ( api == LINUX_OSS )
\r
135 rtapi_ = new RtApiOss();
\r
137 #if defined(__WINDOWS_ASIO__)
\r
138 if ( api == WINDOWS_ASIO )
\r
139 rtapi_ = new RtApiAsio();
\r
141 #if defined(__WINDOWS_DS__)
\r
142 if ( api == WINDOWS_DS )
\r
143 rtapi_ = new RtApiDs();
\r
145 #if defined(__MACOSX_CORE__)
\r
146 if ( api == MACOSX_CORE )
\r
147 rtapi_ = new RtApiCore();
\r
149 #if defined(__RTAUDIO_DUMMY__)
\r
150 if ( api == RTAUDIO_DUMMY )
\r
151 rtapi_ = new RtApiDummy();
\r
155 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
159 if ( api != UNSPECIFIED ) {
\r
160 // Attempt to open the specified API.
\r
162 if ( rtapi_ ) return;
\r
164 // No compiled support for specified API value. Issue a debug
\r
165 // warning and continue as if no API was specified.
\r
166 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
169 // Iterate through the compiled APIs and return as soon as we find
\r
170 // one with at least one device or we reach the end of the list.
\r
171 std::vector< RtAudio::Api > apis;
\r
172 getCompiledApi( apis );
\r
173 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
174 openRtApi( apis[i] );
\r
175 if ( rtapi_->getDeviceCount() ) break;
\r
178 if ( rtapi_ ) return;
\r
180 // It should not be possible to get here because the preprocessor
\r
181 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
182 // API-specific definitions are passed to the compiler. But just in
\r
183 // case something weird happens, we'll thow an error.
\r
184 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
185 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
188 RtAudio :: ~RtAudio() throw()
\r
194 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
195 RtAudio::StreamParameters *inputParameters,
\r
196 RtAudioFormat format, unsigned int sampleRate,
\r
197 unsigned int *bufferFrames,
\r
198 RtAudioCallback callback, void *userData,
\r
199 RtAudio::StreamOptions *options,
\r
200 RtAudioErrorCallback errorCallback )
\r
202 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
203 sampleRate, bufferFrames, callback,
\r
204 userData, options, errorCallback );
\r
207 // *************************************************** //
\r
209 // Public RtApi definitions (see end of file for
\r
210 // private or protected utility functions).
\r
212 // *************************************************** //
\r
216 stream_.state = STREAM_CLOSED;
\r
217 stream_.mode = UNINITIALIZED;
\r
218 stream_.apiHandle = 0;
\r
219 stream_.userBuffer[0] = 0;
\r
220 stream_.userBuffer[1] = 0;
\r
221 MUTEX_INITIALIZE( &stream_.mutex );
\r
222 showWarnings_ = true;
\r
223 firstErrorOccurred_ = false;
\r
228 MUTEX_DESTROY( &stream_.mutex );
\r
231 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
232 RtAudio::StreamParameters *iParams,
\r
233 RtAudioFormat format, unsigned int sampleRate,
\r
234 unsigned int *bufferFrames,
\r
235 RtAudioCallback callback, void *userData,
\r
236 RtAudio::StreamOptions *options,
\r
237 RtAudioErrorCallback errorCallback )
\r
239 if ( stream_.state != STREAM_CLOSED ) {
\r
240 errorText_ = "RtApi::openStream: a stream is already open!";
\r
241 error( RtAudioError::INVALID_USE );
\r
245 if ( oParams && oParams->nChannels < 1 ) {
\r
246 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
247 error( RtAudioError::INVALID_USE );
\r
251 if ( iParams && iParams->nChannels < 1 ) {
\r
252 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
253 error( RtAudioError::INVALID_USE );
\r
257 if ( oParams == NULL && iParams == NULL ) {
\r
258 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
259 error( RtAudioError::INVALID_USE );
\r
263 if ( formatBytes(format) == 0 ) {
\r
264 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 unsigned int nDevices = getDeviceCount();
\r
270 unsigned int oChannels = 0;
\r
272 oChannels = oParams->nChannels;
\r
273 if ( oParams->deviceId >= nDevices ) {
\r
274 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
275 error( RtAudioError::INVALID_USE );
\r
280 unsigned int iChannels = 0;
\r
282 iChannels = iParams->nChannels;
\r
283 if ( iParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
293 if ( oChannels > 0 ) {
\r
295 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
296 sampleRate, format, bufferFrames, options );
\r
297 if ( result == false ) {
\r
298 error( RtAudioError::SYSTEM_ERROR );
\r
303 if ( iChannels > 0 ) {
\r
305 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
306 sampleRate, format, bufferFrames, options );
\r
307 if ( result == false ) {
\r
308 if ( oChannels > 0 ) closeStream();
\r
309 error( RtAudioError::SYSTEM_ERROR );
\r
314 stream_.callbackInfo.callback = (void *) callback;
\r
315 stream_.callbackInfo.userData = userData;
\r
316 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
318 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
319 stream_.state = STREAM_STOPPED;
\r
322 unsigned int RtApi :: getDefaultInputDevice( void )
\r
324 // Should be implemented in subclasses if possible.
\r
328 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
330 // Should be implemented in subclasses if possible.
\r
334 void RtApi :: closeStream( void )
\r
336 // MUST be implemented in subclasses!
\r
340 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
341 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
342 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
343 RtAudio::StreamOptions * /*options*/ )
\r
345 // MUST be implemented in subclasses!
\r
349 void RtApi :: tickStreamTime( void )
\r
351 // Subclasses that do not provide their own implementation of
\r
352 // getStreamTime should call this function once per buffer I/O to
\r
353 // provide basic stream time support.
\r
355 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
357 #if defined( HAVE_GETTIMEOFDAY )
\r
358 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
362 long RtApi :: getStreamLatency( void )
\r
366 long totalLatency = 0;
\r
367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
368 totalLatency = stream_.latency[0];
\r
369 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
370 totalLatency += stream_.latency[1];
\r
372 return totalLatency;
\r
375 double RtApi :: getStreamTime( void )
\r
379 #if defined( HAVE_GETTIMEOFDAY )
\r
380 // Return a very accurate estimate of the stream time by
\r
381 // adding in the elapsed time since the last tick.
\r
382 struct timeval then;
\r
383 struct timeval now;
\r
385 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
386 return stream_.streamTime;
\r
388 gettimeofday( &now, NULL );
\r
389 then = stream_.lastTickTimestamp;
\r
390 return stream_.streamTime +
\r
391 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
392 (then.tv_sec + 0.000001 * then.tv_usec));
\r
394 return stream_.streamTime;
\r
398 unsigned int RtApi :: getStreamSampleRate( void )
\r
402 return stream_.sampleRate;
\r
406 // *************************************************** //
\r
408 // OS/API-specific methods.
\r
410 // *************************************************** //
\r
412 #if defined(__MACOSX_CORE__)
\r
414 // The OS X CoreAudio API is designed to use a separate callback
\r
415 // procedure for each of its audio devices. A single RtAudio duplex
\r
416 // stream using two different devices is supported here, though it
\r
417 // cannot be guaranteed to always behave correctly because we cannot
\r
418 // synchronize these two callbacks.
\r
420 // A property listener is installed for over/underrun information.
\r
421 // However, no functionality is currently provided to allow property
\r
422 // listeners to trigger user handlers because it is unclear what could
\r
423 // be done if a critical stream parameter (buffer size, sample rate,
\r
424 // device disconnect) notification arrived. The listeners entail
\r
425 // quite a bit of extra code and most likely, a user program wouldn't
\r
426 // be prepared for the result anyway. However, we do provide a flag
\r
427 // to the client callback function to inform of an over/underrun.
\r
429 // A structure to hold various information related to the CoreAudio API
\r
431 struct CoreHandle {
\r
432 AudioDeviceID id[2]; // device ids
\r
433 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
434 AudioDeviceIOProcID procId[2];
\r
436 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
437 UInt32 nStreams[2]; // number of streams to use
\r
439 char *deviceBuffer;
\r
440 pthread_cond_t condition;
\r
441 int drainCounter; // Tracks callback counts when draining
\r
442 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
445 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
448 RtApiCore:: RtApiCore()
\r
450 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
451 // This is a largely undocumented but absolutely necessary
\r
452 // requirement starting with OS-X 10.6. If not called, queries and
\r
453 // updates to various audio device properties are not handled
\r
455 CFRunLoopRef theRunLoop = NULL;
\r
456 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
457 kAudioObjectPropertyScopeGlobal,
\r
458 kAudioObjectPropertyElementMaster };
\r
459 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
460 if ( result != noErr ) {
\r
461 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
462 error( RtAudioError::WARNING );
\r
467 RtApiCore :: ~RtApiCore()
\r
469 // The subclass destructor gets called before the base class
\r
470 // destructor, so close an existing stream before deallocating
\r
471 // apiDeviceId memory.
\r
472 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
475 unsigned int RtApiCore :: getDeviceCount( void )
\r
477 // Find out how many audio devices there are, if any.
\r
479 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
480 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
481 if ( result != noErr ) {
\r
482 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
483 error( RtAudioError::WARNING );
\r
487 return dataSize / sizeof( AudioDeviceID );
\r
490 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
492 unsigned int nDevices = getDeviceCount();
\r
493 if ( nDevices <= 1 ) return 0;
\r
496 UInt32 dataSize = sizeof( AudioDeviceID );
\r
497 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
498 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
499 if ( result != noErr ) {
\r
500 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
501 error( RtAudioError::WARNING );
\r
505 dataSize *= nDevices;
\r
506 AudioDeviceID deviceList[ nDevices ];
\r
507 property.mSelector = kAudioHardwarePropertyDevices;
\r
508 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
509 if ( result != noErr ) {
\r
510 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
511 error( RtAudioError::WARNING );
\r
515 for ( unsigned int i=0; i<nDevices; i++ )
\r
516 if ( id == deviceList[i] ) return i;
\r
518 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
519 error( RtAudioError::WARNING );
\r
523 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices <= 1 ) return 0;
\r
529 UInt32 dataSize = sizeof( AudioDeviceID );
\r
530 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
531 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
532 if ( result != noErr ) {
\r
533 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
534 error( RtAudioError::WARNING );
\r
538 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
539 AudioDeviceID deviceList[ nDevices ];
\r
540 property.mSelector = kAudioHardwarePropertyDevices;
\r
541 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
542 if ( result != noErr ) {
\r
543 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
544 error( RtAudioError::WARNING );
\r
548 for ( unsigned int i=0; i<nDevices; i++ )
\r
549 if ( id == deviceList[i] ) return i;
\r
551 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
552 error( RtAudioError::WARNING );
\r
556 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
558 RtAudio::DeviceInfo info;
\r
559 info.probed = false;
\r
562 unsigned int nDevices = getDeviceCount();
\r
563 if ( nDevices == 0 ) {
\r
564 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
565 error( RtAudioError::INVALID_USE );
\r
569 if ( device >= nDevices ) {
\r
570 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
571 error( RtAudioError::INVALID_USE );
\r
575 AudioDeviceID deviceList[ nDevices ];
\r
576 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
577 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
578 kAudioObjectPropertyScopeGlobal,
\r
579 kAudioObjectPropertyElementMaster };
\r
580 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
581 0, NULL, &dataSize, (void *) &deviceList );
\r
582 if ( result != noErr ) {
\r
583 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
584 error( RtAudioError::WARNING );
\r
588 AudioDeviceID id = deviceList[ device ];
\r
590 // Get the device name.
\r
592 CFStringRef cfname;
\r
593 dataSize = sizeof( CFStringRef );
\r
594 property.mSelector = kAudioObjectPropertyManufacturer;
\r
595 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
596 if ( result != noErr ) {
\r
597 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
598 errorText_ = errorStream_.str();
\r
599 error( RtAudioError::WARNING );
\r
603 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
604 int length = CFStringGetLength(cfname);
\r
605 char *mname = (char *)malloc(length * 3 + 1);
\r
606 #if defined( UNICODE ) || defined( _UNICODE )
\r
607 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
609 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
611 info.name.append( (const char *)mname, strlen(mname) );
\r
612 info.name.append( ": " );
\r
613 CFRelease( cfname );
\r
616 property.mSelector = kAudioObjectPropertyName;
\r
617 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
618 if ( result != noErr ) {
\r
619 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
620 errorText_ = errorStream_.str();
\r
621 error( RtAudioError::WARNING );
\r
625 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
626 length = CFStringGetLength(cfname);
\r
627 char *name = (char *)malloc(length * 3 + 1);
\r
628 #if defined( UNICODE ) || defined( _UNICODE )
\r
629 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
631 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
633 info.name.append( (const char *)name, strlen(name) );
\r
634 CFRelease( cfname );
\r
637 // Get the output stream "configuration".
\r
638 AudioBufferList *bufferList = nil;
\r
639 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
640 property.mScope = kAudioDevicePropertyScopeOutput;
\r
641 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
643 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
644 if ( result != noErr || dataSize == 0 ) {
\r
645 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
646 errorText_ = errorStream_.str();
\r
647 error( RtAudioError::WARNING );
\r
651 // Allocate the AudioBufferList.
\r
652 bufferList = (AudioBufferList *) malloc( dataSize );
\r
653 if ( bufferList == NULL ) {
\r
654 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
655 error( RtAudioError::WARNING );
\r
659 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
660 if ( result != noErr || dataSize == 0 ) {
\r
661 free( bufferList );
\r
662 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
663 errorText_ = errorStream_.str();
\r
664 error( RtAudioError::WARNING );
\r
668 // Get output channel information.
\r
669 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
670 for ( i=0; i<nStreams; i++ )
\r
671 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
672 free( bufferList );
\r
674 // Get the input stream "configuration".
\r
675 property.mScope = kAudioDevicePropertyScopeInput;
\r
676 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
677 if ( result != noErr || dataSize == 0 ) {
\r
678 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
679 errorText_ = errorStream_.str();
\r
680 error( RtAudioError::WARNING );
\r
684 // Allocate the AudioBufferList.
\r
685 bufferList = (AudioBufferList *) malloc( dataSize );
\r
686 if ( bufferList == NULL ) {
\r
687 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
688 error( RtAudioError::WARNING );
\r
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
693 if (result != noErr || dataSize == 0) {
\r
694 free( bufferList );
\r
695 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
696 errorText_ = errorStream_.str();
\r
697 error( RtAudioError::WARNING );
\r
701 // Get input channel information.
\r
702 nStreams = bufferList->mNumberBuffers;
\r
703 for ( i=0; i<nStreams; i++ )
\r
704 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
705 free( bufferList );
\r
707 // If device opens for both playback and capture, we determine the channels.
\r
708 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
709 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
711 // Probe the device sample rates.
\r
712 bool isInput = false;
\r
713 if ( info.outputChannels == 0 ) isInput = true;
\r
715 // Determine the supported sample rates.
\r
716 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
717 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
718 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
719 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
720 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
721 errorText_ = errorStream_.str();
\r
722 error( RtAudioError::WARNING );
\r
726 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
727 AudioValueRange rangeList[ nRanges ];
\r
728 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
729 if ( result != kAudioHardwareNoError ) {
\r
730 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
731 errorText_ = errorStream_.str();
\r
732 error( RtAudioError::WARNING );
\r
736 // The sample rate reporting mechanism is a bit of a mystery. It
\r
737 // seems that it can either return individual rates or a range of
\r
738 // rates. I assume that if the min / max range values are the same,
\r
739 // then that represents a single supported rate and if the min / max
\r
740 // range values are different, the device supports an arbitrary
\r
741 // range of values (though there might be multiple ranges, so we'll
\r
742 // use the most conservative range).
\r
743 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
744 bool haveValueRange = false;
\r
745 info.sampleRates.clear();
\r
746 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
747 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
748 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
750 haveValueRange = true;
\r
751 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
752 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
756 if ( haveValueRange ) {
\r
757 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
758 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
759 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
763 // Sort and remove any redundant values
\r
764 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
765 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
767 if ( info.sampleRates.size() == 0 ) {
\r
768 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
769 errorText_ = errorStream_.str();
\r
770 error( RtAudioError::WARNING );
\r
774 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
775 // Thus, any other "physical" formats supported by the device are of
\r
776 // no interest to the client.
\r
777 info.nativeFormats = RTAUDIO_FLOAT32;
\r
779 if ( info.outputChannels > 0 )
\r
780 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
781 if ( info.inputChannels > 0 )
\r
782 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
784 info.probed = true;
\r
788 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
789 const AudioTimeStamp* /*inNow*/,
\r
790 const AudioBufferList* inInputData,
\r
791 const AudioTimeStamp* /*inInputTime*/,
\r
792 AudioBufferList* outOutputData,
\r
793 const AudioTimeStamp* /*inOutputTime*/,
\r
794 void* infoPointer )
\r
796 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
798 RtApiCore *object = (RtApiCore *) info->object;
\r
799 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
800 return kAudioHardwareUnspecifiedError;
\r
802 return kAudioHardwareNoError;
\r
805 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
807 const AudioObjectPropertyAddress properties[],
\r
808 void* handlePointer )
\r
810 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
811 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
812 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
813 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
814 handle->xrun[1] = true;
\r
816 handle->xrun[0] = true;
\r
820 return kAudioHardwareNoError;
\r
823 static OSStatus rateListener( AudioObjectID inDevice,
\r
824 UInt32 /*nAddresses*/,
\r
825 const AudioObjectPropertyAddress /*properties*/[],
\r
826 void* ratePointer )
\r
828 Float64 *rate = (Float64 *) ratePointer;
\r
829 UInt32 dataSize = sizeof( Float64 );
\r
830 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
831 kAudioObjectPropertyScopeGlobal,
\r
832 kAudioObjectPropertyElementMaster };
\r
833 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
834 return kAudioHardwareNoError;
\r
837 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
838 unsigned int firstChannel, unsigned int sampleRate,
\r
839 RtAudioFormat format, unsigned int *bufferSize,
\r
840 RtAudio::StreamOptions *options )
\r
843 unsigned int nDevices = getDeviceCount();
\r
844 if ( nDevices == 0 ) {
\r
845 // This should not happen because a check is made before this function is called.
\r
846 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
850 if ( device >= nDevices ) {
\r
851 // This should not happen because a check is made before this function is called.
\r
852 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
856 AudioDeviceID deviceList[ nDevices ];
\r
857 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
858 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
859 kAudioObjectPropertyScopeGlobal,
\r
860 kAudioObjectPropertyElementMaster };
\r
861 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
862 0, NULL, &dataSize, (void *) &deviceList );
\r
863 if ( result != noErr ) {
\r
864 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
868 AudioDeviceID id = deviceList[ device ];
\r
870 // Setup for stream mode.
\r
871 bool isInput = false;
\r
872 if ( mode == INPUT ) {
\r
874 property.mScope = kAudioDevicePropertyScopeInput;
\r
877 property.mScope = kAudioDevicePropertyScopeOutput;
\r
879 // Get the stream "configuration".
\r
880 AudioBufferList *bufferList = nil;
\r
882 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
883 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
884 if ( result != noErr || dataSize == 0 ) {
\r
885 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
886 errorText_ = errorStream_.str();
\r
890 // Allocate the AudioBufferList.
\r
891 bufferList = (AudioBufferList *) malloc( dataSize );
\r
892 if ( bufferList == NULL ) {
\r
893 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
897 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
898 if (result != noErr || dataSize == 0) {
\r
899 free( bufferList );
\r
900 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
901 errorText_ = errorStream_.str();
\r
905 // Search for one or more streams that contain the desired number of
\r
906 // channels. CoreAudio devices can have an arbitrary number of
\r
907 // streams and each stream can have an arbitrary number of channels.
\r
908 // For each stream, a single buffer of interleaved samples is
\r
909 // provided. RtAudio prefers the use of one stream of interleaved
\r
910 // data or multiple consecutive single-channel streams. However, we
\r
911 // now support multiple consecutive multi-channel streams of
\r
912 // interleaved data as well.
\r
913 UInt32 iStream, offsetCounter = firstChannel;
\r
914 UInt32 nStreams = bufferList->mNumberBuffers;
\r
915 bool monoMode = false;
\r
916 bool foundStream = false;
\r
918 // First check that the device supports the requested number of
\r
920 UInt32 deviceChannels = 0;
\r
921 for ( iStream=0; iStream<nStreams; iStream++ )
\r
922 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
924 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
925 free( bufferList );
\r
926 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
927 errorText_ = errorStream_.str();
\r
931 // Look for a single stream meeting our needs.
\r
932 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
933 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
934 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
935 if ( streamChannels >= channels + offsetCounter ) {
\r
936 firstStream = iStream;
\r
937 channelOffset = offsetCounter;
\r
938 foundStream = true;
\r
941 if ( streamChannels > offsetCounter ) break;
\r
942 offsetCounter -= streamChannels;
\r
945 // If we didn't find a single stream above, then we should be able
\r
946 // to meet the channel specification with multiple streams.
\r
947 if ( foundStream == false ) {
\r
949 offsetCounter = firstChannel;
\r
950 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
951 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
952 if ( streamChannels > offsetCounter ) break;
\r
953 offsetCounter -= streamChannels;
\r
956 firstStream = iStream;
\r
957 channelOffset = offsetCounter;
\r
958 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
960 if ( streamChannels > 1 ) monoMode = false;
\r
961 while ( channelCounter > 0 ) {
\r
962 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
963 if ( streamChannels > 1 ) monoMode = false;
\r
964 channelCounter -= streamChannels;
\r
969 free( bufferList );
\r
971 // Determine the buffer size.
\r
972 AudioValueRange bufferRange;
\r
973 dataSize = sizeof( AudioValueRange );
\r
974 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
975 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
977 if ( result != noErr ) {
\r
978 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
979 errorText_ = errorStream_.str();
\r
983 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
984 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
985 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
987 // Set the buffer size. For multiple streams, I'm assuming we only
\r
988 // need to make this setting for the master channel.
\r
989 UInt32 theSize = (UInt32) *bufferSize;
\r
990 dataSize = sizeof( UInt32 );
\r
991 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
992 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
994 if ( result != noErr ) {
\r
995 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
996 errorText_ = errorStream_.str();
\r
1000 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1001 // MUST be the same in both directions!
\r
1002 *bufferSize = theSize;
\r
1003 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 stream_.bufferSize = *bufferSize;
\r
1010 stream_.nBuffers = 1;
\r
1012 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1013 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1015 dataSize = sizeof( hog_pid );
\r
1016 property.mSelector = kAudioDevicePropertyHogMode;
\r
1017 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1018 if ( result != noErr ) {
\r
1019 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1020 errorText_ = errorStream_.str();
\r
1024 if ( hog_pid != getpid() ) {
\r
1025 hog_pid = getpid();
\r
1026 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1035 // Check and if necessary, change the sample rate for the device.
\r
1036 Float64 nominalRate;
\r
1037 dataSize = sizeof( Float64 );
\r
1038 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1039 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1040 if ( result != noErr ) {
\r
1041 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1042 errorText_ = errorStream_.str();
\r
1046 // Only change the sample rate if off by more than 1 Hz.
\r
1047 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1049 // Set a property listener for the sample rate change
\r
1050 Float64 reportedRate = 0.0;
\r
1051 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1052 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1053 if ( result != noErr ) {
\r
1054 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1055 errorText_ = errorStream_.str();
\r
1059 nominalRate = (Float64) sampleRate;
\r
1060 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1061 if ( result != noErr ) {
\r
1062 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 // Now wait until the reported nominal rate is what we just set.
\r
1069 UInt32 microCounter = 0;
\r
1070 while ( reportedRate != nominalRate ) {
\r
1071 microCounter += 5000;
\r
1072 if ( microCounter > 5000000 ) break;
\r
1076 // Remove the property listener.
\r
1077 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1079 if ( microCounter > 5000000 ) {
\r
1080 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1081 errorText_ = errorStream_.str();
\r
1086 // Now set the stream format for all streams. Also, check the
\r
1087 // physical format of the device and change that if necessary.
\r
1088 AudioStreamBasicDescription description;
\r
1089 dataSize = sizeof( AudioStreamBasicDescription );
\r
1090 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1091 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1092 if ( result != noErr ) {
\r
1093 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1094 errorText_ = errorStream_.str();
\r
1098 // Set the sample rate and data format id. However, only make the
\r
1099 // change if the sample rate is not within 1.0 of the desired
\r
1100 // rate and the format is not linear pcm.
\r
1101 bool updateFormat = false;
\r
1102 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1103 description.mSampleRate = (Float64) sampleRate;
\r
1104 updateFormat = true;
\r
1107 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1108 description.mFormatID = kAudioFormatLinearPCM;
\r
1109 updateFormat = true;
\r
1112 if ( updateFormat ) {
\r
1113 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1114 if ( result != noErr ) {
\r
1115 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1116 errorText_ = errorStream_.str();
\r
1121 // Now check the physical format.
\r
1122 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1123 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1124 if ( result != noErr ) {
\r
1125 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1126 errorText_ = errorStream_.str();
\r
1130 //std::cout << "Current physical stream format:" << std::endl;
\r
1131 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1132 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1133 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1134 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1136 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1137 description.mFormatID = kAudioFormatLinearPCM;
\r
1138 //description.mSampleRate = (Float64) sampleRate;
\r
1139 AudioStreamBasicDescription testDescription = description;
\r
1140 UInt32 formatFlags;
\r
1142 // We'll try higher bit rates first and then work our way down.
\r
1143 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1144 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1145 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1146 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1147 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1148 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1149 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1150 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1151 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1152 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1155 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1157 bool setPhysicalFormat = false;
\r
1158 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1159 testDescription = description;
\r
1160 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1161 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1162 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1163 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1165 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1166 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1167 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1168 if ( result == noErr ) {
\r
1169 setPhysicalFormat = true;
\r
1170 //std::cout << "Updated physical stream format:" << std::endl;
\r
1171 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1172 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1173 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1174 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1179 if ( !setPhysicalFormat ) {
\r
1180 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1181 errorText_ = errorStream_.str();
\r
1184 } // done setting virtual/physical formats.
\r
1186 // Get the stream / device latency.
\r
1188 dataSize = sizeof( UInt32 );
\r
1189 property.mSelector = kAudioDevicePropertyLatency;
\r
1190 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1191 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1192 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1194 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1195 errorText_ = errorStream_.str();
\r
1196 error( RtAudioError::WARNING );
\r
1200 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1201 // always be presented in native-endian format, so we should never
\r
1202 // need to byte swap.
\r
1203 stream_.doByteSwap[mode] = false;
\r
1205 // From the CoreAudio documentation, PCM data must be supplied as
\r
1207 stream_.userFormat = format;
\r
1208 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1210 if ( streamCount == 1 )
\r
1211 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1212 else // multiple streams
\r
1213 stream_.nDeviceChannels[mode] = channels;
\r
1214 stream_.nUserChannels[mode] = channels;
\r
1215 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1216 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1217 else stream_.userInterleaved = true;
\r
1218 stream_.deviceInterleaved[mode] = true;
\r
1219 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1221 // Set flags for buffer conversion.
\r
1222 stream_.doConvertBuffer[mode] = false;
\r
1223 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1224 stream_.doConvertBuffer[mode] = true;
\r
1225 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1226 stream_.doConvertBuffer[mode] = true;
\r
1227 if ( streamCount == 1 ) {
\r
1228 if ( stream_.nUserChannels[mode] > 1 &&
\r
1229 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1230 stream_.doConvertBuffer[mode] = true;
\r
1232 else if ( monoMode && stream_.userInterleaved )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1235 // Allocate our CoreHandle structure for the stream.
\r
1236 CoreHandle *handle = 0;
\r
1237 if ( stream_.apiHandle == 0 ) {
\r
1239 handle = new CoreHandle;
\r
1241 catch ( std::bad_alloc& ) {
\r
1242 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1246 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1247 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1250 stream_.apiHandle = (void *) handle;
\r
1253 handle = (CoreHandle *) stream_.apiHandle;
\r
1254 handle->iStream[mode] = firstStream;
\r
1255 handle->nStreams[mode] = streamCount;
\r
1256 handle->id[mode] = id;
\r
1258 // Allocate necessary internal buffers.
\r
1259 unsigned long bufferBytes;
\r
1260 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1261 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1262 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1263 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1264 if ( stream_.userBuffer[mode] == NULL ) {
\r
1265 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1269 // If possible, we will make use of the CoreAudio stream buffers as
\r
1270 // "device buffers". However, we can't do this if using multiple
\r
1272 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1274 bool makeBuffer = true;
\r
1275 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1276 if ( mode == INPUT ) {
\r
1277 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1278 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1279 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1283 if ( makeBuffer ) {
\r
1284 bufferBytes *= *bufferSize;
\r
1285 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1286 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1287 if ( stream_.deviceBuffer == NULL ) {
\r
1288 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1294 stream_.sampleRate = sampleRate;
\r
1295 stream_.device[mode] = device;
\r
1296 stream_.state = STREAM_STOPPED;
\r
1297 stream_.callbackInfo.object = (void *) this;
\r
1299 // Setup the buffer conversion information structure.
\r
1300 if ( stream_.doConvertBuffer[mode] ) {
\r
1301 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1302 else setConvertInfo( mode, channelOffset );
\r
1305 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1306 // Only one callback procedure per device.
\r
1307 stream_.mode = DUPLEX;
\r
1309 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1310 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1312 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1313 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1315 if ( result != noErr ) {
\r
1316 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1317 errorText_ = errorStream_.str();
\r
1320 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1321 stream_.mode = DUPLEX;
\r
1323 stream_.mode = mode;
\r
1326 // Setup the device property listener for over/underload.
\r
1327 property.mSelector = kAudioDeviceProcessorOverload;
\r
1328 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1329 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1335 pthread_cond_destroy( &handle->condition );
\r
1337 stream_.apiHandle = 0;
\r
1340 for ( int i=0; i<2; i++ ) {
\r
1341 if ( stream_.userBuffer[i] ) {
\r
1342 free( stream_.userBuffer[i] );
\r
1343 stream_.userBuffer[i] = 0;
\r
1347 if ( stream_.deviceBuffer ) {
\r
1348 free( stream_.deviceBuffer );
\r
1349 stream_.deviceBuffer = 0;
\r
1352 stream_.state = STREAM_CLOSED;
\r
1356 void RtApiCore :: closeStream( void )
\r
1358 if ( stream_.state == STREAM_CLOSED ) {
\r
1359 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1360 error( RtAudioError::WARNING );
\r
1364 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1366 if ( stream_.state == STREAM_RUNNING )
\r
1367 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1368 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1369 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1371 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1372 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1376 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1377 if ( stream_.state == STREAM_RUNNING )
\r
1378 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1379 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1380 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1382 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1383 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1387 for ( int i=0; i<2; i++ ) {
\r
1388 if ( stream_.userBuffer[i] ) {
\r
1389 free( stream_.userBuffer[i] );
\r
1390 stream_.userBuffer[i] = 0;
\r
1394 if ( stream_.deviceBuffer ) {
\r
1395 free( stream_.deviceBuffer );
\r
1396 stream_.deviceBuffer = 0;
\r
1399 // Destroy pthread condition variable.
\r
1400 pthread_cond_destroy( &handle->condition );
\r
1402 stream_.apiHandle = 0;
\r
1404 stream_.mode = UNINITIALIZED;
\r
1405 stream_.state = STREAM_CLOSED;
\r
1408 void RtApiCore :: startStream( void )
\r
1411 if ( stream_.state == STREAM_RUNNING ) {
\r
1412 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1413 error( RtAudioError::WARNING );
\r
1417 OSStatus result = noErr;
\r
1418 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1419 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1421 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1422 if ( result != noErr ) {
\r
1423 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1424 errorText_ = errorStream_.str();
\r
1429 if ( stream_.mode == INPUT ||
\r
1430 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1432 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1433 if ( result != noErr ) {
\r
1434 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1435 errorText_ = errorStream_.str();
\r
1440 handle->drainCounter = 0;
\r
1441 handle->internalDrain = false;
\r
1442 stream_.state = STREAM_RUNNING;
\r
1445 if ( result == noErr ) return;
\r
1446 error( RtAudioError::SYSTEM_ERROR );
\r
1449 void RtApiCore :: stopStream( void )
\r
1452 if ( stream_.state == STREAM_STOPPED ) {
\r
1453 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1454 error( RtAudioError::WARNING );
\r
1458 OSStatus result = noErr;
\r
1459 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1462 if ( handle->drainCounter == 0 ) {
\r
1463 handle->drainCounter = 2;
\r
1464 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1467 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1468 if ( result != noErr ) {
\r
1469 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1470 errorText_ = errorStream_.str();
\r
1475 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1477 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1478 if ( result != noErr ) {
\r
1479 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1480 errorText_ = errorStream_.str();
\r
1485 stream_.state = STREAM_STOPPED;
\r
1488 if ( result == noErr ) return;
\r
1489 error( RtAudioError::SYSTEM_ERROR );
\r
1492 void RtApiCore :: abortStream( void )
\r
1495 if ( stream_.state == STREAM_STOPPED ) {
\r
1496 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1497 error( RtAudioError::WARNING );
\r
1501 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1502 handle->drainCounter = 2;
\r
1507 // This function will be called by a spawned thread when the user
\r
1508 // callback function signals that the stream should be stopped or
\r
1509 // aborted. It is better to handle it this way because the
\r
1510 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1511 // function is called.
\r
1512 static void *coreStopStream( void *ptr )
\r
1514 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1515 RtApiCore *object = (RtApiCore *) info->object;
\r
1517 object->stopStream();
\r
1518 pthread_exit( NULL );
\r
1521 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1522 const AudioBufferList *inBufferList,
\r
1523 const AudioBufferList *outBufferList )
\r
1525 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1526 if ( stream_.state == STREAM_CLOSED ) {
\r
1527 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1528 error( RtAudioError::WARNING );
\r
1532 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1533 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1535 // Check if we were draining the stream and signal is finished.
\r
1536 if ( handle->drainCounter > 3 ) {
\r
1537 ThreadHandle threadId;
\r
1539 stream_.state = STREAM_STOPPING;
\r
1540 if ( handle->internalDrain == true )
\r
1541 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1542 else // external call to stopStream()
\r
1543 pthread_cond_signal( &handle->condition );
\r
1547 AudioDeviceID outputDevice = handle->id[0];
\r
1549 // Invoke user callback to get fresh output data UNLESS we are
\r
1550 // draining stream or duplex mode AND the input/output devices are
\r
1551 // different AND this function is called for the input device.
\r
1552 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1553 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1554 double streamTime = getStreamTime();
\r
1555 RtAudioStreamStatus status = 0;
\r
1556 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1557 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1558 handle->xrun[0] = false;
\r
1560 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1561 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1562 handle->xrun[1] = false;
\r
1565 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1566 stream_.bufferSize, streamTime, status, info->userData );
\r
1567 if ( cbReturnValue == 2 ) {
\r
1568 stream_.state = STREAM_STOPPING;
\r
1569 handle->drainCounter = 2;
\r
1573 else if ( cbReturnValue == 1 ) {
\r
1574 handle->drainCounter = 1;
\r
1575 handle->internalDrain = true;
\r
1579 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1581 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1583 if ( handle->nStreams[0] == 1 ) {
\r
1584 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1586 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1588 else { // fill multiple streams with zeros
\r
1589 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1590 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1592 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1596 else if ( handle->nStreams[0] == 1 ) {
\r
1597 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1598 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1599 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1601 else { // copy from user buffer
\r
1602 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1603 stream_.userBuffer[0],
\r
1604 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1607 else { // fill multiple streams
\r
1608 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1609 if ( stream_.doConvertBuffer[0] ) {
\r
1610 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1611 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1614 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1615 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1616 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1617 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1618 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1621 else { // fill multiple multi-channel streams with interleaved data
\r
1622 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1623 Float32 *out, *in;
\r
1625 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1626 UInt32 inChannels = stream_.nUserChannels[0];
\r
1627 if ( stream_.doConvertBuffer[0] ) {
\r
1628 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1629 inChannels = stream_.nDeviceChannels[0];
\r
1632 if ( inInterleaved ) inOffset = 1;
\r
1633 else inOffset = stream_.bufferSize;
\r
1635 channelsLeft = inChannels;
\r
1636 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1638 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1639 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1642 // Account for possible channel offset in first stream
\r
1643 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1644 streamChannels -= stream_.channelOffset[0];
\r
1645 outJump = stream_.channelOffset[0];
\r
1649 // Account for possible unfilled channels at end of the last stream
\r
1650 if ( streamChannels > channelsLeft ) {
\r
1651 outJump = streamChannels - channelsLeft;
\r
1652 streamChannels = channelsLeft;
\r
1655 // Determine input buffer offsets and skips
\r
1656 if ( inInterleaved ) {
\r
1657 inJump = inChannels;
\r
1658 in += inChannels - channelsLeft;
\r
1662 in += (inChannels - channelsLeft) * inOffset;
\r
1665 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1666 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1667 *out++ = in[j*inOffset];
\r
1672 channelsLeft -= streamChannels;
\r
1677 if ( handle->drainCounter ) {
\r
1678 handle->drainCounter++;
\r
1683 AudioDeviceID inputDevice;
\r
1684 inputDevice = handle->id[1];
\r
1685 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1687 if ( handle->nStreams[1] == 1 ) {
\r
1688 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1689 convertBuffer( stream_.userBuffer[1],
\r
1690 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1691 stream_.convertInfo[1] );
\r
1693 else { // copy to user buffer
\r
1694 memcpy( stream_.userBuffer[1],
\r
1695 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1696 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1699 else { // read from multiple streams
\r
1700 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1701 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1703 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1704 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1705 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1706 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1707 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1710 else { // read from multiple multi-channel streams
\r
1711 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1712 Float32 *out, *in;
\r
1714 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1715 UInt32 outChannels = stream_.nUserChannels[1];
\r
1716 if ( stream_.doConvertBuffer[1] ) {
\r
1717 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1718 outChannels = stream_.nDeviceChannels[1];
\r
1721 if ( outInterleaved ) outOffset = 1;
\r
1722 else outOffset = stream_.bufferSize;
\r
1724 channelsLeft = outChannels;
\r
1725 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1727 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1728 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1731 // Account for possible channel offset in first stream
\r
1732 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1733 streamChannels -= stream_.channelOffset[1];
\r
1734 inJump = stream_.channelOffset[1];
\r
1738 // Account for possible unread channels at end of the last stream
\r
1739 if ( streamChannels > channelsLeft ) {
\r
1740 inJump = streamChannels - channelsLeft;
\r
1741 streamChannels = channelsLeft;
\r
1744 // Determine output buffer offsets and skips
\r
1745 if ( outInterleaved ) {
\r
1746 outJump = outChannels;
\r
1747 out += outChannels - channelsLeft;
\r
1751 out += (outChannels - channelsLeft) * outOffset;
\r
1754 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1755 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1756 out[j*outOffset] = *in++;
\r
1761 channelsLeft -= streamChannels;
\r
1765 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1766 convertBuffer( stream_.userBuffer[1],
\r
1767 stream_.deviceBuffer,
\r
1768 stream_.convertInfo[1] );
\r
1774 //MUTEX_UNLOCK( &stream_.mutex );
\r
1776 RtApi::tickStreamTime();
\r
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for error reporting.
// Unrecognized codes fall through to a generic "unknown error" string.
1780 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1784 case kAudioHardwareNotRunningError:
\r
1785 return "kAudioHardwareNotRunningError";
\r
1787 case kAudioHardwareUnspecifiedError:
\r
1788 return "kAudioHardwareUnspecifiedError";
\r
1790 case kAudioHardwareUnknownPropertyError:
\r
1791 return "kAudioHardwareUnknownPropertyError";
\r
1793 case kAudioHardwareBadPropertySizeError:
\r
1794 return "kAudioHardwareBadPropertySizeError";
\r
1796 case kAudioHardwareIllegalOperationError:
\r
1797 return "kAudioHardwareIllegalOperationError";
\r
1799 case kAudioHardwareBadObjectError:
\r
1800 return "kAudioHardwareBadObjectError";
\r
1802 case kAudioHardwareBadDeviceError:
\r
1803 return "kAudioHardwareBadDeviceError";
\r
1805 case kAudioHardwareBadStreamError:
\r
1806 return "kAudioHardwareBadStreamError";
\r
1808 case kAudioHardwareUnsupportedOperationError:
\r
1809 return "kAudioHardwareUnsupportedOperationError";
\r
1811 case kAudioDeviceUnsupportedFormatError:
\r
1812 return "kAudioDeviceUnsupportedFormatError";
\r
1814 case kAudioDevicePermissionsError:
\r
1815 return "kAudioDevicePermissionsError";
\r
// Default: code is not one of the enumerated CoreAudio errors.
1818 return "CoreAudio unknown error";
\r
1822 //******************** End of __MACOSX_CORE__ *********************//
\r
1825 #if defined(__UNIX_JACK__)
\r
1827 // JACK is a low-latency audio server, originally written for the
\r
1828 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1829 // connect a number of different applications to an audio device, as
\r
1830 // well as allowing them to share audio between themselves.
\r
1832 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1833 // have ports connected to the server. The JACK server is typically
\r
1834 // started in a terminal as follows:
\r
1836 // .jackd -d alsa -d hw:0
\r
1838 // or through an interface program such as qjackctl. Many of the
\r
1839 // parameters normally set for a stream are fixed by the JACK server
\r
1840 // and can be specified when the JACK server is started. In
\r
1843 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1845 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1846 // frames, and number of buffers = 4. Once the server is running, it
\r
1847 // is not possible to override these values. If the values are not
\r
1848 // specified in the command-line, the JACK server uses default values.
\r
1850 // The JACK server does not have to be running when an instance of
\r
1851 // RtApiJack is created, though the function getDeviceCount() will
\r
1852 // report 0 devices found until JACK has been started. When no
\r
1853 // devices are available (i.e., the JACK server is not running), a
\r
1854 // stream cannot be opened.
\r
1856 #include <jack/jack.h>
\r
1857 #include <unistd.h>
\r
1860 // A structure to hold various information related to the Jack API
\r
1861 // implementation.
\r
1860 // A structure to hold various information related to the Jack API
1861 // implementation.
1862 struct JackHandle {
\r
// JACK client connection owned by the stream (closed in closeStream()).
1863 jack_client_t *client;
\r
// Registered port arrays, indexed [0] = output, [1] = input (malloc'd).
1864 jack_port_t **ports[2];
\r
// JACK client (device) names for the output/input halves of the stream.
1865 std::string deviceName[2];
\r
// Used by stopStream() to block until the drain completes in the callback.
1867 pthread_cond_t condition;
\r
1868 int drainCounter; // Tracks callback counts when draining
\r
1869 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Constructor: zero all members; xrun flags cleared for both directions.
1872 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler, installed below to suppress JACK's internal
// error printing when RtAudio is built without __RTAUDIO_DEBUG__.
1875 static void jackSilentError( const char * ) {};
\r
// Constructor: no stream state to initialize here; optionally silences
// JACK's own error reporting in non-debug builds.
1877 RtApiJack :: RtApiJack()
\r
1879 // Nothing to do here.
\r
1880 #if !defined(__RTAUDIO_DEBUG__)
\r
1881 // Turn off Jack's internal error reporting.
\r
1882 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is torn down before the object dies.
1886 RtApiJack :: ~RtApiJack()
\r
1888 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count JACK "devices" by connecting a temporary client and grouping the
// server's port names by their client prefix (the text before the first ':').
// Returns 0 if the JACK server is not running.
1891 unsigned int RtApiJack :: getDeviceCount( void )
\r
1893 // See if we can become a jack client.
\r
1894 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1895 jack_status_t *status = NULL;
\r
1896 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1897 if ( client == 0 ) return 0;
\r
1899 const char **ports;
\r
1900 std::string port, previousPort;
\r
1901 unsigned int nChannels = 0, nDevices = 0;
\r
// NOTE(review): the port-name array returned by jack_get_ports() does not
// appear to be released with jack_free() anywhere visible — confirm upstream.
1902 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1904 // Parse the port names up to the first colon (:).
\r
1905 size_t iColon = 0;
\r
1907 port = (char *) ports[ nChannels ];
\r
1908 iColon = port.find(":");
\r
1909 if ( iColon != std::string::npos ) {
\r
// Keep the colon in the prefix so e.g. "foo:" and "foobar:" stay distinct.
1910 port = port.substr( 0, iColon + 1 );
\r
// A new client-name prefix means a new "device".
1911 if ( port != previousPort ) {
\r
1913 previousPort = port;
\r
1916 } while ( ports[++nChannels] );
\r
// Done counting; release the temporary client.
1920 jack_client_close( client );
\r
// Fill an RtAudio::DeviceInfo for JACK device index 'device'. Opens a
// temporary client, derives the device name from port-name prefixes, then
// counts input/output ports and reads the server sample rate.
// On failure, info.probed stays false and a WARNING/INVALID_USE is raised.
1924 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1926 RtAudio::DeviceInfo info;
\r
1927 info.probed = false;
\r
1929 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1930 jack_status_t *status = NULL;
\r
1931 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1932 if ( client == 0 ) {
\r
1933 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1934 error( RtAudioError::WARNING );
\r
1938 const char **ports;
\r
1939 std::string port, previousPort;
\r
1940 unsigned int nPorts = 0, nDevices = 0;
\r
1941 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1943 // Parse the port names up to the first colon (:).
\r
1944 size_t iColon = 0;
\r
1946 port = (char *) ports[ nPorts ];
\r
1947 iColon = port.find(":");
\r
1948 if ( iColon != std::string::npos ) {
\r
1949 port = port.substr( 0, iColon );
\r
1950 if ( port != previousPort ) {
\r
// The Nth distinct prefix is device N; remember its name when it matches.
1951 if ( nDevices == device ) info.name = port;
\r
1953 previousPort = port;
\r
1956 } while ( ports[++nPorts] );
\r
1960 if ( device >= nDevices ) {
\r
1961 jack_client_close( client );
\r
1962 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1963 error( RtAudioError::INVALID_USE );
\r
1967 // Get the current jack server sample rate.
\r
// JACK runs at a single fixed rate, so only one rate is reported.
1968 info.sampleRates.clear();
\r
1969 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1971 // Count the available ports containing the client name as device
\r
1972 // channels. Jack "input ports" equal RtAudio output channels.
\r
1973 unsigned int nChannels = 0;
\r
1974 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1976 while ( ports[ nChannels ] ) nChannels++;
\r
1978 info.outputChannels = nChannels;
\r
1981 // Jack "output ports" equal RtAudio input channels.
\r
1983 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1985 while ( ports[ nChannels ] ) nChannels++;
\r
1987 info.inputChannels = nChannels;
\r
1990 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1991 jack_client_close(client);
\r
1992 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1993 error( RtAudioError::WARNING );
\r
1997 // If device opens for both playback and capture, we determine the channels.
\r
1998 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
// Duplex channel count is limited by the smaller direction.
1999 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2001 // Jack always uses 32-bit floats.
\r
2002 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2004 // Jack doesn't provide default devices so we'll use the first available one.
\r
2005 if ( device == 0 && info.outputChannels > 0 )
\r
2006 info.isDefaultOutput = true;
\r
2007 if ( device == 0 && info.inputChannels > 0 )
\r
2008 info.isDefaultInput = true;
\r
2010 jack_client_close(client);
\r
2011 info.probed = true;
\r
// JACK process callback: forwards each audio period to the stream's
// RtApiJack::callbackEvent(). Returning non-zero tells JACK to stop
// calling this client.
2015 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2017 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2019 RtApiJack *object = (RtApiJack *) info->object;
\r
2020 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2025 // This function will be called by a spawned thread when the Jack
2026 // server signals that it is shutting down. It is necessary to handle
2027 // it this way because the jackShutdown() function must return before
2028 // the jack_deactivate() function (in closeStream()) will return.
2029 static void *jackCloseStream( void *ptr )
\r
2031 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2032 RtApiJack *object = (RtApiJack *) info->object;
\r
// Close the stream from this helper thread, then exit the thread.
2034 object->closeStream();
\r
2036 pthread_exit( NULL );
\r
// JACK shutdown callback: if the stream is still running when the server
// goes away, close it from a separate thread (see jackCloseStream above).
2038 static void jackShutdown( void *infoPointer )
\r
2040 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2041 RtApiJack *object = (RtApiJack *) info->object;
\r
2043 // Check current stream state. If stopped, then we'll assume this
2044 // was called as a result of a call to RtApiJack::stopStream (the
2045 // deactivation of a client handle causes this function to be called).
2046 // If not, we'll assume the Jack server is shutting down or some
2047 // other problem occurred and we should close the stream.
2048 if ( object->isStreamRunning() == false ) return;
\r
2050 ThreadHandle threadId;
\r
// The close must happen on another thread so this callback can return first.
2051 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2052 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// JACK xrun callback: flag an under/overflow on whichever directions
// (output = index 0, input = index 1) have registered ports.
// NOTE(review): this casts the user pointer directly to JackHandle*, but
// probeDeviceOpen() registers it as the ADDRESS of a local handle pointer
// (&handle) — see the note there; confirm against the upstream fix.
2055 static int jackXrun( void *infoPointer )
\r
2057 JackHandle *handle = (JackHandle *) infoPointer;
\r
2059 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2060 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Open one direction (OUTPUT or INPUT) of a JACK stream:
//  - connect (or reuse) a JACK client,
//  - resolve the device index to a client-name prefix,
//  - validate channel count and sample rate against the server,
//  - set up format/conversion flags, allocate user/device buffers,
//  - allocate the JackHandle and register callback functions and ports.
// Returns FAILURE (via the error-exit path at the bottom) on any error;
// the stream's sample rate and buffer size are dictated by the JACK server.
2065 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2066 unsigned int firstChannel, unsigned int sampleRate,
\r
2067 RtAudioFormat format, unsigned int *bufferSize,
\r
2068 RtAudio::StreamOptions *options )
\r
// Non-null when this is the second (INPUT) pass of a duplex open.
2070 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2072 // Look for jack server and try to become a client (only do once per stream).
\r
2073 jack_client_t *client = 0;
\r
2074 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2075 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2076 jack_status_t *status = NULL;
\r
2077 if ( options && !options->streamName.empty() )
\r
2078 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2080 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2081 if ( client == 0 ) {
\r
2082 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2083 error( RtAudioError::WARNING );
\r
2088 // The handle must have been created on an earlier pass.
\r
2089 client = handle->client;
\r
2092 const char **ports;
\r
2093 std::string port, previousPort, deviceName;
\r
2094 unsigned int nPorts = 0, nDevices = 0;
\r
2095 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2097 // Parse the port names up to the first colon (:).
\r
2098 size_t iColon = 0;
\r
2100 port = (char *) ports[ nPorts ];
\r
2101 iColon = port.find(":");
\r
2102 if ( iColon != std::string::npos ) {
\r
2103 port = port.substr( 0, iColon );
\r
// The Nth distinct client-name prefix is device N.
2104 if ( port != previousPort ) {
\r
2105 if ( nDevices == device ) deviceName = port;
\r
2107 previousPort = port;
\r
2110 } while ( ports[++nPorts] );
\r
2114 if ( device >= nDevices ) {
\r
2115 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2119 // Count the available ports containing the client name as device
2120 // channels. Jack "input ports" equal RtAudio output channels.
\r
2121 unsigned int nChannels = 0;
\r
2122 unsigned long flag = JackPortIsInput;
\r
2123 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2124 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2126 while ( ports[ nChannels ] ) nChannels++;
\r
2130 // Compare the jack ports for specified client to the requested number of channels.
\r
2131 if ( nChannels < (channels + firstChannel) ) {
\r
2132 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2133 errorText_ = errorStream_.str();
\r
2137 // Check the jack server sample rate.
\r
// JACK's rate is fixed by the server; the caller's request must match it.
2138 unsigned int jackRate = jack_get_sample_rate( client );
\r
2139 if ( sampleRate != jackRate ) {
\r
2140 jack_client_close( client );
\r
2141 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2142 errorText_ = errorStream_.str();
\r
2145 stream_.sampleRate = jackRate;
\r
2147 // Get the latency of the JACK port.
\r
2148 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2149 if ( ports[ firstChannel ] ) {
\r
2150 // Added by Ge Wang
\r
2151 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2152 // the range (usually the min and max are equal)
\r
2153 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2154 // get the latency range
\r
2155 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2156 // be optimistic, use the min!
\r
2157 stream_.latency[mode] = latrange.min;
\r
2158 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2162 // The jack server always uses 32-bit floating-point data.
\r
2163 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2164 stream_.userFormat = format;
\r
2166 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2167 else stream_.userInterleaved = true;
\r
2169 // Jack always uses non-interleaved buffers.
\r
2170 stream_.deviceInterleaved[mode] = false;
\r
2172 // Jack always provides host byte-ordered data.
\r
2173 stream_.doByteSwap[mode] = false;
\r
2175 // Get the buffer size. The buffer size and number of buffers
2176 // (periods) is set when the jack server is started.
\r
2177 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2178 *bufferSize = stream_.bufferSize;
\r
2180 stream_.nDeviceChannels[mode] = channels;
\r
2181 stream_.nUserChannels[mode] = channels;
\r
2183 // Set flags for buffer conversion.
\r
// Conversion is needed for a format mismatch or an interleave mismatch
// (the latter only matters with more than one channel).
2184 stream_.doConvertBuffer[mode] = false;
\r
2185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2186 stream_.doConvertBuffer[mode] = true;
\r
2187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2188 stream_.nUserChannels[mode] > 1 )
\r
2189 stream_.doConvertBuffer[mode] = true;
\r
2191 // Allocate our JackHandle structure for the stream.
\r
2192 if ( handle == 0 ) {
\r
2194 handle = new JackHandle;
\r
2196 catch ( std::bad_alloc& ) {
\r
2197 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2201 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2202 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2205 stream_.apiHandle = (void *) handle;
\r
2206 handle->client = client;
\r
2208 handle->deviceName[mode] = deviceName;
\r
2210 // Allocate necessary internal buffers.
\r
2211 unsigned long bufferBytes;
\r
2212 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2213 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2214 if ( stream_.userBuffer[mode] == NULL ) {
\r
2215 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2219 if ( stream_.doConvertBuffer[mode] ) {
\r
2221 bool makeBuffer = true;
\r
2222 if ( mode == OUTPUT )
\r
2223 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2224 else { // mode == INPUT
\r
2225 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
// In duplex mode, reuse the existing device buffer if it is already
// large enough for the bigger of the two directions.
2226 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2227 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2228 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2232 if ( makeBuffer ) {
\r
2233 bufferBytes *= *bufferSize;
\r
2234 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2235 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2236 if ( stream_.deviceBuffer == NULL ) {
\r
2237 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2243 // Allocate memory for the Jack ports (channels) identifiers.
\r
2244 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2245 if ( handle->ports[mode] == NULL ) {
\r
2246 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2250 stream_.device[mode] = device;
\r
2251 stream_.channelOffset[mode] = firstChannel;
\r
2252 stream_.state = STREAM_STOPPED;
\r
2253 stream_.callbackInfo.object = (void *) this;
\r
2255 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2256 // We had already set up the stream for output.
\r
2257 stream_.mode = DUPLEX;
\r
2259 stream_.mode = mode;
\r
2260 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
// NOTE(review): '&handle' is the address of a LOCAL pointer variable, yet
// jackXrun() casts its argument to JackHandle*. This looks like a dangling/
// mistyped registration; the later upstream fix passes the handle itself.
2261 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2262 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2265 // Register our ports.
\r
2267 if ( mode == OUTPUT ) {
\r
2268 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2269 snprintf( label, 64, "outport %d", i );
\r
2270 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2271 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2275 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2276 snprintf( label, 64, "inport %d", i );
\r
2277 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2278 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2282 // Setup the buffer conversion information structure. We don't use
2283 // buffers to do channel offsets, so we override that parameter
\r
2285 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// ---- error-exit path: undo partial setup and free all allocations. ----
2291 pthread_cond_destroy( &handle->condition );
\r
2292 jack_client_close( handle->client );
\r
2294 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2295 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2298 stream_.apiHandle = 0;
\r
2301 for ( int i=0; i<2; i++ ) {
\r
2302 if ( stream_.userBuffer[i] ) {
\r
2303 free( stream_.userBuffer[i] );
\r
2304 stream_.userBuffer[i] = 0;
\r
2308 if ( stream_.deviceBuffer ) {
\r
2309 free( stream_.deviceBuffer );
\r
2310 stream_.deviceBuffer = 0;
\r
// Close the stream: deactivate and close the JACK client, destroy the
// condition variable, and free the handle and all user/device buffers.
// Issues a WARNING (and returns) if no stream is open.
2316 void RtApiJack :: closeStream( void )
\r
2318 if ( stream_.state == STREAM_CLOSED ) {
\r
2319 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2320 error( RtAudioError::WARNING );
\r
2324 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
// A running stream must be deactivated before the client is closed.
2327 if ( stream_.state == STREAM_RUNNING )
\r
2328 jack_deactivate( handle->client );
\r
2330 jack_client_close( handle->client );
\r
2334 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2335 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2336 pthread_cond_destroy( &handle->condition );
\r
2338 stream_.apiHandle = 0;
\r
2341 for ( int i=0; i<2; i++ ) {
\r
2342 if ( stream_.userBuffer[i] ) {
\r
2343 free( stream_.userBuffer[i] );
\r
2344 stream_.userBuffer[i] = 0;
\r
2348 if ( stream_.deviceBuffer ) {
\r
2349 free( stream_.deviceBuffer );
\r
2350 stream_.deviceBuffer = 0;
\r
// Reset stream bookkeeping to the closed state.
2353 stream_.mode = UNINITIALIZED;
\r
2354 stream_.state = STREAM_CLOSED;
\r
// Start the stream: activate the JACK client, then wire our registered
// ports to the target device's ports, honoring the channel offset chosen
// at open time. Raises SYSTEM_ERROR if activation or connection fails.
2357 void RtApiJack :: startStream( void )
\r
2360 if ( stream_.state == STREAM_RUNNING ) {
\r
2361 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2362 error( RtAudioError::WARNING );
\r
2366 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2367 int result = jack_activate( handle->client );
\r
2369 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2373 const char **ports;
\r
2375 // Get the list of available ports.
\r
2376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Our playback goes to the device's JackPortIsInput ports.
2378 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2379 if ( ports == NULL) {
\r
2380 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2384 // Now make the port connections. Since RtAudio wasn't designed to
2385 // allow the user to select particular channels of a device, we'll
2386 // just open the first "nChannels" ports with offset.
\r
2387 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2389 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2390 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2393 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2400 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
// Our capture comes from the device's JackPortIsOutput ports.
2402 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2403 if ( ports == NULL) {
\r
2404 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2408 // Now make the port connections. See note above.
\r
2409 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2411 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2412 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2415 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
// Reset drain state and mark the stream running.
2422 handle->drainCounter = 0;
\r
2423 handle->internalDrain = false;
\r
2424 stream_.state = STREAM_RUNNING;
\r
2427 if ( result == 0 ) return;
\r
2428 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully: for output streams, start a drain (two
// callback periods of zeros) and block on the condition variable until the
// process callback signals completion, then deactivate the JACK client.
2431 void RtApiJack :: stopStream( void )
\r
2434 if ( stream_.state == STREAM_STOPPED ) {
\r
2435 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2436 error( RtAudioError::WARNING );
\r
2440 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2441 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means the drain was not already started by the
// user callback; initiate it here and wait for the signal.
2443 if ( handle->drainCounter == 0 ) {
\r
2444 handle->drainCounter = 2;
\r
// NOTE(review): pthread_cond_wait requires stream_.mutex to be locked by
// this thread, but the MUTEX lock/unlock calls appear commented out in
// this version (see the commented MUTEX_UNLOCK near callbackEvent) —
// confirm locking behavior against the upstream source.
2445 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2449 jack_deactivate( handle->client );
\r
2450 stream_.state = STREAM_STOPPED;
\r
// Abort the stream without draining pending output: force the drain
// counter past the "write zeros" stage so the callback path stops quickly
// (the remaining teardown is shared with stopStream()).
2453 void RtApiJack :: abortStream( void )
\r
2456 if ( stream_.state == STREAM_STOPPED ) {
\r
2457 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2458 error( RtAudioError::WARNING );
\r
2462 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2463 handle->drainCounter = 2;
\r
2468 // This function will be called by a spawned thread when the user
2469 // callback function signals that the stream should be stopped or
2470 // aborted. It is necessary to handle it this way because the
2471 // callbackEvent() function must return before the jack_deactivate()
2472 // function will return.
2473 static void *jackStopStream( void *ptr )
\r
2475 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2476 RtApiJack *object = (RtApiJack *) info->object;
\r
// Stop the stream from this helper thread, then exit the thread.
2478 object->stopStream();
\r
2479 pthread_exit( NULL );
\r
// Per-period JACK processing: run the user callback, then copy/convert the
// user output buffer into the JACK port buffers and the JACK input port
// buffers into the user input buffer. Also drives the drain state machine
// used by stopStream()/abortStream(). Returns SUCCESS to keep running.
2482 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2484 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2485 if ( stream_.state == STREAM_CLOSED ) {
\r
// NOTE(review): error text says "RtApiCore::" — copy-paste from the
// CoreAudio section; should read "RtApiJack::callbackEvent()".
2486 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2487 error( RtAudioError::WARNING );
\r
2490 if ( stream_.bufferSize != nframes ) {
\r
// NOTE(review): same "RtApiCore::" copy-paste issue here.
2491 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2492 error( RtAudioError::WARNING );
\r
2496 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2497 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2499 // Check if we were draining the stream and signal is finished.
\r
2500 if ( handle->drainCounter > 3 ) {
\r
2501 ThreadHandle threadId;
\r
2503 stream_.state = STREAM_STOPPING;
\r
// Internal drain (callback requested stop): stop from a helper thread.
// External drain (stopStream waiting): wake the blocked thread instead.
2504 if ( handle->internalDrain == true )
\r
2505 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2507 pthread_cond_signal( &handle->condition );
\r
2511 // Invoke user callback first, to get fresh output data.
\r
2512 if ( handle->drainCounter == 0 ) {
\r
2513 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2514 double streamTime = getStreamTime();
\r
2515 RtAudioStreamStatus status = 0;
\r
// Report and clear any xrun flags set by jackXrun().
2516 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2517 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2518 handle->xrun[0] = false;
\r
2520 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2521 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2522 handle->xrun[1] = false;
\r
2524 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2525 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort immediately; 1 = drain output then stop.
2526 if ( cbReturnValue == 2 ) {
\r
2527 stream_.state = STREAM_STOPPING;
\r
2528 handle->drainCounter = 2;
\r
2530 pthread_create( &id, NULL, jackStopStream, info );
\r
2533 else if ( cbReturnValue == 1 ) {
\r
2534 handle->drainCounter = 1;
\r
2535 handle->internalDrain = true;
\r
2539 jack_default_audio_sample_t *jackbuffer;
\r
2540 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2541 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2543 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2545 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2546 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2547 memset( jackbuffer, 0, bufferBytes );
\r
2551 else if ( stream_.doConvertBuffer[0] ) {
\r
// Convert user format/interleaving into the device buffer first, then
// scatter one channel per JACK port.
2553 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2560 else { // no buffer conversion
\r
2561 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2562 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2563 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
// While draining, count callbacks so the > 3 check above fires.
2567 if ( handle->drainCounter ) {
\r
2568 handle->drainCounter++;
\r
2573 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2575 if ( stream_.doConvertBuffer[1] ) {
\r
// Gather one channel per JACK port into the device buffer, then
// convert to the user's format/interleaving.
2576 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2577 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2578 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2580 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2582 else { // no buffer conversion
\r
2583 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2584 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2585 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
// Advance the stream-time bookkeeping by one buffer period.
2591 RtApi::tickStreamTime();
\r
2594 //******************** End of __UNIX_JACK__ *********************//
\r
2597 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2599 // The ASIO API is designed around a callback scheme, so this
\r
2600 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2601 // Jack. The primary constraint with ASIO is that it only allows
\r
2602 // access to a single driver at a time. Thus, it is not possible to
\r
2603 // have more than one simultaneous RtAudio stream.
\r
2605 // This implementation also requires a number of external ASIO files
\r
2606 // and a few global variables. The ASIO callback scheme does not
\r
2607 // allow for the passing of user data, so we must create a global
\r
2608 // pointer to our callbackInfo structure.
\r
2610 // On unix systems, we make use of a pthread condition variable.
\r
2611 // Since there is no equivalent in Windows, I hacked something based
\r
2612 // on information found in
\r
2613 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2615 #include "asiosys.h"
\r
2617 #include "iasiothiscallresolver.h"
\r
2618 #include "asiodrivers.h"
\r
2621 static AsioDrivers drivers;
\r
2622 static ASIOCallbacks asioCallbacks;
\r
2623 static ASIODriverInfo driverInfo;
\r
2624 static CallbackInfo *asioCallbackInfo;
\r
2625 static bool asioXRun;
\r
// Per-stream bookkeeping for the ASIO backend (mirrors JackHandle above).
2627 struct AsioHandle {
\r
2628 int drainCounter; // Tracks callback counts when draining
\r
2629 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// ASIO buffer descriptors allocated at stream-open time.
2630 ASIOBufferInfo *bufferInfos;
\r
// Constructor: zero-initialize drain state and buffer pointer.
2634 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2637 // Function declarations (definitions at end of section)
\r
2638 static const char* getAsioErrorString( ASIOError result );
\r
2639 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2640 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM for single-threaded apartment use (required
// by ASIO), reset the current driver, and prime the ASIO driver info
// structure used by later probes.
2642 RtApiAsio :: RtApiAsio()
\r
2644 // ASIO cannot run on a multi-threaded apartment. You can call
2645 // CoInitialize beforehand, but it must be for apartment threading
2646 // (in which case, CoInitialize will return S_FALSE here).
\r
2647 coInitialized_ = false;
\r
2648 HRESULT hr = CoInitialize( NULL );
\r
2649 if ( FAILED(hr) ) {
\r
2650 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2651 error( RtAudioError::WARNING );
\r
2653 coInitialized_ = true;
\r
2655 drivers.removeCurrentDriver();
\r
2656 driverInfo.asioVersion = 2;
\r
2658 // See note in DirectSound implementation about GetDesktopWindow().
\r
2659 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: close any open stream, then release COM if we initialized it.
2662 RtApiAsio :: ~RtApiAsio()
\r
2664 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2665 if ( coInitialized_ ) CoUninitialize();
\r
// Report the number of installed ASIO drivers (each driver is one device).
2668 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2670 return (unsigned int) drivers.asioGetNumDev();
\r
2673 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2675 RtAudio::DeviceInfo info;
\r
2676 info.probed = false;
\r
2679 unsigned int nDevices = getDeviceCount();
\r
2680 if ( nDevices == 0 ) {
\r
2681 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2682 error( RtAudioError::INVALID_USE );
\r
2686 if ( device >= nDevices ) {
\r
2687 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2688 error( RtAudioError::INVALID_USE );
\r
2692 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2693 if ( stream_.state != STREAM_CLOSED ) {
\r
2694 if ( device >= devices_.size() ) {
\r
2695 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2696 error( RtAudioError::WARNING );
\r
2699 return devices_[ device ];
\r
2702 char driverName[32];
\r
2703 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2704 if ( result != ASE_OK ) {
\r
2705 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2706 errorText_ = errorStream_.str();
\r
2707 error( RtAudioError::WARNING );
\r
2711 info.name = driverName;
\r
2713 if ( !drivers.loadDriver( driverName ) ) {
\r
2714 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2715 errorText_ = errorStream_.str();
\r
2716 error( RtAudioError::WARNING );
\r
2720 result = ASIOInit( &driverInfo );
\r
2721 if ( result != ASE_OK ) {
\r
2722 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2723 errorText_ = errorStream_.str();
\r
2724 error( RtAudioError::WARNING );
\r
2728 // Determine the device channel information.
\r
2729 long inputChannels, outputChannels;
\r
2730 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2731 if ( result != ASE_OK ) {
\r
2732 drivers.removeCurrentDriver();
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 info.outputChannels = outputChannels;
\r
2740 info.inputChannels = inputChannels;
\r
2741 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2742 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2744 // Determine the supported sample rates.
\r
2745 info.sampleRates.clear();
\r
2746 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2747 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2748 if ( result == ASE_OK )
\r
2749 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2752 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2753 ASIOChannelInfo channelInfo;
\r
2754 channelInfo.channel = 0;
\r
2755 channelInfo.isInput = true;
\r
2756 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2757 result = ASIOGetChannelInfo( &channelInfo );
\r
2758 if ( result != ASE_OK ) {
\r
2759 drivers.removeCurrentDriver();
\r
2760 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2761 errorText_ = errorStream_.str();
\r
2762 error( RtAudioError::WARNING );
\r
2766 info.nativeFormats = 0;
\r
2767 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2768 info.nativeFormats |= RTAUDIO_SINT16;
\r
2769 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2770 info.nativeFormats |= RTAUDIO_SINT32;
\r
2771 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2772 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2773 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2774 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2775 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2776 info.nativeFormats |= RTAUDIO_SINT24;
\r
2778 if ( info.outputChannels > 0 )
\r
2779 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2780 if ( info.inputChannels > 0 )
\r
2781 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2783 info.probed = true;
\r
2784 drivers.removeCurrentDriver();
\r
2788 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2790 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2791 object->callbackEvent( index );
\r
2794 void RtApiAsio :: saveDeviceInfo( void )
\r
2798 unsigned int nDevices = getDeviceCount();
\r
2799 devices_.resize( nDevices );
\r
2800 for ( unsigned int i=0; i<nDevices; i++ )
\r
2801 devices_[i] = getDeviceInfo( i );
\r
2804 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2805 unsigned int firstChannel, unsigned int sampleRate,
\r
2806 RtAudioFormat format, unsigned int *bufferSize,
\r
2807 RtAudio::StreamOptions *options )
\r
2809 // For ASIO, a duplex stream MUST use the same driver.
\r
2810 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2811 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2815 char driverName[32];
\r
2816 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2817 if ( result != ASE_OK ) {
\r
2818 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2819 errorText_ = errorStream_.str();
\r
2823 // Only load the driver once for duplex stream.
\r
2824 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2825 // The getDeviceInfo() function will not work when a stream is open
\r
2826 // because ASIO does not allow multiple devices to run at the same
\r
2827 // time. Thus, we'll probe the system before opening a stream and
\r
2828 // save the results for use by getDeviceInfo().
\r
2829 this->saveDeviceInfo();
\r
2831 if ( !drivers.loadDriver( driverName ) ) {
\r
2832 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2833 errorText_ = errorStream_.str();
\r
2837 result = ASIOInit( &driverInfo );
\r
2838 if ( result != ASE_OK ) {
\r
2839 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2840 errorText_ = errorStream_.str();
\r
2845 // Check the device channel count.
\r
2846 long inputChannels, outputChannels;
\r
2847 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2848 if ( result != ASE_OK ) {
\r
2849 drivers.removeCurrentDriver();
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2855 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2856 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2857 drivers.removeCurrentDriver();
\r
2858 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2859 errorText_ = errorStream_.str();
\r
2862 stream_.nDeviceChannels[mode] = channels;
\r
2863 stream_.nUserChannels[mode] = channels;
\r
2864 stream_.channelOffset[mode] = firstChannel;
\r
2866 // Verify the sample rate is supported.
\r
2867 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2871 errorText_ = errorStream_.str();
\r
2875 // Get the current sample rate
\r
2876 ASIOSampleRate currentRate;
\r
2877 result = ASIOGetSampleRate( ¤tRate );
\r
2878 if ( result != ASE_OK ) {
\r
2879 drivers.removeCurrentDriver();
\r
2880 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2881 errorText_ = errorStream_.str();
\r
2885 // Set the sample rate only if necessary
\r
2886 if ( currentRate != sampleRate ) {
\r
2887 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2888 if ( result != ASE_OK ) {
\r
2889 drivers.removeCurrentDriver();
\r
2890 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2891 errorText_ = errorStream_.str();
\r
2896 // Determine the driver data type.
\r
2897 ASIOChannelInfo channelInfo;
\r
2898 channelInfo.channel = 0;
\r
2899 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2900 else channelInfo.isInput = true;
\r
2901 result = ASIOGetChannelInfo( &channelInfo );
\r
2902 if ( result != ASE_OK ) {
\r
2903 drivers.removeCurrentDriver();
\r
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2905 errorText_ = errorStream_.str();
\r
2909 // Assuming WINDOWS host is always little-endian.
\r
2910 stream_.doByteSwap[mode] = false;
\r
2911 stream_.userFormat = format;
\r
2912 stream_.deviceFormat[mode] = 0;
\r
2913 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2914 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2915 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2917 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2918 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2919 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2921 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2922 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2923 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2925 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2926 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2927 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2929 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2930 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2931 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2934 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2935 drivers.removeCurrentDriver();
\r
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2937 errorText_ = errorStream_.str();
\r
2941 // Set the buffer size. For a duplex stream, this will end up
\r
2942 // setting the buffer size based on the input constraints, which
\r
2944 long minSize, maxSize, preferSize, granularity;
\r
2945 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2946 if ( result != ASE_OK ) {
\r
2947 drivers.removeCurrentDriver();
\r
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2949 errorText_ = errorStream_.str();
\r
2953 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2954 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2955 else if ( granularity == -1 ) {
\r
2956 // Make sure bufferSize is a power of two.
\r
2957 int log2_of_min_size = 0;
\r
2958 int log2_of_max_size = 0;
\r
2960 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2961 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2962 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2965 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2966 int min_delta_num = log2_of_min_size;
\r
2968 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2969 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2970 if (current_delta < min_delta) {
\r
2971 min_delta = current_delta;
\r
2972 min_delta_num = i;
\r
2976 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2977 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2978 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2980 else if ( granularity != 0 ) {
\r
2981 // Set to an even multiple of granularity, rounding up.
\r
2982 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2985 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2986 drivers.removeCurrentDriver();
\r
2987 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2991 stream_.bufferSize = *bufferSize;
\r
2992 stream_.nBuffers = 2;
\r
2994 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2995 else stream_.userInterleaved = true;
\r
2997 // ASIO always uses non-interleaved buffers.
\r
2998 stream_.deviceInterleaved[mode] = false;
\r
3000 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3001 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3002 if ( handle == 0 ) {
\r
3004 handle = new AsioHandle;
\r
3006 catch ( std::bad_alloc& ) {
\r
3007 //if ( handle == NULL ) {
\r
3008 drivers.removeCurrentDriver();
\r
3009 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3012 handle->bufferInfos = 0;
\r
3014 // Create a manual-reset event.
\r
3015 handle->condition = CreateEvent( NULL, // no security
\r
3016 TRUE, // manual-reset
\r
3017 FALSE, // non-signaled initially
\r
3018 NULL ); // unnamed
\r
3019 stream_.apiHandle = (void *) handle;
\r
3022 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3023 // and output separately, we'll have to dispose of previously
\r
3024 // created output buffers for a duplex stream.
\r
3025 long inputLatency, outputLatency;
\r
3026 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3027 ASIODisposeBuffers();
\r
3028 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3031 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3032 bool buffersAllocated = false;
\r
3033 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3034 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3035 if ( handle->bufferInfos == NULL ) {
\r
3036 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3037 errorText_ = errorStream_.str();
\r
3041 ASIOBufferInfo *infos;
\r
3042 infos = handle->bufferInfos;
\r
3043 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3044 infos->isInput = ASIOFalse;
\r
3045 infos->channelNum = i + stream_.channelOffset[0];
\r
3046 infos->buffers[0] = infos->buffers[1] = 0;
\r
3048 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3049 infos->isInput = ASIOTrue;
\r
3050 infos->channelNum = i + stream_.channelOffset[1];
\r
3051 infos->buffers[0] = infos->buffers[1] = 0;
\r
3054 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3055 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3056 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3057 asioCallbacks.asioMessage = &asioMessages;
\r
3058 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3059 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3060 if ( result != ASE_OK ) {
\r
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3062 errorText_ = errorStream_.str();
\r
3065 buffersAllocated = true;
\r
3067 // Set flags for buffer conversion.
\r
3068 stream_.doConvertBuffer[mode] = false;
\r
3069 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3070 stream_.doConvertBuffer[mode] = true;
\r
3071 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3072 stream_.nUserChannels[mode] > 1 )
\r
3073 stream_.doConvertBuffer[mode] = true;
\r
3075 // Allocate necessary internal buffers
\r
3076 unsigned long bufferBytes;
\r
3077 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3078 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3079 if ( stream_.userBuffer[mode] == NULL ) {
\r
3080 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3084 if ( stream_.doConvertBuffer[mode] ) {
\r
3086 bool makeBuffer = true;
\r
3087 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3088 if ( mode == INPUT ) {
\r
3089 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3090 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3091 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3095 if ( makeBuffer ) {
\r
3096 bufferBytes *= *bufferSize;
\r
3097 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3098 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3099 if ( stream_.deviceBuffer == NULL ) {
\r
3100 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3106 stream_.sampleRate = sampleRate;
\r
3107 stream_.device[mode] = device;
\r
3108 stream_.state = STREAM_STOPPED;
\r
3109 asioCallbackInfo = &stream_.callbackInfo;
\r
3110 stream_.callbackInfo.object = (void *) this;
\r
3111 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3112 // We had already set up an output stream.
\r
3113 stream_.mode = DUPLEX;
\r
3115 stream_.mode = mode;
\r
3117 // Determine device latencies
\r
3118 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3119 if ( result != ASE_OK ) {
\r
3120 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3121 errorText_ = errorStream_.str();
\r
3122 error( RtAudioError::WARNING); // warn but don't fail
\r
3125 stream_.latency[0] = outputLatency;
\r
3126 stream_.latency[1] = inputLatency;
\r
3129 // Setup the buffer conversion information structure. We don't use
\r
3130 // buffers to do channel offsets, so we override that parameter
\r
3132 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3137 if ( buffersAllocated )
\r
3138 ASIODisposeBuffers();
\r
3139 drivers.removeCurrentDriver();
\r
3142 CloseHandle( handle->condition );
\r
3143 if ( handle->bufferInfos )
\r
3144 free( handle->bufferInfos );
\r
3146 stream_.apiHandle = 0;
\r
3149 for ( int i=0; i<2; i++ ) {
\r
3150 if ( stream_.userBuffer[i] ) {
\r
3151 free( stream_.userBuffer[i] );
\r
3152 stream_.userBuffer[i] = 0;
\r
3156 if ( stream_.deviceBuffer ) {
\r
3157 free( stream_.deviceBuffer );
\r
3158 stream_.deviceBuffer = 0;
\r
3164 void RtApiAsio :: closeStream()
\r
3166 if ( stream_.state == STREAM_CLOSED ) {
\r
3167 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3168 error( RtAudioError::WARNING );
\r
3172 if ( stream_.state == STREAM_RUNNING ) {
\r
3173 stream_.state = STREAM_STOPPED;
\r
3176 ASIODisposeBuffers();
\r
3177 drivers.removeCurrentDriver();
\r
3179 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3181 CloseHandle( handle->condition );
\r
3182 if ( handle->bufferInfos )
\r
3183 free( handle->bufferInfos );
\r
3185 stream_.apiHandle = 0;
\r
3188 for ( int i=0; i<2; i++ ) {
\r
3189 if ( stream_.userBuffer[i] ) {
\r
3190 free( stream_.userBuffer[i] );
\r
3191 stream_.userBuffer[i] = 0;
\r
3195 if ( stream_.deviceBuffer ) {
\r
3196 free( stream_.deviceBuffer );
\r
3197 stream_.deviceBuffer = 0;
\r
3200 stream_.mode = UNINITIALIZED;
\r
3201 stream_.state = STREAM_CLOSED;
\r
// File-scope flag cleared by startStream(); presumably guards against a
// redundant stop request from the drain thread — TODO confirm against full file.
bool stopThreadCalled = false;
\r
3206 void RtApiAsio :: startStream()
\r
3209 if ( stream_.state == STREAM_RUNNING ) {
\r
3210 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3211 error( RtAudioError::WARNING );
\r
3215 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3216 ASIOError result = ASIOStart();
\r
3217 if ( result != ASE_OK ) {
\r
3218 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3219 errorText_ = errorStream_.str();
\r
3223 handle->drainCounter = 0;
\r
3224 handle->internalDrain = false;
\r
3225 ResetEvent( handle->condition );
\r
3226 stream_.state = STREAM_RUNNING;
\r
3230 stopThreadCalled = false;
\r
3232 if ( result == ASE_OK ) return;
\r
3233 error( RtAudioError::SYSTEM_ERROR );
\r
3236 void RtApiAsio :: stopStream()
\r
3239 if ( stream_.state == STREAM_STOPPED ) {
\r
3240 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3241 error( RtAudioError::WARNING );
\r
3245 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3246 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3247 if ( handle->drainCounter == 0 ) {
\r
3248 handle->drainCounter = 2;
\r
3249 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3253 stream_.state = STREAM_STOPPED;
\r
3255 ASIOError result = ASIOStop();
\r
3256 if ( result != ASE_OK ) {
\r
3257 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3258 errorText_ = errorStream_.str();
\r
3261 if ( result == ASE_OK ) return;
\r
3262 error( RtAudioError::SYSTEM_ERROR );
\r
3265 void RtApiAsio :: abortStream()
\r
3268 if ( stream_.state == STREAM_STOPPED ) {
\r
3269 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3270 error( RtAudioError::WARNING );
\r
3274 // The following lines were commented-out because some behavior was
\r
3275 // noted where the device buffers need to be zeroed to avoid
\r
3276 // continuing sound, even when the device buffers are completely
\r
3277 // disposed. So now, calling abort is the same as calling stop.
\r
3278 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3279 // handle->drainCounter = 2;
\r
3283 // This function will be called by a spawned thread when the user
\r
3284 // callback function signals that the stream should be stopped or
\r
3285 // aborted. It is necessary to handle it this way because the
\r
3286 // callbackEvent() function must return before the ASIOStop()
\r
3287 // function will return.
\r
3288 static unsigned __stdcall asioStopStream( void *ptr )
\r
3290 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3291 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3293 object->stopStream();
\r
3294 _endthreadex( 0 );
\r
3298 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3300 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3301 if ( stream_.state == STREAM_CLOSED ) {
\r
3302 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3303 error( RtAudioError::WARNING );
\r
3307 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3308 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3310 // Check if we were draining the stream and signal if finished.
\r
3311 if ( handle->drainCounter > 3 ) {
\r
3313 stream_.state = STREAM_STOPPING;
\r
3314 if ( handle->internalDrain == false )
\r
3315 SetEvent( handle->condition );
\r
3316 else { // spawn a thread to stop the stream
\r
3317 unsigned threadId;
\r
3318 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3319 &stream_.callbackInfo, 0, &threadId );
\r
3324 // Invoke user callback to get fresh output data UNLESS we are
\r
3325 // draining stream.
\r
3326 if ( handle->drainCounter == 0 ) {
\r
3327 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3328 double streamTime = getStreamTime();
\r
3329 RtAudioStreamStatus status = 0;
\r
3330 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3331 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3334 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3335 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3338 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3339 stream_.bufferSize, streamTime, status, info->userData );
\r
3340 if ( cbReturnValue == 2 ) {
\r
3341 stream_.state = STREAM_STOPPING;
\r
3342 handle->drainCounter = 2;
\r
3343 unsigned threadId;
\r
3344 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3345 &stream_.callbackInfo, 0, &threadId );
\r
3348 else if ( cbReturnValue == 1 ) {
\r
3349 handle->drainCounter = 1;
\r
3350 handle->internalDrain = true;
\r
3354 unsigned int nChannels, bufferBytes, i, j;
\r
3355 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3358 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3360 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3362 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3363 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3364 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3368 else if ( stream_.doConvertBuffer[0] ) {
\r
3370 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3371 if ( stream_.doByteSwap[0] )
\r
3372 byteSwapBuffer( stream_.deviceBuffer,
\r
3373 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3374 stream_.deviceFormat[0] );
\r
3376 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3377 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3378 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3379 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3385 if ( stream_.doByteSwap[0] )
\r
3386 byteSwapBuffer( stream_.userBuffer[0],
\r
3387 stream_.bufferSize * stream_.nUserChannels[0],
\r
3388 stream_.userFormat );
\r
3390 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3391 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3392 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3393 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3398 if ( handle->drainCounter ) {
\r
3399 handle->drainCounter++;
\r
3404 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3406 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3408 if (stream_.doConvertBuffer[1]) {
\r
3410 // Always interleave ASIO input data.
\r
3411 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3412 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3413 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3414 handle->bufferInfos[i].buffers[bufferIndex],
\r
3418 if ( stream_.doByteSwap[1] )
\r
3419 byteSwapBuffer( stream_.deviceBuffer,
\r
3420 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3421 stream_.deviceFormat[1] );
\r
3422 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3426 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3427 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3428 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3429 handle->bufferInfos[i].buffers[bufferIndex],
\r
3434 if ( stream_.doByteSwap[1] )
\r
3435 byteSwapBuffer( stream_.userBuffer[1],
\r
3436 stream_.bufferSize * stream_.nUserChannels[1],
\r
3437 stream_.userFormat );
\r
3442 // The following call was suggested by Malte Clasen. While the API
\r
3443 // documentation indicates it should not be required, some device
\r
3444 // drivers apparently do not function correctly without it.
\r
3445 ASIOOutputReady();
\r
3447 RtApi::tickStreamTime();
\r
3451 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3453 // The ASIO documentation says that this usually only happens during
\r
3454 // external sync. Audio processing is not stopped by the driver,
\r
3455 // actual sample rate might not have even changed, maybe only the
\r
3456 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3459 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3461 object->stopStream();
\r
3463 catch ( RtAudioError &exception ) {
\r
3464 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3468 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3471 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3475 switch( selector ) {
\r
3476 case kAsioSelectorSupported:
\r
3477 if ( value == kAsioResetRequest
\r
3478 || value == kAsioEngineVersion
\r
3479 || value == kAsioResyncRequest
\r
3480 || value == kAsioLatenciesChanged
\r
3481 // The following three were added for ASIO 2.0, you don't
\r
3482 // necessarily have to support them.
\r
3483 || value == kAsioSupportsTimeInfo
\r
3484 || value == kAsioSupportsTimeCode
\r
3485 || value == kAsioSupportsInputMonitor)
\r
3488 case kAsioResetRequest:
\r
3489 // Defer the task and perform the reset of the driver during the
\r
3490 // next "safe" situation. You cannot reset the driver right now,
\r
3491 // as this code is called from the driver. Reset the driver is
\r
3492 // done by completely destruct is. I.e. ASIOStop(),
\r
3493 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3495 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3498 case kAsioResyncRequest:
\r
3499 // This informs the application that the driver encountered some
\r
3500 // non-fatal data loss. It is used for synchronization purposes
\r
3501 // of different media. Added mainly to work around the Win16Mutex
\r
3502 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3503 // which could lose data because the Mutex was held too long by
\r
3504 // another thread. However a driver can issue it in other
\r
3505 // situations, too.
\r
3506 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3510 case kAsioLatenciesChanged:
\r
3511 // This will inform the host application that the drivers were
\r
3512 // latencies changed. Beware, it this does not mean that the
\r
3513 // buffer sizes have changed! You might need to update internal
\r
3515 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3518 case kAsioEngineVersion:
\r
3519 // Return the supported ASIO version of the host application. If
\r
3520 // a host application does not implement this selector, ASIO 1.0
\r
3521 // is assumed by the driver.
\r
3524 case kAsioSupportsTimeInfo:
\r
3525 // Informs the driver whether the
\r
3526 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3527 // For compatibility with ASIO 1.0 drivers the host application
\r
3528 // should always support the "old" bufferSwitch method, too.
\r
3531 case kAsioSupportsTimeCode:
\r
3532 // Informs the driver whether application is interested in time
\r
3533 // code info. If an application does not need to know about time
\r
3534 // code, the driver has less work to do.
\r
3541 static const char* getAsioErrorString( ASIOError result )
\r
3546 const char*message;
\r
3549 static const Messages m[] =
\r
3551 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3552 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3553 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3554 { ASE_InvalidMode, "Invalid mode." },
\r
3555 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3556 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3557 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3560 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3561 if ( m[i].value == result ) return m[i].message;
\r
3563 return "Unknown error.";
\r
3565 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3569 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3571 // Modified by Robin Davies, October 2005
\r
3572 // - Improvements to DirectX pointer chasing.
\r
3573 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3574 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3575 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3576 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3578 #include <dsound.h>
\r
3579 #include <assert.h>
\r
3580 #include <algorithm>
\r
3582 #if defined(__MINGW32__)
\r
3583 // missing from latest mingw winapi
\r
3584 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3585 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3586 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3587 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3590 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3592 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3593 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3596 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3598 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3599 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3600 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3601 return pointer >= earlierPointer && pointer < laterPointer;
\r
3604 // A structure to hold various information related to the DirectSound
\r
3605 // API implementation.
\r
3607 unsigned int drainCounter; // Tracks callback counts when draining
\r
3608 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3612 UINT bufferPointer[2];
\r
3613 DWORD dsBufferSize[2];
\r
3614 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3618 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3621 // Declarations for utility functions, callbacks, and structures
\r
3622 // specific to the DirectSound implementation.
\r
3623 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3624 LPCTSTR description,
\r
3626 LPVOID lpContext );
\r
3628 static const char* getErrorString( int code );
\r
3630 static unsigned __stdcall callbackHandler( void *ptr );
\r
3639 : found(false) { validId[0] = false; validId[1] = false; }
\r
3642 struct DsProbeData {
\r
3644 std::vector<struct DsDevice>* dsDevices;
\r
3647 RtApiDs :: RtApiDs()
\r
3649 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3650 // accept whatever the mainline chose for a threading model.
\r
3651 coInitialized_ = false;
\r
3652 HRESULT hr = CoInitialize( NULL );
\r
3653 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3656 RtApiDs :: ~RtApiDs()
\r
3658 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3659 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3662 // The DirectSound default output is always the first device.
\r
3663 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3668 // The DirectSound default input is always the first input device,
\r
3669 // which is the first capture device enumerated.
\r
3670 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3675 unsigned int RtApiDs :: getDeviceCount( void )
\r
3677 // Set query flag for previously found devices to false, so that we
\r
3678 // can check for any devices that have disappeared.
\r
3679 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3680 dsDevices[i].found = false;
\r
3682 // Query DirectSound devices.
\r
3683 struct DsProbeData probeInfo;
\r
3684 probeInfo.isInput = false;
\r
3685 probeInfo.dsDevices = &dsDevices;
\r
3686 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3687 if ( FAILED( result ) ) {
\r
3688 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3689 errorText_ = errorStream_.str();
\r
3690 error( RtAudioError::WARNING );
\r
3693 // Query DirectSoundCapture devices.
\r
3694 probeInfo.isInput = true;
\r
3695 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3696 if ( FAILED( result ) ) {
\r
3697 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3698 errorText_ = errorStream_.str();
\r
3699 error( RtAudioError::WARNING );
\r
3702 // Clean out any devices that may have disappeared.
\r
3703 std::vector< int > indices;
\r
3704 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3705 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3706 unsigned int nErased = 0;
\r
3707 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3708 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3710 return static_cast<unsigned int>(dsDevices.size());
\r
3713 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3715 RtAudio::DeviceInfo info;
\r
3716 info.probed = false;
\r
3718 if ( dsDevices.size() == 0 ) {
\r
3719 // Force a query of all devices
\r
3721 if ( dsDevices.size() == 0 ) {
\r
3722 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3723 error( RtAudioError::INVALID_USE );
\r
3728 if ( device >= dsDevices.size() ) {
\r
3729 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3730 error( RtAudioError::INVALID_USE );
\r
3735 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3737 LPDIRECTSOUND output;
\r
3739 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3740 if ( FAILED( result ) ) {
\r
3741 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3742 errorText_ = errorStream_.str();
\r
3743 error( RtAudioError::WARNING );
\r
3747 outCaps.dwSize = sizeof( outCaps );
\r
3748 result = output->GetCaps( &outCaps );
\r
3749 if ( FAILED( result ) ) {
\r
3750 output->Release();
\r
3751 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3752 errorText_ = errorStream_.str();
\r
3753 error( RtAudioError::WARNING );
\r
3757 // Get output channel information.
\r
3758 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3760 // Get sample rate information.
\r
3761 info.sampleRates.clear();
\r
3762 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3763 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3764 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3765 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3768 // Get format information.
\r
3769 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3770 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3772 output->Release();
\r
3774 if ( getDefaultOutputDevice() == device )
\r
3775 info.isDefaultOutput = true;
\r
3777 if ( dsDevices[ device ].validId[1] == false ) {
\r
3778 info.name = dsDevices[ device ].name;
\r
3779 info.probed = true;
\r
3785 LPDIRECTSOUNDCAPTURE input;
\r
3786 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3787 if ( FAILED( result ) ) {
\r
3788 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3789 errorText_ = errorStream_.str();
\r
3790 error( RtAudioError::WARNING );
\r
3795 inCaps.dwSize = sizeof( inCaps );
\r
3796 result = input->GetCaps( &inCaps );
\r
3797 if ( FAILED( result ) ) {
\r
3799 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3800 errorText_ = errorStream_.str();
\r
3801 error( RtAudioError::WARNING );
\r
3805 // Get input channel information.
\r
3806 info.inputChannels = inCaps.dwChannels;
\r
3808 // Get sample rate and format information.
\r
3809 std::vector<unsigned int> rates;
\r
3810 if ( inCaps.dwChannels >= 2 ) {
\r
3811 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3812 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3813 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3814 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3815 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3816 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3817 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3818 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3820 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3821 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3822 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3823 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3824 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3826 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3827 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3828 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3829 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3830 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3833 else if ( inCaps.dwChannels == 1 ) {
\r
3834 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3835 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3836 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3837 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3838 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3839 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3840 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3841 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3843 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3844 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3845 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3846 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3847 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3849 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3850 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3851 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3852 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3853 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3856 else info.inputChannels = 0; // technically, this would be an error
\r
3860 if ( info.inputChannels == 0 ) return info;
\r
3862 // Copy the supported rates to the info structure but avoid duplication.
\r
3864 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3866 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3867 if ( rates[i] == info.sampleRates[j] ) {
\r
3872 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3874 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3876 // If device opens for both playback and capture, we determine the channels.
\r
3877 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3878 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3880 if ( device == 0 ) info.isDefaultInput = true;
\r
3882 // Copy name and return.
\r
3883 info.name = dsDevices[ device ].name;
\r
3884 info.probed = true;
\r
3888 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3889 unsigned int firstChannel, unsigned int sampleRate,
\r
3890 RtAudioFormat format, unsigned int *bufferSize,
\r
3891 RtAudio::StreamOptions *options )
\r
3893 if ( channels + firstChannel > 2 ) {
\r
3894 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3898 size_t nDevices = dsDevices.size();
\r
3899 if ( nDevices == 0 ) {
\r
3900 // This should not happen because a check is made before this function is called.
\r
3901 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3905 if ( device >= nDevices ) {
\r
3906 // This should not happen because a check is made before this function is called.
\r
3907 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3911 if ( mode == OUTPUT ) {
\r
3912 if ( dsDevices[ device ].validId[0] == false ) {
\r
3913 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3914 errorText_ = errorStream_.str();
\r
3918 else { // mode == INPUT
\r
3919 if ( dsDevices[ device ].validId[1] == false ) {
\r
3920 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3921 errorText_ = errorStream_.str();
\r
3926 // According to a note in PortAudio, using GetDesktopWindow()
\r
3927 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3928 // that occur when the application's window is not the foreground
\r
3929 // window. Also, if the application window closes before the
\r
3930 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3931 // problems when using GetDesktopWindow() but it seems fine now
\r
3932 // (January 2010). I'll leave it commented here.
\r
3933 // HWND hWnd = GetForegroundWindow();
\r
3934 HWND hWnd = GetDesktopWindow();
\r
3936 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3937 // two. This is a judgement call and a value of two is probably too
\r
3938 // low for capture, but it should work for playback.
\r
3940 if ( options ) nBuffers = options->numberOfBuffers;
\r
3941 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3942 if ( nBuffers < 2 ) nBuffers = 3;
\r
3944 // Check the lower range of the user-specified buffer size and set
\r
3945 // (arbitrarily) to a lower bound of 32.
\r
3946 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3948 // Create the wave format structure. The data format setting will
\r
3949 // be determined later.
\r
3950 WAVEFORMATEX waveFormat;
\r
3951 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3952 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3953 waveFormat.nChannels = channels + firstChannel;
\r
3954 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3956 // Determine the device buffer size. By default, we'll use the value
\r
3957 // defined above (32K), but we will grow it to make allowances for
\r
3958 // very large software buffer sizes.
\r
3959 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
3960 DWORD dsPointerLeadTime = 0;
\r
3962 void *ohandle = 0, *bhandle = 0;
\r
3964 if ( mode == OUTPUT ) {
\r
3966 LPDIRECTSOUND output;
\r
3967 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3968 if ( FAILED( result ) ) {
\r
3969 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3970 errorText_ = errorStream_.str();
\r
3975 outCaps.dwSize = sizeof( outCaps );
\r
3976 result = output->GetCaps( &outCaps );
\r
3977 if ( FAILED( result ) ) {
\r
3978 output->Release();
\r
3979 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3980 errorText_ = errorStream_.str();
\r
3984 // Check channel information.
\r
3985 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3986 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3987 errorText_ = errorStream_.str();
\r
3991 // Check format information. Use 16-bit format unless not
\r
3992 // supported or user requests 8-bit.
\r
3993 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3994 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3995 waveFormat.wBitsPerSample = 16;
\r
3996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3999 waveFormat.wBitsPerSample = 8;
\r
4000 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4002 stream_.userFormat = format;
\r
4004 // Update wave format structure and buffer information.
\r
4005 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4006 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4007 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4009 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4010 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4011 dsBufferSize *= 2;
\r
4013 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
4014 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
4015 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
4016 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
4017 if ( FAILED( result ) ) {
\r
4018 output->Release();
\r
4019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
4020 errorText_ = errorStream_.str();
\r
4024 // Even though we will write to the secondary buffer, we need to
\r
4025 // access the primary buffer to set the correct output format
\r
4026 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
4027 // buffer description.
\r
4028 DSBUFFERDESC bufferDescription;
\r
4029 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4030 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4031 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
4033 // Obtain the primary buffer
\r
4034 LPDIRECTSOUNDBUFFER buffer;
\r
4035 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4036 if ( FAILED( result ) ) {
\r
4037 output->Release();
\r
4038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
4039 errorText_ = errorStream_.str();
\r
4043 // Set the primary DS buffer sound format.
\r
4044 result = buffer->SetFormat( &waveFormat );
\r
4045 if ( FAILED( result ) ) {
\r
4046 output->Release();
\r
4047 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
4048 errorText_ = errorStream_.str();
\r
4052 // Setup the secondary DS buffer description.
\r
4053 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4054 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4055 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4056 DSBCAPS_GLOBALFOCUS |
\r
4057 DSBCAPS_GETCURRENTPOSITION2 |
\r
4058 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4059 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4060 bufferDescription.lpwfxFormat = &waveFormat;
\r
4062 // Try to create the secondary DS buffer. If that doesn't work,
\r
4063 // try to use software mixing. Otherwise, there's a problem.
\r
4064 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4065 if ( FAILED( result ) ) {
\r
4066 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4067 DSBCAPS_GLOBALFOCUS |
\r
4068 DSBCAPS_GETCURRENTPOSITION2 |
\r
4069 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4070 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4071 if ( FAILED( result ) ) {
\r
4072 output->Release();
\r
4073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4074 errorText_ = errorStream_.str();
\r
4079 // Get the buffer size ... might be different from what we specified.
\r
4081 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4082 result = buffer->GetCaps( &dsbcaps );
\r
4083 if ( FAILED( result ) ) {
\r
4084 output->Release();
\r
4085 buffer->Release();
\r
4086 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4087 errorText_ = errorStream_.str();
\r
4091 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4093 // Lock the DS buffer
\r
4096 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4097 if ( FAILED( result ) ) {
\r
4098 output->Release();
\r
4099 buffer->Release();
\r
4100 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4101 errorText_ = errorStream_.str();
\r
4105 // Zero the DS buffer
\r
4106 ZeroMemory( audioPtr, dataLen );
\r
4108 // Unlock the DS buffer
\r
4109 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4110 if ( FAILED( result ) ) {
\r
4111 output->Release();
\r
4112 buffer->Release();
\r
4113 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4114 errorText_ = errorStream_.str();
\r
4118 ohandle = (void *) output;
\r
4119 bhandle = (void *) buffer;
\r
4122 if ( mode == INPUT ) {
\r
4124 LPDIRECTSOUNDCAPTURE input;
\r
4125 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4126 if ( FAILED( result ) ) {
\r
4127 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4128 errorText_ = errorStream_.str();
\r
4133 inCaps.dwSize = sizeof( inCaps );
\r
4134 result = input->GetCaps( &inCaps );
\r
4135 if ( FAILED( result ) ) {
\r
4137 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4138 errorText_ = errorStream_.str();
\r
4142 // Check channel information.
\r
4143 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4144 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4148 // Check format information. Use 16-bit format unless user
\r
4149 // requests 8-bit.
\r
4150 DWORD deviceFormats;
\r
4151 if ( channels + firstChannel == 2 ) {
\r
4152 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4153 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4154 waveFormat.wBitsPerSample = 8;
\r
4155 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4157 else { // assume 16-bit is supported
\r
4158 waveFormat.wBitsPerSample = 16;
\r
4159 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4162 else { // channel == 1
\r
4163 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4164 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4165 waveFormat.wBitsPerSample = 8;
\r
4166 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4168 else { // assume 16-bit is supported
\r
4169 waveFormat.wBitsPerSample = 16;
\r
4170 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4173 stream_.userFormat = format;
\r
4175 // Update wave format structure and buffer information.
\r
4176 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4177 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4178 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4180 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4181 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4182 dsBufferSize *= 2;
\r
4184 // Setup the secondary DS buffer description.
\r
4185 DSCBUFFERDESC bufferDescription;
\r
4186 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4187 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4188 bufferDescription.dwFlags = 0;
\r
4189 bufferDescription.dwReserved = 0;
\r
4190 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4191 bufferDescription.lpwfxFormat = &waveFormat;
\r
4193 // Create the capture buffer.
\r
4194 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4195 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4196 if ( FAILED( result ) ) {
\r
4198 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4199 errorText_ = errorStream_.str();
\r
4203 // Get the buffer size ... might be different from what we specified.
\r
4204 DSCBCAPS dscbcaps;
\r
4205 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4206 result = buffer->GetCaps( &dscbcaps );
\r
4207 if ( FAILED( result ) ) {
\r
4209 buffer->Release();
\r
4210 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4211 errorText_ = errorStream_.str();
\r
4215 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4217 // NOTE: We could have a problem here if this is a duplex stream
\r
4218 // and the play and capture hardware buffer sizes are different
\r
4219 // (I'm actually not sure if that is a problem or not).
\r
4220 // Currently, we are not verifying that.
\r
4222 // Lock the capture buffer
\r
4225 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4226 if ( FAILED( result ) ) {
\r
4228 buffer->Release();
\r
4229 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4230 errorText_ = errorStream_.str();
\r
4234 // Zero the buffer
\r
4235 ZeroMemory( audioPtr, dataLen );
\r
4237 // Unlock the buffer
\r
4238 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4239 if ( FAILED( result ) ) {
\r
4241 buffer->Release();
\r
4242 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4243 errorText_ = errorStream_.str();
\r
4247 ohandle = (void *) input;
\r
4248 bhandle = (void *) buffer;
\r
4251 // Set various stream parameters
\r
4252 DsHandle *handle = 0;
\r
4253 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4254 stream_.nUserChannels[mode] = channels;
\r
4255 stream_.bufferSize = *bufferSize;
\r
4256 stream_.channelOffset[mode] = firstChannel;
\r
4257 stream_.deviceInterleaved[mode] = true;
\r
4258 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4259 else stream_.userInterleaved = true;
\r
4261 // Set flag for buffer conversion
\r
4262 stream_.doConvertBuffer[mode] = false;
\r
4263 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4264 stream_.doConvertBuffer[mode] = true;
\r
4265 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4266 stream_.doConvertBuffer[mode] = true;
\r
4267 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4268 stream_.nUserChannels[mode] > 1 )
\r
4269 stream_.doConvertBuffer[mode] = true;
\r
4271 // Allocate necessary internal buffers
\r
4272 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4273 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4274 if ( stream_.userBuffer[mode] == NULL ) {
\r
4275 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4279 if ( stream_.doConvertBuffer[mode] ) {
\r
4281 bool makeBuffer = true;
\r
4282 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4283 if ( mode == INPUT ) {
\r
4284 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4285 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4286 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4290 if ( makeBuffer ) {
\r
4291 bufferBytes *= *bufferSize;
\r
4292 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4293 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4294 if ( stream_.deviceBuffer == NULL ) {
\r
4295 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4301 // Allocate our DsHandle structures for the stream.
\r
4302 if ( stream_.apiHandle == 0 ) {
\r
4304 handle = new DsHandle;
\r
4306 catch ( std::bad_alloc& ) {
\r
4307 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4311 // Create a manual-reset event.
\r
4312 handle->condition = CreateEvent( NULL, // no security
\r
4313 TRUE, // manual-reset
\r
4314 FALSE, // non-signaled initially
\r
4315 NULL ); // unnamed
\r
4316 stream_.apiHandle = (void *) handle;
\r
4319 handle = (DsHandle *) stream_.apiHandle;
\r
4320 handle->id[mode] = ohandle;
\r
4321 handle->buffer[mode] = bhandle;
\r
4322 handle->dsBufferSize[mode] = dsBufferSize;
\r
4323 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4325 stream_.device[mode] = device;
\r
4326 stream_.state = STREAM_STOPPED;
\r
4327 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4328 // We had already set up an output stream.
\r
4329 stream_.mode = DUPLEX;
\r
4331 stream_.mode = mode;
\r
4332 stream_.nBuffers = nBuffers;
\r
4333 stream_.sampleRate = sampleRate;
\r
4335 // Setup the buffer conversion information structure.
\r
4336 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4338 // Setup the callback thread.
\r
4339 if ( stream_.callbackInfo.isRunning == false ) {
\r
4340 unsigned threadId;
\r
4341 stream_.callbackInfo.isRunning = true;
\r
4342 stream_.callbackInfo.object = (void *) this;
\r
4343 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4344 &stream_.callbackInfo, 0, &threadId );
\r
4345 if ( stream_.callbackInfo.thread == 0 ) {
\r
4346 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4350 // Boost DS thread priority
\r
4351 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4357 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4358 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4359 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4360 if ( buffer ) buffer->Release();
\r
4361 object->Release();
\r
4363 if ( handle->buffer[1] ) {
\r
4364 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4365 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4366 if ( buffer ) buffer->Release();
\r
4367 object->Release();
\r
4369 CloseHandle( handle->condition );
\r
4371 stream_.apiHandle = 0;
\r
4374 for ( int i=0; i<2; i++ ) {
\r
4375 if ( stream_.userBuffer[i] ) {
\r
4376 free( stream_.userBuffer[i] );
\r
4377 stream_.userBuffer[i] = 0;
\r
4381 if ( stream_.deviceBuffer ) {
\r
4382 free( stream_.deviceBuffer );
\r
4383 stream_.deviceBuffer = 0;
\r
4386 stream_.state = STREAM_CLOSED;
\r
4390 void RtApiDs :: closeStream()
\r
4392 if ( stream_.state == STREAM_CLOSED ) {
\r
4393 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4394 error( RtAudioError::WARNING );
\r
4398 // Stop the callback thread.
\r
4399 stream_.callbackInfo.isRunning = false;
\r
4400 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4401 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4403 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4405 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4406 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4407 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4410 buffer->Release();
\r
4412 object->Release();
\r
4414 if ( handle->buffer[1] ) {
\r
4415 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4416 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4419 buffer->Release();
\r
4421 object->Release();
\r
4423 CloseHandle( handle->condition );
\r
4425 stream_.apiHandle = 0;
\r
4428 for ( int i=0; i<2; i++ ) {
\r
4429 if ( stream_.userBuffer[i] ) {
\r
4430 free( stream_.userBuffer[i] );
\r
4431 stream_.userBuffer[i] = 0;
\r
4435 if ( stream_.deviceBuffer ) {
\r
4436 free( stream_.deviceBuffer );
\r
4437 stream_.deviceBuffer = 0;
\r
4440 stream_.mode = UNINITIALIZED;
\r
4441 stream_.state = STREAM_CLOSED;
\r
4444 void RtApiDs :: startStream()
\r
4447 if ( stream_.state == STREAM_RUNNING ) {
\r
4448 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4449 error( RtAudioError::WARNING );
\r
4453 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4455 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4456 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4457 // this is already in effect.
\r
4458 timeBeginPeriod( 1 );
\r
4460 buffersRolling = false;
\r
4461 duplexPrerollBytes = 0;
\r
4463 if ( stream_.mode == DUPLEX ) {
\r
4464 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4465 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4468 HRESULT result = 0;
\r
4469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4471 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4472 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4473 if ( FAILED( result ) ) {
\r
4474 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4475 errorText_ = errorStream_.str();
\r
4480 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4482 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4483 result = buffer->Start( DSCBSTART_LOOPING );
\r
4484 if ( FAILED( result ) ) {
\r
4485 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4486 errorText_ = errorStream_.str();
\r
4491 handle->drainCounter = 0;
\r
4492 handle->internalDrain = false;
\r
4493 ResetEvent( handle->condition );
\r
4494 stream_.state = STREAM_RUNNING;
\r
4497 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4500 void RtApiDs :: stopStream()
\r
4503 if ( stream_.state == STREAM_STOPPED ) {
\r
4504 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4505 error( RtAudioError::WARNING );
\r
4509 HRESULT result = 0;
\r
4512 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4513 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4514 if ( handle->drainCounter == 0 ) {
\r
4515 handle->drainCounter = 2;
\r
4516 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4519 stream_.state = STREAM_STOPPED;
\r
4521 // Stop the buffer and clear memory
\r
4522 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4523 result = buffer->Stop();
\r
4524 if ( FAILED( result ) ) {
\r
4525 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4526 errorText_ = errorStream_.str();
\r
4530 // Lock the buffer and clear it so that if we start to play again,
\r
4531 // we won't have old data playing.
\r
4532 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4533 if ( FAILED( result ) ) {
\r
4534 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4535 errorText_ = errorStream_.str();
\r
4539 // Zero the DS buffer
\r
4540 ZeroMemory( audioPtr, dataLen );
\r
4542 // Unlock the DS buffer
\r
4543 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4544 if ( FAILED( result ) ) {
\r
4545 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4546 errorText_ = errorStream_.str();
\r
4550 // If we start playing again, we must begin at beginning of buffer.
\r
4551 handle->bufferPointer[0] = 0;
\r
4554 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4555 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4559 stream_.state = STREAM_STOPPED;
\r
4561 result = buffer->Stop();
\r
4562 if ( FAILED( result ) ) {
\r
4563 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4564 errorText_ = errorStream_.str();
\r
4568 // Lock the buffer and clear it so that if we start to play again,
\r
4569 // we won't have old data playing.
\r
4570 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4571 if ( FAILED( result ) ) {
\r
4572 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4573 errorText_ = errorStream_.str();
\r
4577 // Zero the DS buffer
\r
4578 ZeroMemory( audioPtr, dataLen );
\r
4580 // Unlock the DS buffer
\r
4581 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4582 if ( FAILED( result ) ) {
\r
4583 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4584 errorText_ = errorStream_.str();
\r
4588 // If we start recording again, we must begin at beginning of buffer.
\r
4589 handle->bufferPointer[1] = 0;
\r
4593 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4594 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4597 void RtApiDs :: abortStream()
\r
4600 if ( stream_.state == STREAM_STOPPED ) {
\r
4601 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4602 error( RtAudioError::WARNING );
\r
4606 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4607 handle->drainCounter = 2;
\r
4612 void RtApiDs :: callbackEvent()
\r
4614 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4615 Sleep( 50 ); // sleep 50 milliseconds
\r
4619 if ( stream_.state == STREAM_CLOSED ) {
\r
4620 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4621 error( RtAudioError::WARNING );
\r
4625 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4626 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4628 // Check if we were draining the stream and signal is finished.
\r
4629 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4631 stream_.state = STREAM_STOPPING;
\r
4632 if ( handle->internalDrain == false )
\r
4633 SetEvent( handle->condition );
\r
4639 // Invoke user callback to get fresh output data UNLESS we are
\r
4640 // draining stream.
\r
4641 if ( handle->drainCounter == 0 ) {
\r
4642 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4643 double streamTime = getStreamTime();
\r
4644 RtAudioStreamStatus status = 0;
\r
4645 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4646 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4647 handle->xrun[0] = false;
\r
4649 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4650 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4651 handle->xrun[1] = false;
\r
4653 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4654 stream_.bufferSize, streamTime, status, info->userData );
\r
4655 if ( cbReturnValue == 2 ) {
\r
4656 stream_.state = STREAM_STOPPING;
\r
4657 handle->drainCounter = 2;
\r
4661 else if ( cbReturnValue == 1 ) {
\r
4662 handle->drainCounter = 1;
\r
4663 handle->internalDrain = true;
\r
4668 DWORD currentWritePointer, safeWritePointer;
\r
4669 DWORD currentReadPointer, safeReadPointer;
\r
4670 UINT nextWritePointer;
\r
4672 LPVOID buffer1 = NULL;
\r
4673 LPVOID buffer2 = NULL;
\r
4674 DWORD bufferSize1 = 0;
\r
4675 DWORD bufferSize2 = 0;
\r
4680 if ( buffersRolling == false ) {
\r
4681 if ( stream_.mode == DUPLEX ) {
\r
4682 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4684 // It takes a while for the devices to get rolling. As a result,
\r
4685 // there's no guarantee that the capture and write device pointers
\r
4686 // will move in lockstep. Wait here for both devices to start
\r
4687 // rolling, and then set our buffer pointers accordingly.
\r
4688 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4689 // bytes later than the write buffer.
\r
4691 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4692 // take place between the two GetCurrentPosition calls... but I'm
\r
4693 // really not sure how to solve the problem. Temporarily boost to
\r
4694 // Realtime priority, maybe; but I'm not sure what priority the
\r
4695 // DirectSound service threads run at. We *should* be roughly
\r
4696 // within a ms or so of correct.
\r
4698 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4699 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4701 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4703 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4704 if ( FAILED( result ) ) {
\r
4705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4706 errorText_ = errorStream_.str();
\r
4707 error( RtAudioError::SYSTEM_ERROR );
\r
4710 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4711 if ( FAILED( result ) ) {
\r
4712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4713 errorText_ = errorStream_.str();
\r
4714 error( RtAudioError::SYSTEM_ERROR );
\r
4718 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4719 if ( FAILED( result ) ) {
\r
4720 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4721 errorText_ = errorStream_.str();
\r
4722 error( RtAudioError::SYSTEM_ERROR );
\r
4725 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4726 if ( FAILED( result ) ) {
\r
4727 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4728 errorText_ = errorStream_.str();
\r
4729 error( RtAudioError::SYSTEM_ERROR );
\r
4732 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4736 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4738 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4739 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4740 handle->bufferPointer[1] = safeReadPointer;
\r
4742 else if ( stream_.mode == OUTPUT ) {
\r
4744 // Set the proper nextWritePosition after initial startup.
\r
4745 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4746 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4747 if ( FAILED( result ) ) {
\r
4748 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4749 errorText_ = errorStream_.str();
\r
4750 error( RtAudioError::SYSTEM_ERROR );
\r
4753 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4754 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4757 buffersRolling = true;
\r
4760 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4762 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4764 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4765 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4766 bufferBytes *= formatBytes( stream_.userFormat );
\r
4767 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4770 // Setup parameters and do buffer conversion if necessary.
\r
4771 if ( stream_.doConvertBuffer[0] ) {
\r
4772 buffer = stream_.deviceBuffer;
\r
4773 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4774 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4775 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4778 buffer = stream_.userBuffer[0];
\r
4779 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4780 bufferBytes *= formatBytes( stream_.userFormat );
\r
4783 // No byte swapping necessary in DirectSound implementation.
\r
4785 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4786 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4788 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4789 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4791 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4792 nextWritePointer = handle->bufferPointer[0];
\r
4794 DWORD endWrite, leadPointer;
\r
4796 // Find out where the read and "safe write" pointers are.
\r
4797 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4798 if ( FAILED( result ) ) {
\r
4799 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4800 errorText_ = errorStream_.str();
\r
4801 error( RtAudioError::SYSTEM_ERROR );
\r
4805 // We will copy our output buffer into the region between
\r
4806 // safeWritePointer and leadPointer. If leadPointer is not
\r
4807 // beyond the next endWrite position, wait until it is.
\r
4808 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4809 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4810 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4811 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4812 endWrite = nextWritePointer + bufferBytes;
\r
4814 // Check whether the entire write region is behind the play pointer.
\r
4815 if ( leadPointer >= endWrite ) break;
\r
4817 // If we are here, then we must wait until the leadPointer advances
\r
4818 // beyond the end of our next write region. We use the
\r
4819 // Sleep() function to suspend operation until that happens.
\r
4820 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4821 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4822 if ( millis < 1.0 ) millis = 1.0;
\r
4823 Sleep( (DWORD) millis );
\r
4826 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4827 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4828 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4829 handle->xrun[0] = true;
\r
4830 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4831 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4832 handle->bufferPointer[0] = nextWritePointer;
\r
4833 endWrite = nextWritePointer + bufferBytes;
\r
4836 // Lock free space in the buffer
\r
4837 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4838 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4839 if ( FAILED( result ) ) {
\r
4840 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4841 errorText_ = errorStream_.str();
\r
4842 error( RtAudioError::SYSTEM_ERROR );
\r
4846 // Copy our buffer into the DS buffer
\r
4847 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4848 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4850 // Update our buffer offset and unlock sound buffer
\r
4851 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4852 if ( FAILED( result ) ) {
\r
4853 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4854 errorText_ = errorStream_.str();
\r
4855 error( RtAudioError::SYSTEM_ERROR );
\r
4858 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4859 handle->bufferPointer[0] = nextWritePointer;
\r
4861 if ( handle->drainCounter ) {
\r
4862 handle->drainCounter++;
\r
4867 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4869 // Setup parameters.
\r
4870 if ( stream_.doConvertBuffer[1] ) {
\r
4871 buffer = stream_.deviceBuffer;
\r
4872 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4873 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4876 buffer = stream_.userBuffer[1];
\r
4877 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4878 bufferBytes *= formatBytes( stream_.userFormat );
\r
4881 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4882 long nextReadPointer = handle->bufferPointer[1];
\r
4883 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4885 // Find out where the write and "safe read" pointers are.
\r
4886 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4887 if ( FAILED( result ) ) {
\r
4888 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4889 errorText_ = errorStream_.str();
\r
4890 error( RtAudioError::SYSTEM_ERROR );
\r
4894 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4895 DWORD endRead = nextReadPointer + bufferBytes;
\r
4897 // Handling depends on whether we are INPUT or DUPLEX.
\r
4898 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4899 // then a wait here will drag the write pointers into the forbidden zone.
\r
4901 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4902 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4903 // practical way to sync up the read and write pointers reliably, given the
\r
4904 // the very complex relationship between phase and increment of the read and write
\r
4907 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4908 // provide a pre-roll period of 0.5 seconds in which we return
\r
4909 // zeros from the read buffer while the pointers sync up.
\r
4911 if ( stream_.mode == DUPLEX ) {
\r
4912 if ( safeReadPointer < endRead ) {
\r
4913 if ( duplexPrerollBytes <= 0 ) {
\r
4914 // Pre-roll time over. Be more agressive.
\r
4915 int adjustment = endRead-safeReadPointer;
\r
4917 handle->xrun[1] = true;
\r
4919 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4920 // and perform fine adjustments later.
\r
4921 // - small adjustments: back off by twice as much.
\r
4922 if ( adjustment >= 2*bufferBytes )
\r
4923 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4925 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4927 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4931 // In pre=roll time. Just do it.
\r
4932 nextReadPointer = safeReadPointer - bufferBytes;
\r
4933 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4935 endRead = nextReadPointer + bufferBytes;
\r
4938 else { // mode == INPUT
\r
4939 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4940 // See comments for playback.
\r
4941 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4942 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4943 if ( millis < 1.0 ) millis = 1.0;
\r
4944 Sleep( (DWORD) millis );
\r
4946 // Wake up and find out where we are now.
\r
4947 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4948 if ( FAILED( result ) ) {
\r
4949 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4950 errorText_ = errorStream_.str();
\r
4951 error( RtAudioError::SYSTEM_ERROR );
\r
4955 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4959 // Lock free space in the buffer
\r
4960 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4961 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4962 if ( FAILED( result ) ) {
\r
4963 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4964 errorText_ = errorStream_.str();
\r
4965 error( RtAudioError::SYSTEM_ERROR );
\r
4969 if ( duplexPrerollBytes <= 0 ) {
\r
4970 // Copy our buffer into the DS buffer
\r
4971 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4972 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4975 memset( buffer, 0, bufferSize1 );
\r
4976 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4977 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4980 // Update our buffer offset and unlock sound buffer
\r
4981 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4982 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4983 if ( FAILED( result ) ) {
\r
4984 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4985 errorText_ = errorStream_.str();
\r
4986 error( RtAudioError::SYSTEM_ERROR );
\r
4989 handle->bufferPointer[1] = nextReadPointer;
\r
4991 // No byte swapping necessary in DirectSound implementation.
\r
4993 // If necessary, convert 8-bit data from unsigned to signed.
\r
4994 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4995 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4997 // Do buffer conversion if necessary.
\r
4998 if ( stream_.doConvertBuffer[1] )
\r
4999 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
5003 RtApi::tickStreamTime();
\r
5006 // Definitions for utility functions and callbacks
\r
5007 // specific to the DirectSound implementation.
\r
5009 static unsigned __stdcall callbackHandler( void *ptr )
\r
5011 CallbackInfo *info = (CallbackInfo *) ptr;
\r
5012 RtApiDs *object = (RtApiDs *) info->object;
\r
5013 bool* isRunning = &info->isRunning;
\r
5015 while ( *isRunning == true ) {
\r
5016 object->callbackEvent();
\r
5019 _endthreadex( 0 );
\r
5023 #include "tchar.h"
\r
5025 static std::string convertTChar( LPCTSTR name )
\r
5027 #if defined( UNICODE ) || defined( _UNICODE )
\r
5028 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5029 std::string s( length-1, '\0' );
\r
5030 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
5032 std::string s( name );
\r
5038 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5039 LPCTSTR description,
\r
5040 LPCTSTR /*module*/,
\r
5041 LPVOID lpContext )
\r
5043 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5044 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5047 bool validDevice = false;
\r
5048 if ( probeInfo.isInput == true ) {
\r
5050 LPDIRECTSOUNDCAPTURE object;
\r
5052 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5053 if ( hr != DS_OK ) return TRUE;
\r
5055 caps.dwSize = sizeof(caps);
\r
5056 hr = object->GetCaps( &caps );
\r
5057 if ( hr == DS_OK ) {
\r
5058 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5059 validDevice = true;
\r
5061 object->Release();
\r
5065 LPDIRECTSOUND object;
\r
5066 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5067 if ( hr != DS_OK ) return TRUE;
\r
5069 caps.dwSize = sizeof(caps);
\r
5070 hr = object->GetCaps( &caps );
\r
5071 if ( hr == DS_OK ) {
\r
5072 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5073 validDevice = true;
\r
5075 object->Release();
\r
5078 // If good device, then save its name and guid.
\r
5079 std::string name = convertTChar( description );
\r
5080 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5081 if ( lpguid == NULL )
\r
5082 name = "Default Device";
\r
5083 if ( validDevice ) {
\r
5084 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5085 if ( dsDevices[i].name == name ) {
\r
5086 dsDevices[i].found = true;
\r
5087 if ( probeInfo.isInput ) {
\r
5088 dsDevices[i].id[1] = lpguid;
\r
5089 dsDevices[i].validId[1] = true;
\r
5092 dsDevices[i].id[0] = lpguid;
\r
5093 dsDevices[i].validId[0] = true;
\r
5100 device.name = name;
\r
5101 device.found = true;
\r
5102 if ( probeInfo.isInput ) {
\r
5103 device.id[1] = lpguid;
\r
5104 device.validId[1] = true;
\r
5107 device.id[0] = lpguid;
\r
5108 device.validId[0] = true;
\r
5110 dsDevices.push_back( device );
\r
5116 static const char* getErrorString( int code )
\r
5120 case DSERR_ALLOCATED:
\r
5121 return "Already allocated";
\r
5123 case DSERR_CONTROLUNAVAIL:
\r
5124 return "Control unavailable";
\r
5126 case DSERR_INVALIDPARAM:
\r
5127 return "Invalid parameter";
\r
5129 case DSERR_INVALIDCALL:
\r
5130 return "Invalid call";
\r
5132 case DSERR_GENERIC:
\r
5133 return "Generic error";
\r
5135 case DSERR_PRIOLEVELNEEDED:
\r
5136 return "Priority level needed";
\r
5138 case DSERR_OUTOFMEMORY:
\r
5139 return "Out of memory";
\r
5141 case DSERR_BADFORMAT:
\r
5142 return "The sample rate or the channel format is not supported";
\r
5144 case DSERR_UNSUPPORTED:
\r
5145 return "Not supported";
\r
5147 case DSERR_NODRIVER:
\r
5148 return "No driver";
\r
5150 case DSERR_ALREADYINITIALIZED:
\r
5151 return "Already initialized";
\r
5153 case DSERR_NOAGGREGATION:
\r
5154 return "No aggregation";
\r
5156 case DSERR_BUFFERLOST:
\r
5157 return "Buffer lost";
\r
5159 case DSERR_OTHERAPPHASPRIO:
\r
5160 return "Another application already has priority";
\r
5162 case DSERR_UNINITIALIZED:
\r
5163 return "Uninitialized";
\r
5166 return "DirectSound unknown error";
\r
5169 //******************** End of __WINDOWS_DS__ *********************//
\r
5173 #if defined(__LINUX_ALSA__)
\r
5175 #include <alsa/asoundlib.h>
\r
5176 #include <unistd.h>
\r
5178 // A structure to hold various information related to the ALSA API
\r
5179 // implementation.
\r
5180 struct AlsaHandle {
\r
5181 snd_pcm_t *handles[2];
\r
5182 bool synchronized;
\r
5184 pthread_cond_t runnable_cv;
\r
5188 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5191 static void *alsaCallbackHandler( void * ptr );
\r
5193 RtApiAlsa :: RtApiAlsa()
\r
5195 // Nothing to do here.
\r
5198 RtApiAlsa :: ~RtApiAlsa()
\r
5200 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5203 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5205 unsigned nDevices = 0;
\r
5206 int result, subdevice, card;
\r
5208 snd_ctl_t *handle;
\r
5210 // Count cards and devices
\r
5212 snd_card_next( &card );
\r
5213 while ( card >= 0 ) {
\r
5214 sprintf( name, "hw:%d", card );
\r
5215 result = snd_ctl_open( &handle, name, 0 );
\r
5216 if ( result < 0 ) {
\r
5217 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5218 errorText_ = errorStream_.str();
\r
5219 error( RtAudioError::WARNING );
\r
5224 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5225 if ( result < 0 ) {
\r
5226 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5227 errorText_ = errorStream_.str();
\r
5228 error( RtAudioError::WARNING );
\r
5231 if ( subdevice < 0 )
\r
5236 snd_ctl_close( handle );
\r
5237 snd_card_next( &card );
\r
5240 result = snd_ctl_open( &handle, "default", 0 );
\r
5241 if (result == 0) {
\r
5243 snd_ctl_close( handle );
\r
5249 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5251 RtAudio::DeviceInfo info;
\r
5252 info.probed = false;
\r
5254 unsigned nDevices = 0;
\r
5255 int result, subdevice, card;
\r
5257 snd_ctl_t *chandle;
\r
5259 // Count cards and devices
\r
5261 snd_card_next( &card );
\r
5262 while ( card >= 0 ) {
\r
5263 sprintf( name, "hw:%d", card );
\r
5264 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5265 if ( result < 0 ) {
\r
5266 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5267 errorText_ = errorStream_.str();
\r
5268 error( RtAudioError::WARNING );
\r
5273 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5274 if ( result < 0 ) {
\r
5275 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5276 errorText_ = errorStream_.str();
\r
5277 error( RtAudioError::WARNING );
\r
5280 if ( subdevice < 0 ) break;
\r
5281 if ( nDevices == device ) {
\r
5282 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5288 snd_ctl_close( chandle );
\r
5289 snd_card_next( &card );
\r
5292 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5293 if ( result == 0 ) {
\r
5294 if ( nDevices == device ) {
\r
5295 strcpy( name, "default" );
\r
5301 if ( nDevices == 0 ) {
\r
5302 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5303 error( RtAudioError::INVALID_USE );
\r
5307 if ( device >= nDevices ) {
\r
5308 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5309 error( RtAudioError::INVALID_USE );
\r
5315 // If a stream is already open, we cannot probe the stream devices.
\r
5316 // Thus, use the saved results.
\r
5317 if ( stream_.state != STREAM_CLOSED &&
\r
5318 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5319 snd_ctl_close( chandle );
\r
5320 if ( device >= devices_.size() ) {
\r
5321 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5322 error( RtAudioError::WARNING );
\r
5325 return devices_[ device ];
\r
5328 int openMode = SND_PCM_ASYNC;
\r
5329 snd_pcm_stream_t stream;
\r
5330 snd_pcm_info_t *pcminfo;
\r
5331 snd_pcm_info_alloca( &pcminfo );
\r
5332 snd_pcm_t *phandle;
\r
5333 snd_pcm_hw_params_t *params;
\r
5334 snd_pcm_hw_params_alloca( ¶ms );
\r
5336 // First try for playback unless default device (which has subdev -1)
\r
5337 stream = SND_PCM_STREAM_PLAYBACK;
\r
5338 snd_pcm_info_set_stream( pcminfo, stream );
\r
5339 if ( subdevice != -1 ) {
\r
5340 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5341 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5343 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5344 if ( result < 0 ) {
\r
5345 // Device probably doesn't support playback.
\r
5346 goto captureProbe;
\r
5350 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5351 if ( result < 0 ) {
\r
5352 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5353 errorText_ = errorStream_.str();
\r
5354 error( RtAudioError::WARNING );
\r
5355 goto captureProbe;
\r
5358 // The device is open ... fill the parameter structure.
\r
5359 result = snd_pcm_hw_params_any( phandle, params );
\r
5360 if ( result < 0 ) {
\r
5361 snd_pcm_close( phandle );
\r
5362 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5363 errorText_ = errorStream_.str();
\r
5364 error( RtAudioError::WARNING );
\r
5365 goto captureProbe;
\r
5368 // Get output channel information.
\r
5369 unsigned int value;
\r
5370 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5371 if ( result < 0 ) {
\r
5372 snd_pcm_close( phandle );
\r
5373 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5374 errorText_ = errorStream_.str();
\r
5375 error( RtAudioError::WARNING );
\r
5376 goto captureProbe;
\r
5378 info.outputChannels = value;
\r
5379 snd_pcm_close( phandle );
\r
5382 stream = SND_PCM_STREAM_CAPTURE;
\r
5383 snd_pcm_info_set_stream( pcminfo, stream );
\r
5385 // Now try for capture unless default device (with subdev = -1)
\r
5386 if ( subdevice != -1 ) {
\r
5387 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5388 snd_ctl_close( chandle );
\r
5389 if ( result < 0 ) {
\r
5390 // Device probably doesn't support capture.
\r
5391 if ( info.outputChannels == 0 ) return info;
\r
5392 goto probeParameters;
\r
5396 snd_ctl_close( chandle );
\r
5398 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5399 if ( result < 0 ) {
\r
5400 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5401 errorText_ = errorStream_.str();
\r
5402 error( RtAudioError::WARNING );
\r
5403 if ( info.outputChannels == 0 ) return info;
\r
5404 goto probeParameters;
\r
5407 // The device is open ... fill the parameter structure.
\r
5408 result = snd_pcm_hw_params_any( phandle, params );
\r
5409 if ( result < 0 ) {
\r
5410 snd_pcm_close( phandle );
\r
5411 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtAudioError::WARNING );
\r
5414 if ( info.outputChannels == 0 ) return info;
\r
5415 goto probeParameters;
\r
5418 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5419 if ( result < 0 ) {
\r
5420 snd_pcm_close( phandle );
\r
5421 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5422 errorText_ = errorStream_.str();
\r
5423 error( RtAudioError::WARNING );
\r
5424 if ( info.outputChannels == 0 ) return info;
\r
5425 goto probeParameters;
\r
5427 info.inputChannels = value;
\r
5428 snd_pcm_close( phandle );
\r
5430 // If device opens for both playback and capture, we determine the channels.
\r
5431 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5432 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5434 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5435 if ( device == 0 && info.outputChannels > 0 )
\r
5436 info.isDefaultOutput = true;
\r
5437 if ( device == 0 && info.inputChannels > 0 )
\r
5438 info.isDefaultInput = true;
\r
5441 // At this point, we just need to figure out the supported data
\r
5442 // formats and sample rates. We'll proceed by opening the device in
\r
5443 // the direction with the maximum number of channels, or playback if
\r
5444 // they are equal. This might limit our sample rate options, but so
\r
5447 if ( info.outputChannels >= info.inputChannels )
\r
5448 stream = SND_PCM_STREAM_PLAYBACK;
\r
5450 stream = SND_PCM_STREAM_CAPTURE;
\r
5451 snd_pcm_info_set_stream( pcminfo, stream );
\r
5453 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5454 if ( result < 0 ) {
\r
5455 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5456 errorText_ = errorStream_.str();
\r
5457 error( RtAudioError::WARNING );
\r
5461 // The device is open ... fill the parameter structure.
\r
5462 result = snd_pcm_hw_params_any( phandle, params );
\r
5463 if ( result < 0 ) {
\r
5464 snd_pcm_close( phandle );
\r
5465 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5466 errorText_ = errorStream_.str();
\r
5467 error( RtAudioError::WARNING );
\r
5471 // Test our discrete set of sample rate values.
\r
5472 info.sampleRates.clear();
\r
5473 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5474 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5475 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5477 if ( info.sampleRates.size() == 0 ) {
\r
5478 snd_pcm_close( phandle );
\r
5479 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5480 errorText_ = errorStream_.str();
\r
5481 error( RtAudioError::WARNING );
\r
5485 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5486 snd_pcm_format_t format;
\r
5487 info.nativeFormats = 0;
\r
5488 format = SND_PCM_FORMAT_S8;
\r
5489 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5490 info.nativeFormats |= RTAUDIO_SINT8;
\r
5491 format = SND_PCM_FORMAT_S16;
\r
5492 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5493 info.nativeFormats |= RTAUDIO_SINT16;
\r
5494 format = SND_PCM_FORMAT_S24;
\r
5495 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5496 info.nativeFormats |= RTAUDIO_SINT24;
\r
5497 format = SND_PCM_FORMAT_S32;
\r
5498 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5499 info.nativeFormats |= RTAUDIO_SINT32;
\r
5500 format = SND_PCM_FORMAT_FLOAT;
\r
5501 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5502 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5503 format = SND_PCM_FORMAT_FLOAT64;
\r
5504 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5505 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5507 // Check that we have at least one supported format
\r
5508 if ( info.nativeFormats == 0 ) {
\r
5509 snd_pcm_close( phandle );
\r
5510 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5511 errorText_ = errorStream_.str();
\r
5512 error( RtAudioError::WARNING );
\r
5516 // Get the device name
\r
5518 result = snd_card_get_name( card, &cardname );
\r
5519 if ( result >= 0 ) {
\r
5520 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5525 // That's all ... close the device and return
\r
5526 snd_pcm_close( phandle );
\r
5527 info.probed = true;
\r
5531 void RtApiAlsa :: saveDeviceInfo( void )
\r
5535 unsigned int nDevices = getDeviceCount();
\r
5536 devices_.resize( nDevices );
\r
5537 for ( unsigned int i=0; i<nDevices; i++ )
\r
5538 devices_[i] = getDeviceInfo( i );
\r
5541 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5542 unsigned int firstChannel, unsigned int sampleRate,
\r
5543 RtAudioFormat format, unsigned int *bufferSize,
\r
5544 RtAudio::StreamOptions *options )
\r
5547 #if defined(__RTAUDIO_DEBUG__)
\r
5548 snd_output_t *out;
\r
5549 snd_output_stdio_attach(&out, stderr, 0);
\r
5552 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5554 unsigned nDevices = 0;
\r
5555 int result, subdevice, card;
\r
5557 snd_ctl_t *chandle;
\r
5559 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5560 snprintf(name, sizeof(name), "%s", "default");
\r
5562 // Count cards and devices
\r
5564 snd_card_next( &card );
\r
5565 while ( card >= 0 ) {
\r
5566 sprintf( name, "hw:%d", card );
\r
5567 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5568 if ( result < 0 ) {
\r
5569 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5570 errorText_ = errorStream_.str();
\r
5575 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5576 if ( result < 0 ) break;
\r
5577 if ( subdevice < 0 ) break;
\r
5578 if ( nDevices == device ) {
\r
5579 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5580 snd_ctl_close( chandle );
\r
5585 snd_ctl_close( chandle );
\r
5586 snd_card_next( &card );
\r
5589 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5590 if ( result == 0 ) {
\r
5591 if ( nDevices == device ) {
\r
5592 strcpy( name, "default" );
\r
5598 if ( nDevices == 0 ) {
\r
5599 // This should not happen because a check is made before this function is called.
\r
5600 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5604 if ( device >= nDevices ) {
\r
5605 // This should not happen because a check is made before this function is called.
\r
5606 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5613 // The getDeviceInfo() function will not work for a device that is
\r
5614 // already open. Thus, we'll probe the system before opening a
\r
5615 // stream and save the results for use by getDeviceInfo().
\r
5616 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5617 this->saveDeviceInfo();
\r
5619 snd_pcm_stream_t stream;
\r
5620 if ( mode == OUTPUT )
\r
5621 stream = SND_PCM_STREAM_PLAYBACK;
\r
5623 stream = SND_PCM_STREAM_CAPTURE;
\r
5625 snd_pcm_t *phandle;
\r
5626 int openMode = SND_PCM_ASYNC;
\r
5627 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5628 if ( result < 0 ) {
\r
5629 if ( mode == OUTPUT )
\r
5630 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5632 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5633 errorText_ = errorStream_.str();
\r
5637 // Fill the parameter structure.
\r
5638 snd_pcm_hw_params_t *hw_params;
\r
5639 snd_pcm_hw_params_alloca( &hw_params );
\r
5640 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5641 if ( result < 0 ) {
\r
5642 snd_pcm_close( phandle );
\r
5643 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5644 errorText_ = errorStream_.str();
\r
5648 #if defined(__RTAUDIO_DEBUG__)
\r
5649 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5650 snd_pcm_hw_params_dump( hw_params, out );
\r
5653 // Set access ... check user preference.
\r
5654 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5655 stream_.userInterleaved = false;
\r
5656 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5657 if ( result < 0 ) {
\r
5658 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5659 stream_.deviceInterleaved[mode] = true;
\r
5662 stream_.deviceInterleaved[mode] = false;
\r
5665 stream_.userInterleaved = true;
\r
5666 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5667 if ( result < 0 ) {
\r
5668 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5669 stream_.deviceInterleaved[mode] = false;
\r
5672 stream_.deviceInterleaved[mode] = true;
\r
5675 if ( result < 0 ) {
\r
5676 snd_pcm_close( phandle );
\r
5677 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5678 errorText_ = errorStream_.str();
\r
5682 // Determine how to set the device format.
\r
5683 stream_.userFormat = format;
\r
5684 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5686 if ( format == RTAUDIO_SINT8 )
\r
5687 deviceFormat = SND_PCM_FORMAT_S8;
\r
5688 else if ( format == RTAUDIO_SINT16 )
\r
5689 deviceFormat = SND_PCM_FORMAT_S16;
\r
5690 else if ( format == RTAUDIO_SINT24 )
\r
5691 deviceFormat = SND_PCM_FORMAT_S24;
\r
5692 else if ( format == RTAUDIO_SINT32 )
\r
5693 deviceFormat = SND_PCM_FORMAT_S32;
\r
5694 else if ( format == RTAUDIO_FLOAT32 )
\r
5695 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5696 else if ( format == RTAUDIO_FLOAT64 )
\r
5697 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5699 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5700 stream_.deviceFormat[mode] = format;
\r
5704 // The user requested format is not natively supported by the device.
\r
5705 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5706 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5711 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5713 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5717 deviceFormat = SND_PCM_FORMAT_S32;
\r
5718 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5719 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5723 deviceFormat = SND_PCM_FORMAT_S24;
\r
5724 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5725 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5729 deviceFormat = SND_PCM_FORMAT_S16;
\r
5730 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5731 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5735 deviceFormat = SND_PCM_FORMAT_S8;
\r
5736 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5737 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5741 // If we get here, no supported format was found.
\r
5742 snd_pcm_close( phandle );
\r
5743 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5744 errorText_ = errorStream_.str();
\r
5748 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5749 if ( result < 0 ) {
\r
5750 snd_pcm_close( phandle );
\r
5751 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5752 errorText_ = errorStream_.str();
\r
5756 // Determine whether byte-swaping is necessary.
\r
5757 stream_.doByteSwap[mode] = false;
\r
5758 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5759 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5760 if ( result == 0 )
\r
5761 stream_.doByteSwap[mode] = true;
\r
5762 else if (result < 0) {
\r
5763 snd_pcm_close( phandle );
\r
5764 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5765 errorText_ = errorStream_.str();
\r
5770 // Set the sample rate.
\r
5771 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5772 if ( result < 0 ) {
\r
5773 snd_pcm_close( phandle );
\r
5774 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5775 errorText_ = errorStream_.str();
\r
5779 // Determine the number of channels for this device. We support a possible
\r
5780 // minimum device channel number > than the value requested by the user.
\r
5781 stream_.nUserChannels[mode] = channels;
\r
5782 unsigned int value;
\r
5783 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5784 unsigned int deviceChannels = value;
\r
5785 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5786 snd_pcm_close( phandle );
\r
5787 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5788 errorText_ = errorStream_.str();
\r
5792 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5793 if ( result < 0 ) {
\r
5794 snd_pcm_close( phandle );
\r
5795 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5796 errorText_ = errorStream_.str();
\r
5799 deviceChannels = value;
\r
5800 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5801 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5803 // Set the device channels.
\r
5804 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5805 if ( result < 0 ) {
\r
5806 snd_pcm_close( phandle );
\r
5807 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5808 errorText_ = errorStream_.str();
\r
5812 // Set the buffer (or period) size.
\r
5814 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5815 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5816 if ( result < 0 ) {
\r
5817 snd_pcm_close( phandle );
\r
5818 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5819 errorText_ = errorStream_.str();
\r
5822 *bufferSize = periodSize;
\r
5824 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5825 unsigned int periods = 0;
\r
5826 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5827 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5828 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5829 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5830 if ( result < 0 ) {
\r
5831 snd_pcm_close( phandle );
\r
5832 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5833 errorText_ = errorStream_.str();
\r
5837 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5838 // MUST be the same in both directions!
\r
5839 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5840 snd_pcm_close( phandle );
\r
5841 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5842 errorText_ = errorStream_.str();
\r
5846 stream_.bufferSize = *bufferSize;
\r
5848 // Install the hardware configuration
\r
5849 result = snd_pcm_hw_params( phandle, hw_params );
\r
5850 if ( result < 0 ) {
\r
5851 snd_pcm_close( phandle );
\r
5852 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5853 errorText_ = errorStream_.str();
\r
5857 #if defined(__RTAUDIO_DEBUG__)
\r
5858 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5859 snd_pcm_hw_params_dump( hw_params, out );
\r
5862 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5863 snd_pcm_sw_params_t *sw_params = NULL;
\r
5864 snd_pcm_sw_params_alloca( &sw_params );
\r
5865 snd_pcm_sw_params_current( phandle, sw_params );
\r
5866 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5867 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5868 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5870 // The following two settings were suggested by Theo Veenker
\r
5871 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5872 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5874 // here are two options for a fix
\r
5875 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5876 snd_pcm_uframes_t val;
\r
5877 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5878 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5880 result = snd_pcm_sw_params( phandle, sw_params );
\r
5881 if ( result < 0 ) {
\r
5882 snd_pcm_close( phandle );
\r
5883 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5884 errorText_ = errorStream_.str();
\r
5888 #if defined(__RTAUDIO_DEBUG__)
\r
5889 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5890 snd_pcm_sw_params_dump( sw_params, out );
\r
5893 // Set flags for buffer conversion
\r
5894 stream_.doConvertBuffer[mode] = false;
\r
5895 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5896 stream_.doConvertBuffer[mode] = true;
\r
5897 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5898 stream_.doConvertBuffer[mode] = true;
\r
5899 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5900 stream_.nUserChannels[mode] > 1 )
\r
5901 stream_.doConvertBuffer[mode] = true;
\r
5903 // Allocate the ApiHandle if necessary and then save.
\r
5904 AlsaHandle *apiInfo = 0;
\r
5905 if ( stream_.apiHandle == 0 ) {
\r
5907 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5909 catch ( std::bad_alloc& ) {
\r
5910 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5914 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5915 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5919 stream_.apiHandle = (void *) apiInfo;
\r
5920 apiInfo->handles[0] = 0;
\r
5921 apiInfo->handles[1] = 0;
\r
5924 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5926 apiInfo->handles[mode] = phandle;
\r
5929 // Allocate necessary internal buffers.
\r
5930 unsigned long bufferBytes;
\r
5931 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5932 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5933 if ( stream_.userBuffer[mode] == NULL ) {
\r
5934 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5938 if ( stream_.doConvertBuffer[mode] ) {
\r
5940 bool makeBuffer = true;
\r
5941 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5942 if ( mode == INPUT ) {
\r
5943 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5944 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5945 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5949 if ( makeBuffer ) {
\r
5950 bufferBytes *= *bufferSize;
\r
5951 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5952 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5953 if ( stream_.deviceBuffer == NULL ) {
\r
5954 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5960 stream_.sampleRate = sampleRate;
\r
5961 stream_.nBuffers = periods;
\r
5962 stream_.device[mode] = device;
\r
5963 stream_.state = STREAM_STOPPED;
\r
5965 // Setup the buffer conversion information structure.
\r
5966 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5968 // Setup thread if necessary.
\r
5969 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5970 // We had already set up an output stream.
\r
5971 stream_.mode = DUPLEX;
\r
5972 // Link the streams if possible.
\r
5973 apiInfo->synchronized = false;
\r
5974 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5975 apiInfo->synchronized = true;
\r
5977 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5978 error( RtAudioError::WARNING );
\r
5982 stream_.mode = mode;
\r
5984 // Setup callback thread.
\r
5985 stream_.callbackInfo.object = (void *) this;
\r
5987 // Set the thread attributes for joinable and realtime scheduling
\r
5988 // priority (optional). The higher priority will only take affect
\r
5989 // if the program is run as root or suid. Note, under Linux
\r
5990 // processes with CAP_SYS_NICE privilege, a user can change
\r
5991 // scheduling policy and priority (thus need not be root). See
\r
5992 // POSIX "capabilities".
\r
5993 pthread_attr_t attr;
\r
5994 pthread_attr_init( &attr );
\r
5995 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5997 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5998 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5999 // We previously attempted to increase the audio callback priority
\r
6000 // to SCHED_RR here via the attributes. However, while no errors
\r
6001 // were reported in doing so, it did not work. So, now this is
\r
6002 // done in the alsaCallbackHandler function.
\r
6003 stream_.callbackInfo.doRealtime = true;
\r
6004 int priority = options->priority;
\r
6005 int min = sched_get_priority_min( SCHED_RR );
\r
6006 int max = sched_get_priority_max( SCHED_RR );
\r
6007 if ( priority < min ) priority = min;
\r
6008 else if ( priority > max ) priority = max;
\r
6009 stream_.callbackInfo.priority = priority;
\r
6013 stream_.callbackInfo.isRunning = true;
\r
6014 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
6015 pthread_attr_destroy( &attr );
\r
6017 stream_.callbackInfo.isRunning = false;
\r
6018 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
6027 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6028 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6029 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6031 stream_.apiHandle = 0;
\r
6034 if ( phandle) snd_pcm_close( phandle );
\r
6036 for ( int i=0; i<2; i++ ) {
\r
6037 if ( stream_.userBuffer[i] ) {
\r
6038 free( stream_.userBuffer[i] );
\r
6039 stream_.userBuffer[i] = 0;
\r
6043 if ( stream_.deviceBuffer ) {
\r
6044 free( stream_.deviceBuffer );
\r
6045 stream_.deviceBuffer = 0;
\r
6048 stream_.state = STREAM_CLOSED;
\r
6052 void RtApiAlsa :: closeStream()
\r
6054 if ( stream_.state == STREAM_CLOSED ) {
\r
6055 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6056 error( RtAudioError::WARNING );
\r
6060 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6061 stream_.callbackInfo.isRunning = false;
\r
6062 MUTEX_LOCK( &stream_.mutex );
\r
6063 if ( stream_.state == STREAM_STOPPED ) {
\r
6064 apiInfo->runnable = true;
\r
6065 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6067 MUTEX_UNLOCK( &stream_.mutex );
\r
6068 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6070 if ( stream_.state == STREAM_RUNNING ) {
\r
6071 stream_.state = STREAM_STOPPED;
\r
6072 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6073 snd_pcm_drop( apiInfo->handles[0] );
\r
6074 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6075 snd_pcm_drop( apiInfo->handles[1] );
\r
6079 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6080 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6081 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6083 stream_.apiHandle = 0;
\r
6086 for ( int i=0; i<2; i++ ) {
\r
6087 if ( stream_.userBuffer[i] ) {
\r
6088 free( stream_.userBuffer[i] );
\r
6089 stream_.userBuffer[i] = 0;
\r
6093 if ( stream_.deviceBuffer ) {
\r
6094 free( stream_.deviceBuffer );
\r
6095 stream_.deviceBuffer = 0;
\r
6098 stream_.mode = UNINITIALIZED;
\r
6099 stream_.state = STREAM_CLOSED;
\r
6102 void RtApiAlsa :: startStream()
\r
6104 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6107 if ( stream_.state == STREAM_RUNNING ) {
\r
6108 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6109 error( RtAudioError::WARNING );
\r
6113 MUTEX_LOCK( &stream_.mutex );
\r
6116 snd_pcm_state_t state;
\r
6117 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6118 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6119 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6120 state = snd_pcm_state( handle[0] );
\r
6121 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6122 result = snd_pcm_prepare( handle[0] );
\r
6123 if ( result < 0 ) {
\r
6124 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6125 errorText_ = errorStream_.str();
\r
6131 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6132 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
6133 state = snd_pcm_state( handle[1] );
\r
6134 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6135 result = snd_pcm_prepare( handle[1] );
\r
6136 if ( result < 0 ) {
\r
6137 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6138 errorText_ = errorStream_.str();
\r
6144 stream_.state = STREAM_RUNNING;
\r
6147 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6148 MUTEX_UNLOCK( &stream_.mutex );
\r
6150 if ( result >= 0 ) return;
\r
6151 error( RtAudioError::SYSTEM_ERROR );
\r
6154 void RtApiAlsa :: stopStream()
\r
6157 if ( stream_.state == STREAM_STOPPED ) {
\r
6158 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6159 error( RtAudioError::WARNING );
\r
6163 stream_.state = STREAM_STOPPED;
\r
6164 MUTEX_LOCK( &stream_.mutex );
\r
6167 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6168 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6169 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6170 if ( apiInfo->synchronized )
\r
6171 result = snd_pcm_drop( handle[0] );
\r
6173 result = snd_pcm_drain( handle[0] );
\r
6174 if ( result < 0 ) {
\r
6175 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6176 errorText_ = errorStream_.str();
\r
6181 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6182 result = snd_pcm_drop( handle[1] );
\r
6183 if ( result < 0 ) {
\r
6184 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6185 errorText_ = errorStream_.str();
\r
6191 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6192 MUTEX_UNLOCK( &stream_.mutex );
\r
6194 if ( result >= 0 ) return;
\r
6195 error( RtAudioError::SYSTEM_ERROR );
\r
6198 void RtApiAlsa :: abortStream()
\r
6201 if ( stream_.state == STREAM_STOPPED ) {
\r
6202 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6203 error( RtAudioError::WARNING );
\r
6207 stream_.state = STREAM_STOPPED;
\r
6208 MUTEX_LOCK( &stream_.mutex );
\r
6211 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6212 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6213 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6214 result = snd_pcm_drop( handle[0] );
\r
6215 if ( result < 0 ) {
\r
6216 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6217 errorText_ = errorStream_.str();
\r
6222 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6223 result = snd_pcm_drop( handle[1] );
\r
6224 if ( result < 0 ) {
\r
6225 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6226 errorText_ = errorStream_.str();
\r
6232 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6233 MUTEX_UNLOCK( &stream_.mutex );
\r
6235 if ( result >= 0 ) return;
\r
6236 error( RtAudioError::SYSTEM_ERROR );
\r
// Per-buffer engine of the ALSA backend: blocks while the stream is stopped,
// runs the user callback, then transfers one buffer to/from the PCM device(s).
// NOTE(review): this chunk of the file has extraction gaps (jumps in the
// original line numbering) — several closing braces, else-branches, early
// returns and local declarations (e.g. `buffer`, `channels`, `result`,
// the `unlock:` label) are not visible here. Gaps are flagged below; the
// visible tokens are unchanged.
void RtApiAlsa :: callbackEvent()
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // While stopped, wait on the condition variable until startStream() marks
  // the handle runnable — avoids burning CPU in the callback thread.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
    // State may have changed while we were waiting; bail out if not running.
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      // (gap: early return / closing braces not visible in this chunk)
    MUTEX_UNLOCK( &stream_.mutex );
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    // (gap: early return not visible)
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report any xrun recorded by the transfer code below, then clear the flag.
  // Index 0 = playback (underflow), index 1 = capture (overflow).
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  // Invoke the user's callback; by RtAudio convention a return of 1 requests
  // a drain-and-stop, 2 requests an immediate abort.
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  if ( doStopStream == 2 ) {
    // (gap: abort handling not visible)
  MUTEX_LOCK( &stream_.mutex );
  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;  // [0] playback pcm, [1] capture pcm
  // ---- Capture half ----
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    // Setup parameters.
    // Read into the device buffer when a user<->device conversion is needed,
    // otherwise straight into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
      // (gap: else-branch brace not visible)
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
      // (gap: else-branch opening not visible) non-interleaved read uses one
      // channel-sized slab per channel (VLA of channel pointers).
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    // A short read means either an error or an overrun.
    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Record the overrun for the next callback and re-prepare the pcm.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
        // (gap: else-branch brace not visible)
        errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      // (gap: else-branch brace not visible)
      errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      // Transfer problems are reported as warnings, not fatal errors.
      error( RtAudioError::WARNING );
      // (gap: goto/brace handling not visible)
    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  // ---- Playback half ----
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      // (gap: else-branch brace not visible)
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
      // (gap: else-branch opening not visible)
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    // A short write means either an error or an underrun.
    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Record the underrun for the next callback and re-prepare the pcm.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
        // (gap: else-branch brace not visible)
        errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      // (gap: else-branch brace not visible)
      errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
      // (gap: goto/brace handling not visible)
    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  // (gap: `unlock:` label not visible; the goto above targets it)
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();
  // Deferred stop requested by the callback's return value of 1.
  if ( doStopStream == 1 ) this->stopStream();
\r
6425 static void *alsaCallbackHandler( void *ptr )
\r
6427 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6428 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6429 bool *isRunning = &info->isRunning;
\r
6431 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6432 if ( &info->doRealtime ) {
\r
6433 pthread_t tID = pthread_self(); // ID of this thread
\r
6434 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6435 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6439 while ( *isRunning == true ) {
\r
6440 pthread_testcancel();
\r
6441 object->callbackEvent();
\r
6444 pthread_exit( NULL );
\r
6447 //******************** End of __LINUX_ALSA__ *********************//
\r
6450 #if defined(__LINUX_PULSE__)
\r
6452 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6453 // and Tristan Matthews.
\r
6455 #include <pulse/error.h>
\r
6456 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend advertises; the list is terminated by
// a 0 sentinel (see the iteration in getDeviceInfo/probeDeviceOpen).
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Maps an RtAudio sample format to the corresponding PulseAudio format.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
// (gap: closing `};` of the struct not visible in this chunk)

// Format pairs supported by this backend; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend.
// NOTE(review): extraction gaps — the declarations of `s_rec`, the callback
// `thread` and the `runnable` flag referenced by the constructor below are
// not visible in this chunk.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (NULL when output unused)
  pthread_cond_t runnable_cv; // signaled by startStream() to wake the callback thread
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: tears down any still-open stream.
// NOTE(review): extraction gaps — the closeStream() call and braces are not
// visible in this chunk.
RtApiPulse::~RtApiPulse()
  if ( stream_.state != STREAM_CLOSED )

// Device count for the PulseAudio backend.
// (gap: body not visible; this backend exposes a single virtual device)
unsigned int RtApiPulse::getDeviceCount( void )

// Returns a fixed description of the single PulseAudio virtual device:
// 2-in/2-out stereo, all rates in SUPPORTED_SAMPLERATES, S16/S32/F32 native.
// The `device` index is not consulted here.
RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = true;
  info.name = "PulseAudio";
  info.outputChannels = 2;
  info.inputChannels = 2;
  info.duplexChannels = 2;
  info.isDefaultOutput = true;
  info.isDefaultInput = true;

  // Walk the 0-terminated supported-rate table.
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
    info.sampleRates.push_back( *sr );

  info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
  // (gap: `return info;` and closing brace not visible)
\r
6512 static void *pulseaudio_callback( void * user )
\r
6514 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6515 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6516 volatile bool *isRunning = &cbi->isRunning;
\r
6518 while ( *isRunning ) {
\r
6519 pthread_testcancel();
\r
6520 context->callbackEvent();
\r
6523 pthread_exit( NULL );
\r
// Shuts down the PulseAudio stream: stops the callback thread, frees both
// pa_simple connections and all user buffers, and resets the stream state.
// NOTE(review): extraction gaps — the `if (pah)` guard, the `if (pah->s_rec)`
// guard for the pa_simple_free below, `delete pah;`, device-buffer cleanup
// and several braces are not visible in this chunk.
void RtApiPulse::closeStream( void )
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // Signal the callback thread to exit its run loop...
  stream_.callbackInfo.isRunning = false;

  // ...and wake it if it is parked on the runnable condition variable.
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    pah->runnable = true;
    pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  // Wait for the callback thread to finish before freeing its resources.
  pthread_join( pah->thread, 0 );
  if ( pah->s_play ) {
    // Discard any queued playback data before tearing down the connection.
    pa_simple_flush( pah->s_play, NULL );
    pa_simple_free( pah->s_play );
  // (gap: `if ( pah->s_rec )` guard not visible)
  pa_simple_free( pah->s_rec );

  pthread_cond_destroy( &pah->runnable_cv );
  stream_.apiHandle = 0;

  // Release user-side buffers (index 0 = output, 1 = input).
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
\r
// Per-buffer engine of the PulseAudio backend: waits while stopped, runs the
// user callback, then pushes/pulls one buffer through the pa_simple API.
// NOTE(review): extraction gaps — the declarations of `bytes` and `pa_error`
// used below, several early returns, an `unlock:`-style exit path and various
// braces are not visible in this chunk.
void RtApiPulse::callbackEvent( void )
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // While stopped, park on the condition variable until startStream() signals.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    // State may have changed while waiting; bail out if not running.
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      // (gap: early return not visible)
    MUTEX_UNLOCK( &stream_.mutex );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    // (gap: early return not visible)

  // Run the user's callback (return 1 = drain-stop, 2 = abort).
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    // (gap: abort handling not visible)

  MUTEX_LOCK( &stream_.mutex );
  // Choose the staging buffer: device buffer when a format/interleave
  // conversion is required, the user buffer otherwise.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // Re-check after acquiring the lock; state may have changed meanwhile.
  if ( stream_.state != STREAM_RUNNING )
    // (gap: goto/return not visible)

  // ---- Playback half: convert (if needed), then write to the server ----
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
      // (gap: else-branch brace not visible)
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );

  // ---- Capture half: read from the server, then convert (if needed) ----
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
      // (gap: else-branch not visible)
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );

  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // Deferred stop requested by the callback's return value of 1.
  if ( doStopStream == 1 )
\r
// Starts a previously opened PulseAudio stream by marking it RUNNING and
// waking the callback thread parked on the runnable condition variable.
// NOTE(review): extraction gaps — the early `return;` statements after each
// error() call and some braces are not visible in this chunk.
void RtApiPulse::startStream( void )
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::startStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiPulse::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

  stream_.state = STREAM_RUNNING;

  // Wake pulseaudio_callback(), which waits on runnable_cv while stopped.
  pah->runnable = true;
  pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );
\r
// Stops a running PulseAudio stream, draining (blocking until played) any
// audio still queued on the playback connection.
// NOTE(review): extraction gaps — the early `return;` statements after the
// error() calls and some braces are not visible in this chunk.
void RtApiPulse::stopStream( void )
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // Mark stopped before taking the lock so the callback thread re-parks.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    // (gap: declaration of `pa_error` not visible)
    // Drain blocks until all queued playback samples have been consumed.
    if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      // (gap: early return not visible)

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
\r
// Aborts a running PulseAudio stream, flushing (discarding) any audio still
// queued on the playback connection — unlike stopStream(), which drains it.
// NOTE(review): extraction gaps — the early `return;` statements after the
// error() calls and some braces are not visible in this chunk.
void RtApiPulse::abortStream( void )
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // Mark stopped before taking the lock so the callback thread re-parks.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    // (gap: declaration of `pa_error` not visible)
    // Flush discards queued playback samples immediately.
    if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      // (gap: early return not visible)

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
\r
// Opens one direction (INPUT or OUTPUT) of the single PulseAudio virtual
// device: validates channel count / rate / format against the static tables,
// allocates user (and if needed device) buffers, connects the pa_simple
// stream, and spawns the callback thread on first open.
// Returns true on success; on failure falls through to the `error:` cleanup
// (largely not visible in this chunk).
// NOTE(review): extraction gaps throughout — `return false;`/`goto error;`
// statements, `break`s in the search loops, the `error:` label and several
// braces are not visible. Visible tokens are unchanged.
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  // Only one virtual device; duplex is achieved by two successive opens.
  if ( device != 0 ) return false;
  if ( mode != INPUT && mode != OUTPUT ) return false;
  if ( channels != 1 && channels != 2 ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
  ss.channels = channels;

  if ( firstChannel != 0 ) return false;

  // Sample rate must match one of the advertised rates exactly.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      // (gap: sr_found = true / break not visible)
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
  if ( !sr_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";

  // NOTE(review): `0` used to initialize a bool — `false` would match the
  // sr_found declaration above.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      // (gap: sf_found = true / break not visible)
      stream_.userFormat = sf->rtaudio_format;
      ss.format = sf->pa_format;
  if ( !sf_found ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";

  // Set interleaving parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = 1;
  stream_.doByteSwap[mode] = false;
  // Conversion is only needed to interleave a multi-channel non-interleaved
  // user buffer (PulseAudio is always interleaved, format already matches).
  stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
  stream_.deviceFormat[mode] = stream_.userFormat;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse the existing device buffer from a prior OUTPUT open if it is
      // already large enough for the input side.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // First open: allocate the API handle and its condition variable.
  if ( !stream_.apiHandle ) {
    PulseAudioHandle *pah = new PulseAudioHandle;
    // (gap: null-check of `pah` not visible)
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // (gap: `if ( options )` guard presumed above this line — verify)
  if ( !options->streamName.empty() ) streamName = options->streamName;

  // (gap: mode switch / case INPUT not visible; the record branch follows)
  pa_buffer_attr buffer_attr;
  buffer_attr.fragsize = bufferBytes;
  buffer_attr.maxlength = -1;  // -1 == server default

  pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
  if ( !pah->s_rec ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";

  // NOTE(review): playback passes the literal "RtAudio" rather than
  // streamName.c_str() used for the record stream — looks inconsistent;
  // confirm against upstream before relying on the configured name.
  pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
  if ( !pah->s_play ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";

  // Track the combined mode across the (up to) two opens.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    // (gap: error path for re-opening same mode not visible)
    stream_.mode = DUPLEX;

  // Spawn the callback thread on the first successful open only.
  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;
    stream_.callbackInfo.isRunning = true;
    if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";

  stream_.state = STREAM_STOPPED;
  // (gap: `return true;` and `error:` label not visible)

  // Failure cleanup: release handle, buffers and device buffer.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    // (gap: delete pah not visible)
    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
\r
6927 //******************** End of __LINUX_PULSE__ *********************//
\r
6930 #if defined(__LINUX_OSS__)
\r
6932 #include <unistd.h>
\r
6933 #include <sys/ioctl.h>
\r
6934 #include <unistd.h>
\r
6935 #include <fcntl.h>
\r
6936 #include <sys/soundcard.h>
\r
6937 #include <errno.h>
\r
6940 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
// NOTE(review): extraction gaps — the declarations of `xrun`, `triggered`
// and the constructor header are not fully visible in this chunk.
struct OssHandle {
  int id[2];    // device ids ([0] = output fd, [1] = input fd)
  pthread_cond_t runnable;  // wakes the callback thread when the stream starts

  // Constructor initializer-list tail: zero ids and clear xrun flags.
  :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Constructor: no OSS-specific initialization is required.
RtApiOss :: RtApiOss()
  // Nothing to do here.

// Destructor: tears down any still-open stream.
RtApiOss :: ~RtApiOss()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Counts OSS audio devices by querying the mixer's SNDCTL_SYSINFO ioctl
// (requires OSS >= 4.0). Failures are reported as warnings.
// NOTE(review): extraction gaps — the `return 0;` fallbacks and the
// close( mixerfd ) calls on each path are not visible in this chunk.
unsigned int RtApiOss :: getDeviceCount( void )
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );

  oss_sysinfo sysinfo;
  if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
    errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );

  return sysinfo.numaudios;
\r
// Probes one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl: channel
// capabilities, native sample formats (from the input-format mask) and
// supported sample rates. info.probed is only set true on full success.
// NOTE(review): extraction gaps — the `return info;` statements, the
// close( mixerfd ) calls, else-branches and several braces are not visible
// in this chunk.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Channel capabilities; duplexChannels is the min of in/out when the
  // device reports PCM_CAP_DUPLEX.
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // NOTE(review): the inner PCM_CAP_DUPLEX re-test is redundant with the
    // enclosing condition.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // Device reports an explicit rate list: intersect it with RtAudio's
    // SAMPLE_RATES table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          // (gap: break / closing braces not visible)

  // Check min and max rate values;
  // (gap: this is the else-branch — device reports only a min/max range)
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
      info.sampleRates.push_back( SAMPLE_RATES[k] );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    // (gap: else-branch setting probed/name follows)
  info.probed = true;
  info.name = ainfo.name;
\r
7095 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7096 unsigned int firstChannel, unsigned int sampleRate,
\r
7097 RtAudioFormat format, unsigned int *bufferSize,
\r
7098 RtAudio::StreamOptions *options )
\r
7100 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7101 if ( mixerfd == -1 ) {
\r
7102 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7106 oss_sysinfo sysinfo;
\r
7107 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7108 if ( result == -1 ) {
\r
7110 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7114 unsigned nDevices = sysinfo.numaudios;
\r
7115 if ( nDevices == 0 ) {
\r
7116 // This should not happen because a check is made before this function is called.
\r
7118 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7122 if ( device >= nDevices ) {
\r
7123 // This should not happen because a check is made before this function is called.
\r
7125 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7129 oss_audioinfo ainfo;
\r
7130 ainfo.dev = device;
\r
7131 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7133 if ( result == -1 ) {
\r
7134 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7135 errorText_ = errorStream_.str();
\r
7139 // Check if device supports input or output
\r
7140 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7141 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7142 if ( mode == OUTPUT )
\r
7143 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7145 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7146 errorText_ = errorStream_.str();
\r
7151 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7152 if ( mode == OUTPUT )
\r
7153 flags |= O_WRONLY;
\r
7154 else { // mode == INPUT
\r
7155 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7156 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7157 close( handle->id[0] );
\r
7158 handle->id[0] = 0;
\r
7159 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7160 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7161 errorText_ = errorStream_.str();
\r
7164 // Check that the number previously set channels is the same.
\r
7165 if ( stream_.nUserChannels[0] != channels ) {
\r
7166 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7167 errorText_ = errorStream_.str();
\r
7173 flags |= O_RDONLY;
\r
7176 // Set exclusive access if specified.
\r
7177 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7179 // Try to open the device.
\r
7181 fd = open( ainfo.devnode, flags, 0 );
\r
7183 if ( errno == EBUSY )
\r
7184 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7186 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7187 errorText_ = errorStream_.str();
\r
7191 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7193 if ( flags | O_RDWR ) {
\r
7194 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7195 if ( result == -1) {
\r
7196 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7197 errorText_ = errorStream_.str();
\r
7203 // Check the device channel support.
\r
7204 stream_.nUserChannels[mode] = channels;
\r
7205 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7207 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7208 errorText_ = errorStream_.str();
\r
7212 // Set the number of channels.
\r
7213 int deviceChannels = channels + firstChannel;
\r
7214 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7215 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7217 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7218 errorText_ = errorStream_.str();
\r
7221 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7223 // Get the data format mask
\r
7225 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7226 if ( result == -1 ) {
\r
7228 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7229 errorText_ = errorStream_.str();
\r
7233 // Determine how to set the device format.
\r
7234 stream_.userFormat = format;
\r
7235 int deviceFormat = -1;
\r
7236 stream_.doByteSwap[mode] = false;
\r
7237 if ( format == RTAUDIO_SINT8 ) {
\r
7238 if ( mask & AFMT_S8 ) {
\r
7239 deviceFormat = AFMT_S8;
\r
7240 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7243 else if ( format == RTAUDIO_SINT16 ) {
\r
7244 if ( mask & AFMT_S16_NE ) {
\r
7245 deviceFormat = AFMT_S16_NE;
\r
7246 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7248 else if ( mask & AFMT_S16_OE ) {
\r
7249 deviceFormat = AFMT_S16_OE;
\r
7250 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7251 stream_.doByteSwap[mode] = true;
\r
7254 else if ( format == RTAUDIO_SINT24 ) {
\r
7255 if ( mask & AFMT_S24_NE ) {
\r
7256 deviceFormat = AFMT_S24_NE;
\r
7257 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7259 else if ( mask & AFMT_S24_OE ) {
\r
7260 deviceFormat = AFMT_S24_OE;
\r
7261 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7262 stream_.doByteSwap[mode] = true;
\r
7265 else if ( format == RTAUDIO_SINT32 ) {
\r
7266 if ( mask & AFMT_S32_NE ) {
\r
7267 deviceFormat = AFMT_S32_NE;
\r
7268 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7270 else if ( mask & AFMT_S32_OE ) {
\r
7271 deviceFormat = AFMT_S32_OE;
\r
7272 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7273 stream_.doByteSwap[mode] = true;
\r
7277 if ( deviceFormat == -1 ) {
\r
7278 // The user requested format is not natively supported by the device.
\r
7279 if ( mask & AFMT_S16_NE ) {
\r
7280 deviceFormat = AFMT_S16_NE;
\r
7281 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7283 else if ( mask & AFMT_S32_NE ) {
\r
7284 deviceFormat = AFMT_S32_NE;
\r
7285 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7287 else if ( mask & AFMT_S24_NE ) {
\r
7288 deviceFormat = AFMT_S24_NE;
\r
7289 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7291 else if ( mask & AFMT_S16_OE ) {
\r
7292 deviceFormat = AFMT_S16_OE;
\r
7293 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7294 stream_.doByteSwap[mode] = true;
\r
7296 else if ( mask & AFMT_S32_OE ) {
\r
7297 deviceFormat = AFMT_S32_OE;
\r
7298 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7299 stream_.doByteSwap[mode] = true;
\r
7301 else if ( mask & AFMT_S24_OE ) {
\r
7302 deviceFormat = AFMT_S24_OE;
\r
7303 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7304 stream_.doByteSwap[mode] = true;
\r
7306 else if ( mask & AFMT_S8) {
\r
7307 deviceFormat = AFMT_S8;
\r
7308 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7312 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7313 // This really shouldn't happen ...
\r
7315 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7316 errorText_ = errorStream_.str();
\r
7320 // Set the data format.
\r
7321 int temp = deviceFormat;
\r
7322 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7323 if ( result == -1 || deviceFormat != temp ) {
\r
7325 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7326 errorText_ = errorStream_.str();
\r
7330 // Attempt to set the buffer size. According to OSS, the minimum
\r
7331 // number of buffers is two. The supposed minimum buffer size is 16
\r
7332 // bytes, so that will be our lower bound. The argument to this
\r
7333 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7334 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7335 // We'll check the actual value used near the end of the setup
\r
7337 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7338 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7340 if ( options ) buffers = options->numberOfBuffers;
\r
7341 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7342 if ( buffers < 2 ) buffers = 3;
\r
7343 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7344 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7345 if ( result == -1 ) {
\r
7347 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7348 errorText_ = errorStream_.str();
\r
7351 stream_.nBuffers = buffers;
\r
7353 // Save buffer size (in sample frames).
\r
7354 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7355 stream_.bufferSize = *bufferSize;
\r
7357 // Set the sample rate.
\r
7358 int srate = sampleRate;
\r
7359 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7360 if ( result == -1 ) {
\r
7362 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7363 errorText_ = errorStream_.str();
\r
7367 // Verify the sample rate setup worked.
\r
7368 if ( abs( srate - sampleRate ) > 100 ) {
\r
7370 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7371 errorText_ = errorStream_.str();
\r
7374 stream_.sampleRate = sampleRate;
\r
7376 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7377 // We're doing duplex setup here.
\r
7378 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7379 stream_.nDeviceChannels[0] = deviceChannels;
\r
7382 // Set interleaving parameters.
\r
7383 stream_.userInterleaved = true;
\r
7384 stream_.deviceInterleaved[mode] = true;
\r
7385 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7386 stream_.userInterleaved = false;
\r
7388 // Set flags for buffer conversion
\r
7389 stream_.doConvertBuffer[mode] = false;
\r
7390 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7391 stream_.doConvertBuffer[mode] = true;
\r
7392 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7393 stream_.doConvertBuffer[mode] = true;
\r
7394 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7395 stream_.nUserChannels[mode] > 1 )
\r
7396 stream_.doConvertBuffer[mode] = true;
\r
7398 // Allocate the stream handles if necessary and then save.
\r
7399 if ( stream_.apiHandle == 0 ) {
\r
7401 handle = new OssHandle;
\r
7403 catch ( std::bad_alloc& ) {
\r
7404 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7408 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7409 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7413 stream_.apiHandle = (void *) handle;
\r
7416 handle = (OssHandle *) stream_.apiHandle;
\r
7418 handle->id[mode] = fd;
\r
7420 // Allocate necessary internal buffers.
\r
7421 unsigned long bufferBytes;
\r
7422 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7423 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7424 if ( stream_.userBuffer[mode] == NULL ) {
\r
7425 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7429 if ( stream_.doConvertBuffer[mode] ) {
\r
7431 bool makeBuffer = true;
\r
7432 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7433 if ( mode == INPUT ) {
\r
7434 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7435 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7436 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7440 if ( makeBuffer ) {
\r
7441 bufferBytes *= *bufferSize;
\r
7442 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7443 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7444 if ( stream_.deviceBuffer == NULL ) {
\r
7445 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7451 stream_.device[mode] = device;
\r
7452 stream_.state = STREAM_STOPPED;
\r
7454 // Setup the buffer conversion information structure.
\r
7455 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7457 // Setup thread if necessary.
\r
7458 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7459 // We had already set up an output stream.
\r
7460 stream_.mode = DUPLEX;
\r
7461 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7464 stream_.mode = mode;
\r
7466 // Setup callback thread.
\r
7467 stream_.callbackInfo.object = (void *) this;
\r
7469 // Set the thread attributes for joinable and realtime scheduling
\r
7470 // priority. The higher priority will only take affect if the
\r
7471 // program is run as root or suid.
\r
7472 pthread_attr_t attr;
\r
7473 pthread_attr_init( &attr );
\r
7474 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7475 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7476 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7477 struct sched_param param;
\r
7478 int priority = options->priority;
\r
7479 int min = sched_get_priority_min( SCHED_RR );
\r
7480 int max = sched_get_priority_max( SCHED_RR );
\r
7481 if ( priority < min ) priority = min;
\r
7482 else if ( priority > max ) priority = max;
\r
7483 param.sched_priority = priority;
\r
7484 pthread_attr_setschedparam( &attr, ¶m );
\r
7485 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7488 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7490 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7493 stream_.callbackInfo.isRunning = true;
\r
7494 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7495 pthread_attr_destroy( &attr );
\r
7497 stream_.callbackInfo.isRunning = false;
\r
7498 errorText_ = "RtApiOss::error creating callback thread!";
\r
7507 pthread_cond_destroy( &handle->runnable );
\r
7508 if ( handle->id[0] ) close( handle->id[0] );
\r
7509 if ( handle->id[1] ) close( handle->id[1] );
\r
7511 stream_.apiHandle = 0;
\r
7514 for ( int i=0; i<2; i++ ) {
\r
7515 if ( stream_.userBuffer[i] ) {
\r
7516 free( stream_.userBuffer[i] );
\r
7517 stream_.userBuffer[i] = 0;
\r
7521 if ( stream_.deviceBuffer ) {
\r
7522 free( stream_.deviceBuffer );
\r
7523 stream_.deviceBuffer = 0;
\r
7529 void RtApiOss :: closeStream()
\r
7531 if ( stream_.state == STREAM_CLOSED ) {
\r
7532 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7533 error( RtAudioError::WARNING );
\r
7537 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7538 stream_.callbackInfo.isRunning = false;
\r
7539 MUTEX_LOCK( &stream_.mutex );
\r
7540 if ( stream_.state == STREAM_STOPPED )
\r
7541 pthread_cond_signal( &handle->runnable );
\r
7542 MUTEX_UNLOCK( &stream_.mutex );
\r
7543 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7545 if ( stream_.state == STREAM_RUNNING ) {
\r
7546 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7547 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7549 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7550 stream_.state = STREAM_STOPPED;
\r
7554 pthread_cond_destroy( &handle->runnable );
\r
7555 if ( handle->id[0] ) close( handle->id[0] );
\r
7556 if ( handle->id[1] ) close( handle->id[1] );
\r
7558 stream_.apiHandle = 0;
\r
7561 for ( int i=0; i<2; i++ ) {
\r
7562 if ( stream_.userBuffer[i] ) {
\r
7563 free( stream_.userBuffer[i] );
\r
7564 stream_.userBuffer[i] = 0;
\r
7568 if ( stream_.deviceBuffer ) {
\r
7569 free( stream_.deviceBuffer );
\r
7570 stream_.deviceBuffer = 0;
\r
7573 stream_.mode = UNINITIALIZED;
\r
7574 stream_.state = STREAM_CLOSED;
\r
7577 void RtApiOss :: startStream()
\r
7580 if ( stream_.state == STREAM_RUNNING ) {
\r
7581 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7582 error( RtAudioError::WARNING );
\r
7586 MUTEX_LOCK( &stream_.mutex );
\r
7588 stream_.state = STREAM_RUNNING;
\r
7590 // No need to do anything else here ... OSS automatically starts
\r
7591 // when fed samples.
\r
7593 MUTEX_UNLOCK( &stream_.mutex );
\r
7595 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7596 pthread_cond_signal( &handle->runnable );
\r
7599 void RtApiOss :: stopStream()
\r
7602 if ( stream_.state == STREAM_STOPPED ) {
\r
7603 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7604 error( RtAudioError::WARNING );
\r
7608 MUTEX_LOCK( &stream_.mutex );
\r
7610 // The state might change while waiting on a mutex.
\r
7611 if ( stream_.state == STREAM_STOPPED ) {
\r
7612 MUTEX_UNLOCK( &stream_.mutex );
\r
7617 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7618 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7620 // Flush the output with zeros a few times.
\r
7623 RtAudioFormat format;
\r
7625 if ( stream_.doConvertBuffer[0] ) {
\r
7626 buffer = stream_.deviceBuffer;
\r
7627 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7628 format = stream_.deviceFormat[0];
\r
7631 buffer = stream_.userBuffer[0];
\r
7632 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7633 format = stream_.userFormat;
\r
7636 memset( buffer, 0, samples * formatBytes(format) );
\r
7637 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7638 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7639 if ( result == -1 ) {
\r
7640 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7641 error( RtAudioError::WARNING );
\r
7645 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7646 if ( result == -1 ) {
\r
7647 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7648 errorText_ = errorStream_.str();
\r
7651 handle->triggered = false;
\r
7654 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7655 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7656 if ( result == -1 ) {
\r
7657 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7658 errorText_ = errorStream_.str();
\r
7664 stream_.state = STREAM_STOPPED;
\r
7665 MUTEX_UNLOCK( &stream_.mutex );
\r
7667 if ( result != -1 ) return;
\r
7668 error( RtAudioError::SYSTEM_ERROR );
\r
7671 void RtApiOss :: abortStream()
\r
7674 if ( stream_.state == STREAM_STOPPED ) {
\r
7675 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7676 error( RtAudioError::WARNING );
\r
7680 MUTEX_LOCK( &stream_.mutex );
\r
7682 // The state might change while waiting on a mutex.
\r
7683 if ( stream_.state == STREAM_STOPPED ) {
\r
7684 MUTEX_UNLOCK( &stream_.mutex );
\r
7689 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7690 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7691 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7692 if ( result == -1 ) {
\r
7693 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7694 errorText_ = errorStream_.str();
\r
7697 handle->triggered = false;
\r
7700 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7701 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7702 if ( result == -1 ) {
\r
7703 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7704 errorText_ = errorStream_.str();
\r
7710 stream_.state = STREAM_STOPPED;
\r
7711 MUTEX_UNLOCK( &stream_.mutex );
\r
7713 if ( result != -1 ) return;
\r
7714 error( RtAudioError::SYSTEM_ERROR );
\r
7717 void RtApiOss :: callbackEvent()
\r
7719 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7720 if ( stream_.state == STREAM_STOPPED ) {
\r
7721 MUTEX_LOCK( &stream_.mutex );
\r
7722 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7723 if ( stream_.state != STREAM_RUNNING ) {
\r
7724 MUTEX_UNLOCK( &stream_.mutex );
\r
7727 MUTEX_UNLOCK( &stream_.mutex );
\r
7730 if ( stream_.state == STREAM_CLOSED ) {
\r
7731 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7732 error( RtAudioError::WARNING );
\r
7736 // Invoke user callback to get fresh output data.
\r
7737 int doStopStream = 0;
\r
7738 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7739 double streamTime = getStreamTime();
\r
7740 RtAudioStreamStatus status = 0;
\r
7741 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7742 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7743 handle->xrun[0] = false;
\r
7745 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7746 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7747 handle->xrun[1] = false;
\r
7749 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7750 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7751 if ( doStopStream == 2 ) {
\r
7752 this->abortStream();
\r
7756 MUTEX_LOCK( &stream_.mutex );
\r
7758 // The state might change while waiting on a mutex.
\r
7759 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7764 RtAudioFormat format;
\r
7766 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7768 // Setup parameters and do buffer conversion if necessary.
\r
7769 if ( stream_.doConvertBuffer[0] ) {
\r
7770 buffer = stream_.deviceBuffer;
\r
7771 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7772 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7773 format = stream_.deviceFormat[0];
\r
7776 buffer = stream_.userBuffer[0];
\r
7777 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7778 format = stream_.userFormat;
\r
7781 // Do byte swapping if necessary.
\r
7782 if ( stream_.doByteSwap[0] )
\r
7783 byteSwapBuffer( buffer, samples, format );
\r
7785 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7787 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7788 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7789 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7790 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7791 handle->triggered = true;
\r
7794 // Write samples to device.
\r
7795 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7797 if ( result == -1 ) {
\r
7798 // We'll assume this is an underrun, though there isn't a
\r
7799 // specific means for determining that.
\r
7800 handle->xrun[0] = true;
\r
7801 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7802 error( RtAudioError::WARNING );
\r
7803 // Continue on to input section.
\r
7807 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7809 // Setup parameters.
\r
7810 if ( stream_.doConvertBuffer[1] ) {
\r
7811 buffer = stream_.deviceBuffer;
\r
7812 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7813 format = stream_.deviceFormat[1];
\r
7816 buffer = stream_.userBuffer[1];
\r
7817 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7818 format = stream_.userFormat;
\r
7821 // Read samples from device.
\r
7822 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7824 if ( result == -1 ) {
\r
7825 // We'll assume this is an overrun, though there isn't a
\r
7826 // specific means for determining that.
\r
7827 handle->xrun[1] = true;
\r
7828 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7829 error( RtAudioError::WARNING );
\r
7833 // Do byte swapping if necessary.
\r
7834 if ( stream_.doByteSwap[1] )
\r
7835 byteSwapBuffer( buffer, samples, format );
\r
7837 // Do buffer conversion if necessary.
\r
7838 if ( stream_.doConvertBuffer[1] )
\r
7839 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7843 MUTEX_UNLOCK( &stream_.mutex );
\r
7845 RtApi::tickStreamTime();
\r
7846 if ( doStopStream == 1 ) this->stopStream();
\r
7849 static void *ossCallbackHandler( void *ptr )
\r
7851 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7852 RtApiOss *object = (RtApiOss *) info->object;
\r
7853 bool *isRunning = &info->isRunning;
\r
7855 while ( *isRunning == true ) {
\r
7856 pthread_testcancel();
\r
7857 object->callbackEvent();
\r
7860 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
7873 // This method can be modified to control the behavior of error
\r
7874 // message printing.
\r
7875 void RtApi :: error( RtAudioError::Type type )
\r
7877 errorStream_.str(""); // clear the ostringstream
\r
7879 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7880 if ( errorCallback ) {
\r
7881 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7883 if ( firstErrorOccurred_ )
\r
7886 firstErrorOccurred_ = true;
\r
7887 const std::string errorMessage = errorText_;
\r
7889 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7890 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7894 errorCallback( type, errorMessage );
\r
7895 firstErrorOccurred_ = false;
\r
7899 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
7900 std::cerr << '\n' << errorText_ << "\n\n";
\r
7901 else if ( type != RtAudioError::WARNING )
\r
7902 throw( RtAudioError( errorText_, type ) );
\r
7905 void RtApi :: verifyStream()
\r
7907 if ( stream_.state == STREAM_CLOSED ) {
\r
7908 errorText_ = "RtApi:: a stream is not open!";
\r
7909 error( RtAudioError::INVALID_USE );
\r
7913 void RtApi :: clearStreamInfo()
\r
7915 stream_.mode = UNINITIALIZED;
\r
7916 stream_.state = STREAM_CLOSED;
\r
7917 stream_.sampleRate = 0;
\r
7918 stream_.bufferSize = 0;
\r
7919 stream_.nBuffers = 0;
\r
7920 stream_.userFormat = 0;
\r
7921 stream_.userInterleaved = true;
\r
7922 stream_.streamTime = 0.0;
\r
7923 stream_.apiHandle = 0;
\r
7924 stream_.deviceBuffer = 0;
\r
7925 stream_.callbackInfo.callback = 0;
\r
7926 stream_.callbackInfo.userData = 0;
\r
7927 stream_.callbackInfo.isRunning = false;
\r
7928 stream_.callbackInfo.errorCallback = 0;
\r
7929 for ( int i=0; i<2; i++ ) {
\r
7930 stream_.device[i] = 11111;
\r
7931 stream_.doConvertBuffer[i] = false;
\r
7932 stream_.deviceInterleaved[i] = true;
\r
7933 stream_.doByteSwap[i] = false;
\r
7934 stream_.nUserChannels[i] = 0;
\r
7935 stream_.nDeviceChannels[i] = 0;
\r
7936 stream_.channelOffset[i] = 0;
\r
7937 stream_.deviceFormat[i] = 0;
\r
7938 stream_.latency[i] = 0;
\r
7939 stream_.userBuffer[i] = 0;
\r
7940 stream_.convertInfo[i].channels = 0;
\r
7941 stream_.convertInfo[i].inJump = 0;
\r
7942 stream_.convertInfo[i].outJump = 0;
\r
7943 stream_.convertInfo[i].inFormat = 0;
\r
7944 stream_.convertInfo[i].outFormat = 0;
\r
7945 stream_.convertInfo[i].inOffset.clear();
\r
7946 stream_.convertInfo[i].outOffset.clear();
\r
7950 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7952 if ( format == RTAUDIO_SINT16 )
\r
7954 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7956 else if ( format == RTAUDIO_FLOAT64 )
\r
7958 else if ( format == RTAUDIO_SINT24 )
\r
7960 else if ( format == RTAUDIO_SINT8 )
\r
7963 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7964 error( RtAudioError::WARNING );
\r
7969 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7971 if ( mode == INPUT ) { // convert device to user buffer
\r
7972 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7973 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7974 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7975 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7977 else { // convert user to device buffer
\r
7978 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7979 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7980 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7981 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7984 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7985 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7987 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7989 // Set up the interleave/deinterleave offsets.
\r
7990 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7991 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7992 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7993 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7994 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7995 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7996 stream_.convertInfo[mode].inJump = 1;
\r
8000 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8001 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8002 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8003 stream_.convertInfo[mode].outJump = 1;
\r
8007 else { // no (de)interleaving
\r
8008 if ( stream_.userInterleaved ) {
\r
8009 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8010 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8011 stream_.convertInfo[mode].outOffset.push_back( k );
\r
8015 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8016 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
8017 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8018 stream_.convertInfo[mode].inJump = 1;
\r
8019 stream_.convertInfo[mode].outJump = 1;
\r
8024 // Add channel offset.
\r
8025 if ( firstChannel > 0 ) {
\r
8026 if ( stream_.deviceInterleaved[mode] ) {
\r
8027 if ( mode == OUTPUT ) {
\r
8028 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8029 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
8032 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8033 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
8037 if ( mode == OUTPUT ) {
\r
8038 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8039 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8042 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8043 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8049 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8051 // This function does format conversion, input/output channel compensation, and
\r
8052 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8053 // the lower three bytes of a 32-bit integer.
\r
8055 // Clear our device buffer when in/out duplex device channels are different
\r
8056 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8057 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8058 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8061 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8063 Float64 *out = (Float64 *)outBuffer;
\r
8065 if (info.inFormat == RTAUDIO_SINT8) {
\r
8066 signed char *in = (signed char *)inBuffer;
\r
8067 scale = 1.0 / 127.5;
\r
8068 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8069 for (j=0; j<info.channels; j++) {
\r
8070 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8071 out[info.outOffset[j]] += 0.5;
\r
8072 out[info.outOffset[j]] *= scale;
\r
8074 in += info.inJump;
\r
8075 out += info.outJump;
\r
8078 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8079 Int16 *in = (Int16 *)inBuffer;
\r
8080 scale = 1.0 / 32767.5;
\r
8081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8082 for (j=0; j<info.channels; j++) {
\r
8083 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8084 out[info.outOffset[j]] += 0.5;
\r
8085 out[info.outOffset[j]] *= scale;
\r
8087 in += info.inJump;
\r
8088 out += info.outJump;
\r
8091 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8092 Int24 *in = (Int24 *)inBuffer;
\r
8093 scale = 1.0 / 8388607.5;
\r
8094 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8095 for (j=0; j<info.channels; j++) {
\r
8096 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8097 out[info.outOffset[j]] += 0.5;
\r
8098 out[info.outOffset[j]] *= scale;
\r
8100 in += info.inJump;
\r
8101 out += info.outJump;
\r
8104 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8105 Int32 *in = (Int32 *)inBuffer;
\r
8106 scale = 1.0 / 2147483647.5;
\r
8107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8108 for (j=0; j<info.channels; j++) {
\r
8109 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8110 out[info.outOffset[j]] += 0.5;
\r
8111 out[info.outOffset[j]] *= scale;
\r
8113 in += info.inJump;
\r
8114 out += info.outJump;
\r
8117 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8118 Float32 *in = (Float32 *)inBuffer;
\r
8119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8120 for (j=0; j<info.channels; j++) {
\r
8121 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8123 in += info.inJump;
\r
8124 out += info.outJump;
\r
8127 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8128 // Channel compensation and/or (de)interleaving only.
\r
8129 Float64 *in = (Float64 *)inBuffer;
\r
8130 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8131 for (j=0; j<info.channels; j++) {
\r
8132 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8134 in += info.inJump;
\r
8135 out += info.outJump;
\r
8139 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8141 Float32 *out = (Float32 *)outBuffer;
\r
8143 if (info.inFormat == RTAUDIO_SINT8) {
\r
8144 signed char *in = (signed char *)inBuffer;
\r
8145 scale = (Float32) ( 1.0 / 127.5 );
\r
8146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8147 for (j=0; j<info.channels; j++) {
\r
8148 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8149 out[info.outOffset[j]] += 0.5;
\r
8150 out[info.outOffset[j]] *= scale;
\r
8152 in += info.inJump;
\r
8153 out += info.outJump;
\r
8156 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8157 Int16 *in = (Int16 *)inBuffer;
\r
8158 scale = (Float32) ( 1.0 / 32767.5 );
\r
8159 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8160 for (j=0; j<info.channels; j++) {
\r
8161 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8162 out[info.outOffset[j]] += 0.5;
\r
8163 out[info.outOffset[j]] *= scale;
\r
8165 in += info.inJump;
\r
8166 out += info.outJump;
\r
8169 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8170 Int24 *in = (Int24 *)inBuffer;
\r
8171 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8173 for (j=0; j<info.channels; j++) {
\r
8174 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8175 out[info.outOffset[j]] += 0.5;
\r
8176 out[info.outOffset[j]] *= scale;
\r
8178 in += info.inJump;
\r
8179 out += info.outJump;
\r
8182 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8183 Int32 *in = (Int32 *)inBuffer;
\r
8184 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8186 for (j=0; j<info.channels; j++) {
\r
8187 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8188 out[info.outOffset[j]] += 0.5;
\r
8189 out[info.outOffset[j]] *= scale;
\r
8191 in += info.inJump;
\r
8192 out += info.outJump;
\r
8195 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8196 // Channel compensation and/or (de)interleaving only.
\r
8197 Float32 *in = (Float32 *)inBuffer;
\r
8198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8199 for (j=0; j<info.channels; j++) {
\r
8200 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8202 in += info.inJump;
\r
8203 out += info.outJump;
\r
8206 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8207 Float64 *in = (Float64 *)inBuffer;
\r
8208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8209 for (j=0; j<info.channels; j++) {
\r
8210 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8212 in += info.inJump;
\r
8213 out += info.outJump;
\r
8217 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8218 Int32 *out = (Int32 *)outBuffer;
\r
8219 if (info.inFormat == RTAUDIO_SINT8) {
\r
8220 signed char *in = (signed char *)inBuffer;
\r
8221 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8222 for (j=0; j<info.channels; j++) {
\r
8223 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8224 out[info.outOffset[j]] <<= 24;
\r
8226 in += info.inJump;
\r
8227 out += info.outJump;
\r
8230 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8231 Int16 *in = (Int16 *)inBuffer;
\r
8232 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8233 for (j=0; j<info.channels; j++) {
\r
8234 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8235 out[info.outOffset[j]] <<= 16;
\r
8237 in += info.inJump;
\r
8238 out += info.outJump;
\r
8241 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8242 Int24 *in = (Int24 *)inBuffer;
\r
8243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8244 for (j=0; j<info.channels; j++) {
\r
8245 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8246 out[info.outOffset[j]] <<= 8;
\r
8248 in += info.inJump;
\r
8249 out += info.outJump;
\r
8252 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8253 // Channel compensation and/or (de)interleaving only.
\r
8254 Int32 *in = (Int32 *)inBuffer;
\r
8255 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8256 for (j=0; j<info.channels; j++) {
\r
8257 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8259 in += info.inJump;
\r
8260 out += info.outJump;
\r
8263 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8264 Float32 *in = (Float32 *)inBuffer;
\r
8265 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8266 for (j=0; j<info.channels; j++) {
\r
8267 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8269 in += info.inJump;
\r
8270 out += info.outJump;
\r
8273 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8274 Float64 *in = (Float64 *)inBuffer;
\r
8275 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8276 for (j=0; j<info.channels; j++) {
\r
8277 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8279 in += info.inJump;
\r
8280 out += info.outJump;
\r
8284 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8285 Int24 *out = (Int24 *)outBuffer;
\r
8286 if (info.inFormat == RTAUDIO_SINT8) {
\r
8287 signed char *in = (signed char *)inBuffer;
\r
8288 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8289 for (j=0; j<info.channels; j++) {
\r
8290 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8291 //out[info.outOffset[j]] <<= 16;
\r
8293 in += info.inJump;
\r
8294 out += info.outJump;
\r
8297 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8298 Int16 *in = (Int16 *)inBuffer;
\r
8299 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8300 for (j=0; j<info.channels; j++) {
\r
8301 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8302 //out[info.outOffset[j]] <<= 8;
\r
8304 in += info.inJump;
\r
8305 out += info.outJump;
\r
8308 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8309 // Channel compensation and/or (de)interleaving only.
\r
8310 Int24 *in = (Int24 *)inBuffer;
\r
8311 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8312 for (j=0; j<info.channels; j++) {
\r
8313 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8315 in += info.inJump;
\r
8316 out += info.outJump;
\r
8319 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8320 Int32 *in = (Int32 *)inBuffer;
\r
8321 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8322 for (j=0; j<info.channels; j++) {
\r
8323 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8324 //out[info.outOffset[j]] >>= 8;
\r
8326 in += info.inJump;
\r
8327 out += info.outJump;
\r
8330 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8331 Float32 *in = (Float32 *)inBuffer;
\r
8332 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8333 for (j=0; j<info.channels; j++) {
\r
8334 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8336 in += info.inJump;
\r
8337 out += info.outJump;
\r
8340 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8341 Float64 *in = (Float64 *)inBuffer;
\r
8342 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8343 for (j=0; j<info.channels; j++) {
\r
8344 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8346 in += info.inJump;
\r
8347 out += info.outJump;
\r
8351 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8352 Int16 *out = (Int16 *)outBuffer;
\r
8353 if (info.inFormat == RTAUDIO_SINT8) {
\r
8354 signed char *in = (signed char *)inBuffer;
\r
8355 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8356 for (j=0; j<info.channels; j++) {
\r
8357 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8358 out[info.outOffset[j]] <<= 8;
\r
8360 in += info.inJump;
\r
8361 out += info.outJump;
\r
8364 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8365 // Channel compensation and/or (de)interleaving only.
\r
8366 Int16 *in = (Int16 *)inBuffer;
\r
8367 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8368 for (j=0; j<info.channels; j++) {
\r
8369 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8371 in += info.inJump;
\r
8372 out += info.outJump;
\r
8375 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8376 Int24 *in = (Int24 *)inBuffer;
\r
8377 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8378 for (j=0; j<info.channels; j++) {
\r
8379 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8381 in += info.inJump;
\r
8382 out += info.outJump;
\r
8385 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8386 Int32 *in = (Int32 *)inBuffer;
\r
8387 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8388 for (j=0; j<info.channels; j++) {
\r
8389 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8391 in += info.inJump;
\r
8392 out += info.outJump;
\r
8395 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8396 Float32 *in = (Float32 *)inBuffer;
\r
8397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8398 for (j=0; j<info.channels; j++) {
\r
8399 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8401 in += info.inJump;
\r
8402 out += info.outJump;
\r
8405 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8406 Float64 *in = (Float64 *)inBuffer;
\r
8407 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8408 for (j=0; j<info.channels; j++) {
\r
8409 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8411 in += info.inJump;
\r
8412 out += info.outJump;
\r
8416 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8417 signed char *out = (signed char *)outBuffer;
\r
8418 if (info.inFormat == RTAUDIO_SINT8) {
\r
8419 // Channel compensation and/or (de)interleaving only.
\r
8420 signed char *in = (signed char *)inBuffer;
\r
8421 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8422 for (j=0; j<info.channels; j++) {
\r
8423 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8425 in += info.inJump;
\r
8426 out += info.outJump;
\r
8429 if (info.inFormat == RTAUDIO_SINT16) {
\r
8430 Int16 *in = (Int16 *)inBuffer;
\r
8431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8432 for (j=0; j<info.channels; j++) {
\r
8433 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8435 in += info.inJump;
\r
8436 out += info.outJump;
\r
8439 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8440 Int24 *in = (Int24 *)inBuffer;
\r
8441 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8442 for (j=0; j<info.channels; j++) {
\r
8443 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8445 in += info.inJump;
\r
8446 out += info.outJump;
\r
8449 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8450 Int32 *in = (Int32 *)inBuffer;
\r
8451 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8452 for (j=0; j<info.channels; j++) {
\r
8453 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8455 in += info.inJump;
\r
8456 out += info.outJump;
\r
8459 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8460 Float32 *in = (Float32 *)inBuffer;
\r
8461 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8462 for (j=0; j<info.channels; j++) {
\r
8463 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8465 in += info.inJump;
\r
8466 out += info.outJump;
\r
8469 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8470 Float64 *in = (Float64 *)inBuffer;
\r
8471 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8472 for (j=0; j<info.channels; j++) {
\r
8473 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8475 in += info.inJump;
\r
8476 out += info.outJump;
\r
8482 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8483 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8484 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8486 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8488 register char val;
\r
8489 register char *ptr;
\r
8492 if ( format == RTAUDIO_SINT16 ) {
\r
8493 for ( unsigned int i=0; i<samples; i++ ) {
\r
8494 // Swap 1st and 2nd bytes.
\r
8496 *(ptr) = *(ptr+1);
\r
8499 // Increment 2 bytes.
\r
8503 else if ( format == RTAUDIO_SINT32 ||
\r
8504 format == RTAUDIO_FLOAT32 ) {
\r
8505 for ( unsigned int i=0; i<samples; i++ ) {
\r
8506 // Swap 1st and 4th bytes.
\r
8508 *(ptr) = *(ptr+3);
\r
8511 // Swap 2nd and 3rd bytes.
\r
8514 *(ptr) = *(ptr+1);
\r
8517 // Increment 3 more bytes.
\r
8521 else if ( format == RTAUDIO_SINT24 ) {
\r
8522 for ( unsigned int i=0; i<samples; i++ ) {
\r
8523 // Swap 1st and 3rd bytes.
\r
8525 *(ptr) = *(ptr+2);
\r
8528 // Increment 2 more bytes.
\r
8532 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8533 for ( unsigned int i=0; i<samples; i++ ) {
\r
8534 // Swap 1st and 8th bytes
\r
8536 *(ptr) = *(ptr+7);
\r
8539 // Swap 2nd and 7th bytes
\r
8542 *(ptr) = *(ptr+5);
\r
8545 // Swap 3rd and 6th bytes
\r
8548 *(ptr) = *(ptr+3);
\r
8551 // Swap 4th and 5th bytes
\r
8554 *(ptr) = *(ptr+1);
\r
8557 // Increment 5 more bytes.
\r
8563 // Indentation settings for Vim and Emacs
\r
8565 // Local Variables:
\r
8566 // c-basic-offset: 2
\r
8567 // indent-tabs-mode: nil
\r
8570 // vim: et sts=2 sw=2
\r