1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Map a minimal mutex API onto the platform's native primitives:
// Windows critical sections or POSIX pthread mutexes.  When no
// supported API is compiled in, provide inert dummy definitions.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return std::string( RTAUDIO_VERSION );
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_DS__)
\r
105 apis.push_back( WINDOWS_DS );
\r
107 #if defined(__MACOSX_CORE__)
\r
108 apis.push_back( MACOSX_CORE );
\r
110 #if defined(__RTAUDIO_DUMMY__)
\r
111 apis.push_back( RTAUDIO_DUMMY );
\r
115 void RtAudio :: openRtApi( RtAudio::Api api )
\r
121 #if defined(__UNIX_JACK__)
\r
122 if ( api == UNIX_JACK )
\r
123 rtapi_ = new RtApiJack();
\r
125 #if defined(__LINUX_ALSA__)
\r
126 if ( api == LINUX_ALSA )
\r
127 rtapi_ = new RtApiAlsa();
\r
129 #if defined(__LINUX_PULSE__)
\r
130 if ( api == LINUX_PULSE )
\r
131 rtapi_ = new RtApiPulse();
\r
133 #if defined(__LINUX_OSS__)
\r
134 if ( api == LINUX_OSS )
\r
135 rtapi_ = new RtApiOss();
\r
137 #if defined(__WINDOWS_ASIO__)
\r
138 if ( api == WINDOWS_ASIO )
\r
139 rtapi_ = new RtApiAsio();
\r
141 #if defined(__WINDOWS_DS__)
\r
142 if ( api == WINDOWS_DS )
\r
143 rtapi_ = new RtApiDs();
\r
145 #if defined(__MACOSX_CORE__)
\r
146 if ( api == MACOSX_CORE )
\r
147 rtapi_ = new RtApiCore();
\r
149 #if defined(__RTAUDIO_DUMMY__)
\r
150 if ( api == RTAUDIO_DUMMY )
\r
151 rtapi_ = new RtApiDummy();
\r
155 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
159 if ( api != UNSPECIFIED ) {
\r
160 // Attempt to open the specified API.
\r
162 if ( rtapi_ ) return;
\r
164 // No compiled support for specified API value. Issue a debug
\r
165 // warning and continue as if no API was specified.
\r
166 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
169 // Iterate through the compiled APIs and return as soon as we find
\r
170 // one with at least one device or we reach the end of the list.
\r
171 std::vector< RtAudio::Api > apis;
\r
172 getCompiledApi( apis );
\r
173 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
174 openRtApi( apis[i] );
\r
175 if ( rtapi_->getDeviceCount() ) break;
\r
178 if ( rtapi_ ) return;
\r
180 // It should not be possible to get here because the preprocessor
\r
181 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
182 // API-specific definitions are passed to the compiler. But just in
\r
183 // case something weird happens, we'll print out an error message.
\r
184 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
185 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
188 RtAudio :: ~RtAudio() throw()
\r
193 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
194 RtAudio::StreamParameters *inputParameters,
\r
195 RtAudioFormat format, unsigned int sampleRate,
\r
196 unsigned int *bufferFrames,
\r
197 RtAudioCallback callback, void *userData,
\r
198 RtAudio::StreamOptions *options,
\r
199 RtAudioErrorCallback errorCallback )
\r
201 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
202 sampleRate, bufferFrames, callback,
\r
203 userData, options, errorCallback );
\r
206 // *************************************************** //
\r
208 // Public RtApi definitions (see end of file for
\r
209 // private or protected utility functions).
\r
211 // *************************************************** //
\r
215 stream_.state = STREAM_CLOSED;
\r
216 stream_.mode = UNINITIALIZED;
\r
217 stream_.apiHandle = 0;
\r
218 stream_.userBuffer[0] = 0;
\r
219 stream_.userBuffer[1] = 0;
\r
220 MUTEX_INITIALIZE( &stream_.mutex );
\r
221 showWarnings_ = true;
\r
222 firstErrorOccurred_ = false;
\r
227 MUTEX_DESTROY( &stream_.mutex );
\r
230 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
231 RtAudio::StreamParameters *iParams,
\r
232 RtAudioFormat format, unsigned int sampleRate,
\r
233 unsigned int *bufferFrames,
\r
234 RtAudioCallback callback, void *userData,
\r
235 RtAudio::StreamOptions *options,
\r
236 RtAudioErrorCallback errorCallback )
\r
238 if ( stream_.state != STREAM_CLOSED ) {
\r
239 errorText_ = "RtApi::openStream: a stream is already open!";
\r
240 error( RtAudioError::INVALID_USE );
\r
244 if ( oParams && oParams->nChannels < 1 ) {
\r
245 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
246 error( RtAudioError::INVALID_USE );
\r
250 if ( iParams && iParams->nChannels < 1 ) {
\r
251 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
252 error( RtAudioError::INVALID_USE );
\r
256 if ( oParams == NULL && iParams == NULL ) {
\r
257 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 if ( formatBytes(format) == 0 ) {
\r
263 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 unsigned int nDevices = getDeviceCount();
\r
269 unsigned int oChannels = 0;
\r
271 oChannels = oParams->nChannels;
\r
272 if ( oParams->deviceId >= nDevices ) {
\r
273 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
274 error( RtAudioError::INVALID_USE );
\r
279 unsigned int iChannels = 0;
\r
281 iChannels = iParams->nChannels;
\r
282 if ( iParams->deviceId >= nDevices ) {
\r
283 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
284 error( RtAudioError::INVALID_USE );
\r
292 if ( oChannels > 0 ) {
\r
294 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
295 sampleRate, format, bufferFrames, options );
\r
296 if ( result == false ) {
\r
297 error( RtAudioError::SYSTEM_ERROR );
\r
302 if ( iChannels > 0 ) {
\r
304 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 if ( oChannels > 0 ) closeStream();
\r
308 error( RtAudioError::SYSTEM_ERROR );
\r
313 stream_.callbackInfo.callback = (void *) callback;
\r
314 stream_.callbackInfo.userData = userData;
\r
315 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
317 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
318 stream_.state = STREAM_STOPPED;
\r
321 unsigned int RtApi :: getDefaultInputDevice( void )
\r
323 // Should be implemented in subclasses if possible.
\r
327 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
329 // Should be implemented in subclasses if possible.
\r
333 void RtApi :: closeStream( void )
\r
335 // MUST be implemented in subclasses!
\r
339 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
340 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
341 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
342 RtAudio::StreamOptions * /*options*/ )
\r
344 // MUST be implemented in subclasses!
\r
348 void RtApi :: tickStreamTime( void )
\r
350 // Subclasses that do not provide their own implementation of
\r
351 // getStreamTime should call this function once per buffer I/O to
\r
352 // provide basic stream time support.
\r
354 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
356 #if defined( HAVE_GETTIMEOFDAY )
\r
357 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
361 long RtApi :: getStreamLatency( void )
\r
365 long totalLatency = 0;
\r
366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
367 totalLatency = stream_.latency[0];
\r
368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
369 totalLatency += stream_.latency[1];
\r
371 return totalLatency;
\r
374 double RtApi :: getStreamTime( void )
\r
378 #if defined( HAVE_GETTIMEOFDAY )
\r
379 // Return a very accurate estimate of the stream time by
\r
380 // adding in the elapsed time since the last tick.
\r
381 struct timeval then;
\r
382 struct timeval now;
\r
384 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
385 return stream_.streamTime;
\r
387 gettimeofday( &now, NULL );
\r
388 then = stream_.lastTickTimestamp;
\r
389 return stream_.streamTime +
\r
390 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
391 (then.tv_sec + 0.000001 * then.tv_usec));
\r
393 return stream_.streamTime;
\r
397 unsigned int RtApi :: getStreamSampleRate( void )
\r
401 return stream_.sampleRate;
\r
405 // *************************************************** //
\r
407 // OS/API-specific methods.
\r
409 // *************************************************** //
\r
411 #if defined(__MACOSX_CORE__)
\r
413 // The OS X CoreAudio API is designed to use a separate callback
\r
414 // procedure for each of its audio devices. A single RtAudio duplex
\r
415 // stream using two different devices is supported here, though it
\r
416 // cannot be guaranteed to always behave correctly because we cannot
\r
417 // synchronize these two callbacks.
\r
419 // A property listener is installed for over/underrun information.
\r
420 // However, no functionality is currently provided to allow property
\r
421 // listeners to trigger user handlers because it is unclear what could
\r
422 // be done if a critical stream parameter (buffer size, sample rate,
\r
423 // device disconnect) notification arrived. The listeners entail
\r
424 // quite a bit of extra code and most likely, a user program wouldn't
\r
425 // be prepared for the result anyway. However, we do provide a flag
\r
426 // to the client callback function to inform of an over/underrun.
\r
428 // A structure to hold various information related to the CoreAudio API
\r
430 struct CoreHandle {
\r
431 AudioDeviceID id[2]; // device ids
\r
432 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
433 AudioDeviceIOProcID procId[2];
\r
435 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
436 UInt32 nStreams[2]; // number of streams to use
\r
438 char *deviceBuffer;
\r
439 pthread_cond_t condition;
\r
440 int drainCounter; // Tracks callback counts when draining
\r
441 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
444 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
447 RtApiCore:: RtApiCore()
\r
449 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
450 // This is a largely undocumented but absolutely necessary
\r
451 // requirement starting with OS-X 10.6. If not called, queries and
\r
452 // updates to various audio device properties are not handled
\r
454 CFRunLoopRef theRunLoop = NULL;
\r
455 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
456 kAudioObjectPropertyScopeGlobal,
\r
457 kAudioObjectPropertyElementMaster };
\r
458 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
459 if ( result != noErr ) {
\r
460 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
461 error( RtAudioError::WARNING );
\r
466 RtApiCore :: ~RtApiCore()
\r
468 // The subclass destructor gets called before the base class
\r
469 // destructor, so close an existing stream before deallocating
\r
470 // apiDeviceId memory.
\r
471 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
474 unsigned int RtApiCore :: getDeviceCount( void )
\r
476 // Find out how many audio devices there are, if any.
\r
478 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
479 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
480 if ( result != noErr ) {
\r
481 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
482 error( RtAudioError::WARNING );
\r
486 return dataSize / sizeof( AudioDeviceID );
\r
489 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
491 unsigned int nDevices = getDeviceCount();
\r
492 if ( nDevices <= 1 ) return 0;
\r
495 UInt32 dataSize = sizeof( AudioDeviceID );
\r
496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
500 error( RtAudioError::WARNING );
\r
504 dataSize *= nDevices;
\r
505 AudioDeviceID deviceList[ nDevices ];
\r
506 property.mSelector = kAudioHardwarePropertyDevices;
\r
507 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
510 error( RtAudioError::WARNING );
\r
514 for ( unsigned int i=0; i<nDevices; i++ )
\r
515 if ( id == deviceList[i] ) return i;
\r
517 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
518 error( RtAudioError::WARNING );
\r
522 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
524 unsigned int nDevices = getDeviceCount();
\r
525 if ( nDevices <= 1 ) return 0;
\r
528 UInt32 dataSize = sizeof( AudioDeviceID );
\r
529 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
530 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
531 if ( result != noErr ) {
\r
532 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
533 error( RtAudioError::WARNING );
\r
537 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
538 AudioDeviceID deviceList[ nDevices ];
\r
539 property.mSelector = kAudioHardwarePropertyDevices;
\r
540 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
543 error( RtAudioError::WARNING );
\r
547 for ( unsigned int i=0; i<nDevices; i++ )
\r
548 if ( id == deviceList[i] ) return i;
\r
550 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
551 error( RtAudioError::WARNING );
\r
555 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
557 RtAudio::DeviceInfo info;
\r
558 info.probed = false;
\r
561 unsigned int nDevices = getDeviceCount();
\r
562 if ( nDevices == 0 ) {
\r
563 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
564 error( RtAudioError::INVALID_USE );
\r
568 if ( device >= nDevices ) {
\r
569 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
570 error( RtAudioError::INVALID_USE );
\r
574 AudioDeviceID deviceList[ nDevices ];
\r
575 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
576 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
577 kAudioObjectPropertyScopeGlobal,
\r
578 kAudioObjectPropertyElementMaster };
\r
579 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
580 0, NULL, &dataSize, (void *) &deviceList );
\r
581 if ( result != noErr ) {
\r
582 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
583 error( RtAudioError::WARNING );
\r
587 AudioDeviceID id = deviceList[ device ];
\r
589 // Get the device name.
\r
591 CFStringRef cfname;
\r
592 dataSize = sizeof( CFStringRef );
\r
593 property.mSelector = kAudioObjectPropertyManufacturer;
\r
594 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
595 if ( result != noErr ) {
\r
596 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
597 errorText_ = errorStream_.str();
\r
598 error( RtAudioError::WARNING );
\r
602 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
603 int length = CFStringGetLength(cfname);
\r
604 char *mname = (char *)malloc(length * 3 + 1);
\r
605 #if defined( UNICODE ) || defined( _UNICODE )
\r
606 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
608 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
610 info.name.append( (const char *)mname, strlen(mname) );
\r
611 info.name.append( ": " );
\r
612 CFRelease( cfname );
\r
615 property.mSelector = kAudioObjectPropertyName;
\r
616 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
617 if ( result != noErr ) {
\r
618 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
619 errorText_ = errorStream_.str();
\r
620 error( RtAudioError::WARNING );
\r
624 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
625 length = CFStringGetLength(cfname);
\r
626 char *name = (char *)malloc(length * 3 + 1);
\r
627 #if defined( UNICODE ) || defined( _UNICODE )
\r
628 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
630 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
632 info.name.append( (const char *)name, strlen(name) );
\r
633 CFRelease( cfname );
\r
636 // Get the output stream "configuration".
\r
637 AudioBufferList *bufferList = nil;
\r
638 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
639 property.mScope = kAudioDevicePropertyScopeOutput;
\r
640 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
642 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
643 if ( result != noErr || dataSize == 0 ) {
\r
644 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
645 errorText_ = errorStream_.str();
\r
646 error( RtAudioError::WARNING );
\r
650 // Allocate the AudioBufferList.
\r
651 bufferList = (AudioBufferList *) malloc( dataSize );
\r
652 if ( bufferList == NULL ) {
\r
653 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
654 error( RtAudioError::WARNING );
\r
658 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
659 if ( result != noErr || dataSize == 0 ) {
\r
660 free( bufferList );
\r
661 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
662 errorText_ = errorStream_.str();
\r
663 error( RtAudioError::WARNING );
\r
667 // Get output channel information.
\r
668 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
669 for ( i=0; i<nStreams; i++ )
\r
670 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
671 free( bufferList );
\r
673 // Get the input stream "configuration".
\r
674 property.mScope = kAudioDevicePropertyScopeInput;
\r
675 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
676 if ( result != noErr || dataSize == 0 ) {
\r
677 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
678 errorText_ = errorStream_.str();
\r
679 error( RtAudioError::WARNING );
\r
683 // Allocate the AudioBufferList.
\r
684 bufferList = (AudioBufferList *) malloc( dataSize );
\r
685 if ( bufferList == NULL ) {
\r
686 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
687 error( RtAudioError::WARNING );
\r
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
692 if (result != noErr || dataSize == 0) {
\r
693 free( bufferList );
\r
694 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
695 errorText_ = errorStream_.str();
\r
696 error( RtAudioError::WARNING );
\r
700 // Get input channel information.
\r
701 nStreams = bufferList->mNumberBuffers;
\r
702 for ( i=0; i<nStreams; i++ )
\r
703 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
704 free( bufferList );
\r
706 // If device opens for both playback and capture, we determine the channels.
\r
707 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
708 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
710 // Probe the device sample rates.
\r
711 bool isInput = false;
\r
712 if ( info.outputChannels == 0 ) isInput = true;
\r
714 // Determine the supported sample rates.
\r
715 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
716 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
717 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
718 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
719 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
720 errorText_ = errorStream_.str();
\r
721 error( RtAudioError::WARNING );
\r
725 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
726 AudioValueRange rangeList[ nRanges ];
\r
727 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
728 if ( result != kAudioHardwareNoError ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // The sample rate reporting mechanism is a bit of a mystery. It
\r
736 // seems that it can either return individual rates or a range of
\r
737 // rates. I assume that if the min / max range values are the same,
\r
738 // then that represents a single supported rate and if the min / max
\r
739 // range values are different, the device supports an arbitrary
\r
740 // range of values (though there might be multiple ranges, so we'll
\r
741 // use the most conservative range).
\r
742 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
743 bool haveValueRange = false;
\r
744 info.sampleRates.clear();
\r
745 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
746 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
747 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
749 haveValueRange = true;
\r
750 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
751 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
755 if ( haveValueRange ) {
\r
756 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
757 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
758 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
762 // Sort and remove any redundant values
\r
763 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
764 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
766 if ( info.sampleRates.size() == 0 ) {
\r
767 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
768 errorText_ = errorStream_.str();
\r
769 error( RtAudioError::WARNING );
\r
773 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
774 // Thus, any other "physical" formats supported by the device are of
\r
775 // no interest to the client.
\r
776 info.nativeFormats = RTAUDIO_FLOAT32;
\r
778 if ( info.outputChannels > 0 )
\r
779 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
780 if ( info.inputChannels > 0 )
\r
781 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
783 info.probed = true;
\r
787 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
788 const AudioTimeStamp* /*inNow*/,
\r
789 const AudioBufferList* inInputData,
\r
790 const AudioTimeStamp* /*inInputTime*/,
\r
791 AudioBufferList* outOutputData,
\r
792 const AudioTimeStamp* /*inOutputTime*/,
\r
793 void* infoPointer )
\r
795 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
797 RtApiCore *object = (RtApiCore *) info->object;
\r
798 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
799 return kAudioHardwareUnspecifiedError;
\r
801 return kAudioHardwareNoError;
\r
804 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
806 const AudioObjectPropertyAddress properties[],
\r
807 void* handlePointer )
\r
809 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
810 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
811 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
812 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
813 handle->xrun[1] = true;
\r
815 handle->xrun[0] = true;
\r
819 return kAudioHardwareNoError;
\r
822 static OSStatus rateListener( AudioObjectID inDevice,
\r
823 UInt32 /*nAddresses*/,
\r
824 const AudioObjectPropertyAddress /*properties*/[],
\r
825 void* ratePointer )
\r
827 Float64 *rate = (Float64 *) ratePointer;
\r
828 UInt32 dataSize = sizeof( Float64 );
\r
829 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
830 kAudioObjectPropertyScopeGlobal,
\r
831 kAudioObjectPropertyElementMaster };
\r
832 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
833 return kAudioHardwareNoError;
\r
836 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
837 unsigned int firstChannel, unsigned int sampleRate,
\r
838 RtAudioFormat format, unsigned int *bufferSize,
\r
839 RtAudio::StreamOptions *options )
\r
842 unsigned int nDevices = getDeviceCount();
\r
843 if ( nDevices == 0 ) {
\r
844 // This should not happen because a check is made before this function is called.
\r
845 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
849 if ( device >= nDevices ) {
\r
850 // This should not happen because a check is made before this function is called.
\r
851 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
855 AudioDeviceID deviceList[ nDevices ];
\r
856 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
857 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
858 kAudioObjectPropertyScopeGlobal,
\r
859 kAudioObjectPropertyElementMaster };
\r
860 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
861 0, NULL, &dataSize, (void *) &deviceList );
\r
862 if ( result != noErr ) {
\r
863 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
867 AudioDeviceID id = deviceList[ device ];
\r
869 // Setup for stream mode.
\r
870 bool isInput = false;
\r
871 if ( mode == INPUT ) {
\r
873 property.mScope = kAudioDevicePropertyScopeInput;
\r
876 property.mScope = kAudioDevicePropertyScopeOutput;
\r
878 // Get the stream "configuration".
\r
879 AudioBufferList *bufferList = nil;
\r
881 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
882 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
883 if ( result != noErr || dataSize == 0 ) {
\r
884 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
885 errorText_ = errorStream_.str();
\r
889 // Allocate the AudioBufferList.
\r
890 bufferList = (AudioBufferList *) malloc( dataSize );
\r
891 if ( bufferList == NULL ) {
\r
892 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
896 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
897 if (result != noErr || dataSize == 0) {
\r
898 free( bufferList );
\r
899 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
900 errorText_ = errorStream_.str();
\r
904 // Search for one or more streams that contain the desired number of
\r
905 // channels. CoreAudio devices can have an arbitrary number of
\r
906 // streams and each stream can have an arbitrary number of channels.
\r
907 // For each stream, a single buffer of interleaved samples is
\r
908 // provided. RtAudio prefers the use of one stream of interleaved
\r
909 // data or multiple consecutive single-channel streams. However, we
\r
910 // now support multiple consecutive multi-channel streams of
\r
911 // interleaved data as well.
\r
912 UInt32 iStream, offsetCounter = firstChannel;
\r
913 UInt32 nStreams = bufferList->mNumberBuffers;
\r
914 bool monoMode = false;
\r
915 bool foundStream = false;
\r
917 // First check that the device supports the requested number of
\r
919 UInt32 deviceChannels = 0;
\r
920 for ( iStream=0; iStream<nStreams; iStream++ )
\r
921 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
923 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
924 free( bufferList );
\r
925 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
926 errorText_ = errorStream_.str();
\r
930 // Look for a single stream meeting our needs.
\r
931 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
932 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
933 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
934 if ( streamChannels >= channels + offsetCounter ) {
\r
935 firstStream = iStream;
\r
936 channelOffset = offsetCounter;
\r
937 foundStream = true;
\r
940 if ( streamChannels > offsetCounter ) break;
\r
941 offsetCounter -= streamChannels;
\r
944 // If we didn't find a single stream above, then we should be able
\r
945 // to meet the channel specification with multiple streams.
\r
946 if ( foundStream == false ) {
\r
948 offsetCounter = firstChannel;
\r
949 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
950 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
951 if ( streamChannels > offsetCounter ) break;
\r
952 offsetCounter -= streamChannels;
\r
955 firstStream = iStream;
\r
956 channelOffset = offsetCounter;
\r
957 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
959 if ( streamChannels > 1 ) monoMode = false;
\r
960 while ( channelCounter > 0 ) {
\r
961 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
962 if ( streamChannels > 1 ) monoMode = false;
\r
963 channelCounter -= streamChannels;
\r
968 free( bufferList );
\r
970 // Determine the buffer size.
\r
971 AudioValueRange bufferRange;
\r
972 dataSize = sizeof( AudioValueRange );
\r
973 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
974 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
976 if ( result != noErr ) {
\r
977 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
978 errorText_ = errorStream_.str();
\r
982 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
983 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
984 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
986 // Set the buffer size. For multiple streams, I'm assuming we only
\r
987 // need to make this setting for the master channel.
\r
988 UInt32 theSize = (UInt32) *bufferSize;
\r
989 dataSize = sizeof( UInt32 );
\r
990 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
991 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
993 if ( result != noErr ) {
\r
994 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
995 errorText_ = errorStream_.str();
\r
999 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1000 // MUST be the same in both directions!
\r
1001 *bufferSize = theSize;
\r
1002 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1004 errorText_ = errorStream_.str();
\r
1008 stream_.bufferSize = *bufferSize;
\r
1009 stream_.nBuffers = 1;
\r
1011 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1012 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1014 dataSize = sizeof( hog_pid );
\r
1015 property.mSelector = kAudioDevicePropertyHogMode;
\r
1016 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1017 if ( result != noErr ) {
\r
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1019 errorText_ = errorStream_.str();
\r
1023 if ( hog_pid != getpid() ) {
\r
1024 hog_pid = getpid();
\r
1025 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1026 if ( result != noErr ) {
\r
1027 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1028 errorText_ = errorStream_.str();
\r
1034 // Check and if necessary, change the sample rate for the device.
\r
1035 Float64 nominalRate;
\r
1036 dataSize = sizeof( Float64 );
\r
1037 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1038 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1039 if ( result != noErr ) {
\r
1040 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1041 errorText_ = errorStream_.str();
\r
1045 // Only change the sample rate if off by more than 1 Hz.
\r
1046 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1048 // Set a property listener for the sample rate change
\r
1049 Float64 reportedRate = 0.0;
\r
1050 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1051 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1052 if ( result != noErr ) {
\r
1053 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1054 errorText_ = errorStream_.str();
\r
1058 nominalRate = (Float64) sampleRate;
\r
1059 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1060 if ( result != noErr ) {
\r
1061 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1063 errorText_ = errorStream_.str();
\r
1067 // Now wait until the reported nominal rate is what we just set.
\r
1068 UInt32 microCounter = 0;
\r
1069 while ( reportedRate != nominalRate ) {
\r
1070 microCounter += 5000;
\r
1071 if ( microCounter > 5000000 ) break;
\r
1075 // Remove the property listener.
\r
1076 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1078 if ( microCounter > 5000000 ) {
\r
1079 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1080 errorText_ = errorStream_.str();
\r
1085 // Now set the stream format for all streams. Also, check the
\r
1086 // physical format of the device and change that if necessary.
\r
1087 AudioStreamBasicDescription description;
\r
1088 dataSize = sizeof( AudioStreamBasicDescription );
\r
1089 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1090 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1091 if ( result != noErr ) {
\r
1092 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1093 errorText_ = errorStream_.str();
\r
1097 // Set the sample rate and data format id. However, only make the
\r
1098 // change if the sample rate is not within 1.0 of the desired
\r
1099 // rate and the format is not linear pcm.
\r
1100 bool updateFormat = false;
\r
1101 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1102 description.mSampleRate = (Float64) sampleRate;
\r
1103 updateFormat = true;
\r
1106 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1107 description.mFormatID = kAudioFormatLinearPCM;
\r
1108 updateFormat = true;
\r
1111 if ( updateFormat ) {
\r
1112 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1113 if ( result != noErr ) {
\r
1114 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1115 errorText_ = errorStream_.str();
\r
1120 // Now check the physical format.
\r
1121 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1122 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1129 //std::cout << "Current physical stream format:" << std::endl;
\r
1130 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1131 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1132 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1133 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1135 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1136 description.mFormatID = kAudioFormatLinearPCM;
\r
1137 //description.mSampleRate = (Float64) sampleRate;
\r
1138 AudioStreamBasicDescription testDescription = description;
\r
1139 UInt32 formatFlags;
\r
1141 // We'll try higher bit rates first and then work our way down.
\r
1142 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1143 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1144 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1145 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1146 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1147 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1148 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1149 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1150 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1151 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1152 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1153 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1156 bool setPhysicalFormat = false;
\r
1157 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1158 testDescription = description;
\r
1159 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1160 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1161 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1162 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1164 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1165 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1167 if ( result == noErr ) {
\r
1168 setPhysicalFormat = true;
\r
1169 //std::cout << "Updated physical stream format:" << std::endl;
\r
1170 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1171 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1172 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1173 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1178 if ( !setPhysicalFormat ) {
\r
1179 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1180 errorText_ = errorStream_.str();
\r
1183 } // done setting virtual/physical formats.
\r
1185 // Get the stream / device latency.
\r
1187 dataSize = sizeof( UInt32 );
\r
1188 property.mSelector = kAudioDevicePropertyLatency;
\r
1189 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1190 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1191 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1193 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1194 errorText_ = errorStream_.str();
\r
1195 error( RtAudioError::WARNING );
\r
1199 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1200 // always be presented in native-endian format, so we should never
\r
1201 // need to byte swap.
\r
1202 stream_.doByteSwap[mode] = false;
\r
1204 // From the CoreAudio documentation, PCM data must be supplied as
\r
1206 stream_.userFormat = format;
\r
1207 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1209 if ( streamCount == 1 )
\r
1210 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1211 else // multiple streams
\r
1212 stream_.nDeviceChannels[mode] = channels;
\r
1213 stream_.nUserChannels[mode] = channels;
\r
1214 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1215 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1216 else stream_.userInterleaved = true;
\r
1217 stream_.deviceInterleaved[mode] = true;
\r
1218 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1220 // Set flags for buffer conversion.
\r
1221 stream_.doConvertBuffer[mode] = false;
\r
1222 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1223 stream_.doConvertBuffer[mode] = true;
\r
1224 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1225 stream_.doConvertBuffer[mode] = true;
\r
1226 if ( streamCount == 1 ) {
\r
1227 if ( stream_.nUserChannels[mode] > 1 &&
\r
1228 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1229 stream_.doConvertBuffer[mode] = true;
\r
1231 else if ( monoMode && stream_.userInterleaved )
\r
1232 stream_.doConvertBuffer[mode] = true;
\r
1234 // Allocate our CoreHandle structure for the stream.
\r
1235 CoreHandle *handle = 0;
\r
1236 if ( stream_.apiHandle == 0 ) {
\r
1238 handle = new CoreHandle;
\r
1240 catch ( std::bad_alloc& ) {
\r
1241 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1245 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1246 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1249 stream_.apiHandle = (void *) handle;
\r
1252 handle = (CoreHandle *) stream_.apiHandle;
\r
1253 handle->iStream[mode] = firstStream;
\r
1254 handle->nStreams[mode] = streamCount;
\r
1255 handle->id[mode] = id;
\r
1257 // Allocate necessary internal buffers.
\r
1258 unsigned long bufferBytes;
\r
1259 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1260 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1261 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1262 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1263 if ( stream_.userBuffer[mode] == NULL ) {
\r
1264 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1268 // If possible, we will make use of the CoreAudio stream buffers as
\r
1269 // "device buffers". However, we can't do this if using multiple
\r
1271 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1273 bool makeBuffer = true;
\r
1274 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1275 if ( mode == INPUT ) {
\r
1276 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1277 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1278 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1282 if ( makeBuffer ) {
\r
1283 bufferBytes *= *bufferSize;
\r
1284 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1285 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1286 if ( stream_.deviceBuffer == NULL ) {
\r
1287 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1293 stream_.sampleRate = sampleRate;
\r
1294 stream_.device[mode] = device;
\r
1295 stream_.state = STREAM_STOPPED;
\r
1296 stream_.callbackInfo.object = (void *) this;
\r
1298 // Setup the buffer conversion information structure.
\r
1299 if ( stream_.doConvertBuffer[mode] ) {
\r
1300 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1301 else setConvertInfo( mode, channelOffset );
\r
1304 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1305 // Only one callback procedure per device.
\r
1306 stream_.mode = DUPLEX;
\r
1308 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1309 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1311 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1312 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1314 if ( result != noErr ) {
\r
1315 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1316 errorText_ = errorStream_.str();
\r
1319 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1320 stream_.mode = DUPLEX;
\r
1322 stream_.mode = mode;
\r
1325 // Setup the device property listener for over/underload.
\r
1326 property.mSelector = kAudioDeviceProcessorOverload;
\r
1327 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1333 pthread_cond_destroy( &handle->condition );
\r
1335 stream_.apiHandle = 0;
\r
1338 for ( int i=0; i<2; i++ ) {
\r
1339 if ( stream_.userBuffer[i] ) {
\r
1340 free( stream_.userBuffer[i] );
\r
1341 stream_.userBuffer[i] = 0;
\r
1345 if ( stream_.deviceBuffer ) {
\r
1346 free( stream_.deviceBuffer );
\r
1347 stream_.deviceBuffer = 0;
\r
1350 stream_.state = STREAM_CLOSED;
\r
// Close an open stream: stop any running IOProc on the output device
// (handle->id[0]) and, when a separate device is used for input, on the
// input device (handle->id[1]); detach the IOProc, free the user/device
// buffers, destroy the pthread condition variable, and reset the stream
// bookkeeping to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): this extraction is missing some original lines (closing
// braces, #else/#endif, return) — the embedded numbers are the original
// file's line numbers; gaps in them mark the missing lines.
1354 void RtApiCore :: closeStream( void )
\r
// Non-fatal warning if there is nothing to close.
1356 if ( stream_.state == STREAM_CLOSED ) {
\r
1357 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1358 error( RtAudioError::WARNING );
\r
1362 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output-side callback (index 0 covers OUTPUT and the
// output half of DUPLEX).
1363 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1364 if ( stream_.state == STREAM_RUNNING )
\r
1365 AudioDeviceStop( handle->id[0], callbackHandler );
\r
// On 10.5+ the IOProc was registered with AudioDeviceCreateIOProcID,
// so it must be destroyed by its proc ID; older systems use the
// deprecated add/remove IOProc pair (the #else line is missing here).
1366 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1367 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1369 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1370 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side (index 1) is torn down separately only when it is not the
// same physical device as the output side (duplex on one device shares
// a single IOProc).
1374 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (0 = output, 1 = input).
1385 for ( int i=0; i<2; i++ ) {
\r
1386 if ( stream_.userBuffer[i] ) {
\r
1387 free( stream_.userBuffer[i] );
\r
1388 stream_.userBuffer[i] = 0;
\r
// Release the shared internal "device" conversion buffer, if any.
1392 if ( stream_.deviceBuffer ) {
\r
1393 free( stream_.deviceBuffer );
\r
1394 stream_.deviceBuffer = 0;
\r
1397 // Destroy pthread condition variable.
\r
1398 pthread_cond_destroy( &handle->condition );
\r
1400 stream_.apiHandle = 0;
\r
1402 stream_.mode = UNINITIALIZED;
\r
1403 stream_.state = STREAM_CLOSED;
\r
// Start the stream's CoreAudio callback procedure(s): AudioDeviceStart on
// the output device (handle->id[0]) and, when input runs on a different
// device, on the input device (handle->id[1]).  Resets the drain state and
// marks the stream running.  On any CoreAudio failure, reports
// RtAudioError::SYSTEM_ERROR.
// NOTE(review): some original lines (braces, goto/labels) are missing from
// this extraction — gaps in the embedded line numbers mark them.
1406 void RtApiCore :: startStream( void )
\r
// Non-fatal warning if already running.
1409 if ( stream_.state == STREAM_RUNNING ) {
\r
1410 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1411 error( RtAudioError::WARNING );
\r
1415 OSStatus result = noErr;
\r
1416 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output side (also drives the output half of a duplex stream).
1417 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1419 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1420 if ( result != noErr ) {
\r
1421 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1422 errorText_ = errorStream_.str();
\r
// Input side: started separately only when the input device differs from
// the output device (same-device duplex uses one callback).
1427 if ( stream_.mode == INPUT ||
\r
1428 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1430 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
// Fresh start: clear the drain machinery used by stop/abort and by
// callback return values 1/2 (see callbackEvent), then mark running.
1438 handle->drainCounter = 0;
\r
1439 handle->internalDrain = false;
\r
1440 stream_.state = STREAM_RUNNING;
\r
1443 if ( result == noErr ) return;
\r
1444 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream.  For an output/duplex stream the output is first
// drained: drainCounter is set to 2 and this thread blocks on the handle's
// condition variable until callbackEvent() signals that the zero-fill
// drain has completed, only then is AudioDeviceStop issued.  A separate
// input device is stopped directly.
// NOTE(review): some original lines (braces, state handling) are missing
// from this extraction — gaps in the embedded line numbers mark them.
1447 void RtApiCore :: stopStream( void )
\r
// Non-fatal warning if already stopped.
1450 if ( stream_.state == STREAM_STOPPED ) {
\r
1451 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1452 error( RtAudioError::WARNING );
\r
1456 OSStatus result = noErr;
\r
1457 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1458 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain is in progress yet (abortStream or the
// user callback may have already set it); request one and wait for the
// callback thread to signal completion.
1460 if ( handle->drainCounter == 0 ) {
\r
1461 handle->drainCounter = 2;
\r
1462 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1465 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1466 if ( result != noErr ) {
\r
1467 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1468 errorText_ = errorStream_.str();
\r
// Stop the input device separately only when it differs from the output
// device (same-device duplex shares one IOProc, stopped above).
1473 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1475 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1476 if ( result != noErr ) {
\r
1477 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1478 errorText_ = errorStream_.str();
\r
1483 stream_.state = STREAM_STOPPED;
\r
1486 if ( result == noErr ) return;
\r
1487 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining queued output.  Setting drainCounter
// to 2 makes callbackEvent() skip the user callback and write zeros (see
// the drainCounter > 1 path there); presumably the missing lines of this
// function then call stopStream() — TODO confirm against the full source
// (this extraction is missing lines; gaps in the embedded numbers mark them).
1490 void RtApiCore :: abortStream( void )
\r
// Non-fatal warning if already stopped.
1493 if ( stream_.state == STREAM_STOPPED ) {
\r
1494 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1495 error( RtAudioError::WARNING );
\r
1499 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1500 handle->drainCounter = 2;
\r
1505 // This function will be called by a spawned thread when the user
\r
1506 // callback function signals that the stream should be stopped or
\r
1507 // aborted. It is better to handle it this way because the
\r
1508 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1509 // function is called.
\r
// pthread entry point: ptr is the stream's CallbackInfo, whose `object`
// member is the owning RtApiCore instance (see callbackEvent, which
// spawns this thread when internalDrain completes).
1510 static void *coreStopStream( void *ptr )
\r
1512 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1513 RtApiCore *object = (RtApiCore *) info->object;
\r
1515 object->stopStream();
\r
// Terminate this helper thread; no return value is consumed.
1516 pthread_exit( NULL );
\r
// Per-buffer CoreAudio IOProc work.  Called (via callbackHandler) with the
// device that fired and its input/output AudioBufferLists.  Responsibilities
// visible in this code:
//   1. finish a pending drain (spawn coreStopStream or signal stopStream);
//   2. run the user callback to produce/consume one buffer of audio;
//   3. move data between the user/device buffers and the CoreAudio stream
//      buffers, handling single-stream, mono (non-interleaved), and
//      multiple multi-channel stream layouts for both directions.
// Returns SUCCESS early when the stream is stopped/stopping.
// NOTE(review): this extraction is missing some original lines (closing
// braces, returns, a few statements) — gaps in the embedded original line
// numbers mark them.
1519 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1520 const AudioBufferList *inBufferList,
\r
1521 const AudioBufferList *outBufferList )
\r
1523 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1524 if ( stream_.state == STREAM_CLOSED ) {
\r
1525 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1526 error( RtAudioError::WARNING );
\r
1530 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1531 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1533 // Check if we were draining the stream and signal is finished.
\r
// drainCounter passes 3 after the zero-fill pass below has incremented it;
// internalDrain distinguishes a callback-initiated stop (needs its own
// thread so this IOProc can return) from an external stopStream() waiter.
1534 if ( handle->drainCounter > 3 ) {
\r
1535 ThreadHandle threadId;
\r
1537 stream_.state = STREAM_STOPPING;
\r
1538 if ( handle->internalDrain == true )
\r
1539 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1540 else // external call to stopStream()
\r
1541 pthread_cond_signal( &handle->condition );
\r
// handle->id[0] is the output-side device; used to decide which duplex
// invocation (output vs. input device) runs the user callback and fills
// the output buffers.
1545 AudioDeviceID outputDevice = handle->id[0];
\r
1547 // Invoke user callback to get fresh output data UNLESS we are
\r
1548 // draining stream or duplex mode AND the input/output devices are
\r
1549 // different AND this function is called for the input device.
\r
1550 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1551 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1552 double streamTime = getStreamTime();
\r
// Report (and clear) any over/underflow flags set by the xrun listener;
// xrun[0] = output underflow, xrun[1] = input overflow.
1553 RtAudioStreamStatus status = 0;
\r
1554 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1555 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1556 handle->xrun[0] = false;
\r
1558 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1559 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1560 handle->xrun[1] = false;
\r
1563 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1564 stream_.bufferSize, streamTime, status, info->userData );
\r
// User callback return codes: 2 = abort (drain immediately, no final
// buffer), 1 = stop after draining output (internalDrain).
1565 if ( cbReturnValue == 2 ) {
\r
1566 stream_.state = STREAM_STOPPING;
\r
1567 handle->drainCounter = 2;
\r
1571 else if ( cbReturnValue == 1 ) {
\r
1572 handle->drainCounter = 1;
\r
1573 handle->internalDrain = true;
\r
// ---- Output side: fill the CoreAudio output stream buffer(s). ----
1577 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1579 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1581 if ( handle->nStreams[0] == 1 ) {
\r
1582 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1584 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1586 else { // fill multiple streams with zeros
\r
1587 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1588 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1590 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
// Single output stream: convert or copy the user buffer straight into
// the CoreAudio stream buffer.
1594 else if ( handle->nStreams[0] == 1 ) {
\r
1595 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1596 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1597 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1599 else { // copy from user buffer
\r
1600 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1601 stream_.userBuffer[0],
\r
1602 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1605 else { // fill multiple streams
\r
// Source is the user buffer, or the internal device buffer after format
// conversion.
1606 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1607 if ( stream_.doConvertBuffer[0] ) {
\r
1608 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1609 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: one single-channel CoreAudio stream per user channel, so
// each channel's contiguous block is copied to its own stream buffer.
1612 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1613 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1614 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1615 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1616 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1619 else { // fill multiple multi-channel streams with interleaved data
\r
1620 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1621 Float32 *out, *in;
\r
// When a conversion happened, the source is the device buffer, which is
// interleaved with the device channel count; otherwise use the user
// buffer's own interleaving and channel count.
1623 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1624 UInt32 inChannels = stream_.nUserChannels[0];
\r
1625 if ( stream_.doConvertBuffer[0] ) {
\r
1626 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1627 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset = distance between successive samples of one channel in the
// source: 1 sample when interleaved, a full buffer when planar.
1630 if ( inInterleaved ) inOffset = 1;
\r
1631 else inOffset = stream_.bufferSize;
\r
// Distribute inChannels of source data across the device's streams.
1633 channelsLeft = inChannels;
\r
1634 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1636 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1637 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1640 // Account for possible channel offset in first stream
\r
1641 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1642 streamChannels -= stream_.channelOffset[0];
\r
1643 outJump = stream_.channelOffset[0];
\r
1647 // Account for possible unfilled channels at end of the last stream
\r
1648 if ( streamChannels > channelsLeft ) {
\r
1649 outJump = streamChannels - channelsLeft;
\r
1650 streamChannels = channelsLeft;
\r
1653 // Determine input buffer offsets and skips
\r
1654 if ( inInterleaved ) {
\r
1655 inJump = inChannels;
\r
1656 in += inChannels - channelsLeft;
\r
1660 in += (inChannels - channelsLeft) * inOffset;
\r
// Frame-by-frame interleave into this stream's buffer; the missing lines
// here presumably advance in/out by inJump/outJump per frame — TODO
// confirm against the full source.
1663 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1664 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1665 *out++ = in[j*inOffset];
\r
1670 channelsLeft -= streamChannels;
\r
// While draining, advance the counter so the drainCounter > 3 check at
// the top eventually fires and the stream is stopped.
1675 if ( handle->drainCounter ) {
\r
1676 handle->drainCounter++;
\r
// ---- Input side: read the CoreAudio input stream buffer(s). ----
1681 AudioDeviceID inputDevice;
\r
1682 inputDevice = handle->id[1];
\r
1683 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
// Single input stream: convert or copy directly into the user buffer.
1685 if ( handle->nStreams[1] == 1 ) {
\r
1686 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1687 convertBuffer( stream_.userBuffer[1],
\r
1688 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1689 stream_.convertInfo[1] );
\r
1691 else { // copy to user buffer
\r
1692 memcpy( stream_.userBuffer[1],
\r
1693 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1694 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1697 else { // read from multiple streams
\r
// Destination is the user buffer, or the internal device buffer when a
// format conversion will follow (see line 1763).
1698 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1699 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: one single-channel stream per channel, copied into
// consecutive per-channel blocks of the destination.
1701 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1702 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1703 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1704 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1705 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1708 else { // read from multiple multi-channel streams
\r
// Mirror of the output multi-stream path, gathering interleaved stream
// data into the destination buffer.
1709 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1710 Float32 *out, *in;
\r
1712 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1713 UInt32 outChannels = stream_.nUserChannels[1];
\r
1714 if ( stream_.doConvertBuffer[1] ) {
\r
1715 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1716 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset = distance between successive samples of one channel in the
// destination: 1 when interleaved, a full buffer when planar.
1719 if ( outInterleaved ) outOffset = 1;
\r
1720 else outOffset = stream_.bufferSize;
\r
1722 channelsLeft = outChannels;
\r
1723 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1725 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1726 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1729 // Account for possible channel offset in first stream
\r
1730 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1731 streamChannels -= stream_.channelOffset[1];
\r
1732 inJump = stream_.channelOffset[1];
\r
1736 // Account for possible unread channels at end of the last stream
\r
1737 if ( streamChannels > channelsLeft ) {
\r
1738 inJump = streamChannels - channelsLeft;
\r
1739 streamChannels = channelsLeft;
\r
1742 // Determine output buffer offsets and skips
\r
1743 if ( outInterleaved ) {
\r
1744 outJump = outChannels;
\r
1745 out += outChannels - channelsLeft;
\r
1749 out += (outChannels - channelsLeft) * outOffset;
\r
// Frame-by-frame de-interleave from this stream's buffer; the missing
// lines here presumably advance in/out by inJump/outJump per frame —
// TODO confirm against the full source.
1752 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1753 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1754 out[j*outOffset] = *in++;
\r
1759 channelsLeft -= streamChannels;
\r
// Final conversion from the internal device buffer into the user buffer,
// when the gather above wrote to the device buffer.
1763 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1764 convertBuffer( stream_.userBuffer[1],
\r
1765 stream_.deviceBuffer,
\r
1766 stream_.convertInfo[1] );
\r
1772 //MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream's running time by one buffer.
1774 RtApi::tickStreamTime();
\r
1778 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1782 case kAudioHardwareNotRunningError:
\r
1783 return "kAudioHardwareNotRunningError";
\r
1785 case kAudioHardwareUnspecifiedError:
\r
1786 return "kAudioHardwareUnspecifiedError";
\r
1788 case kAudioHardwareUnknownPropertyError:
\r
1789 return "kAudioHardwareUnknownPropertyError";
\r
1791 case kAudioHardwareBadPropertySizeError:
\r
1792 return "kAudioHardwareBadPropertySizeError";
\r
1794 case kAudioHardwareIllegalOperationError:
\r
1795 return "kAudioHardwareIllegalOperationError";
\r
1797 case kAudioHardwareBadObjectError:
\r
1798 return "kAudioHardwareBadObjectError";
\r
1800 case kAudioHardwareBadDeviceError:
\r
1801 return "kAudioHardwareBadDeviceError";
\r
1803 case kAudioHardwareBadStreamError:
\r
1804 return "kAudioHardwareBadStreamError";
\r
1806 case kAudioHardwareUnsupportedOperationError:
\r
1807 return "kAudioHardwareUnsupportedOperationError";
\r
1809 case kAudioDeviceUnsupportedFormatError:
\r
1810 return "kAudioDeviceUnsupportedFormatError";
\r
1812 case kAudioDevicePermissionsError:
\r
1813 return "kAudioDevicePermissionsError";
\r
1816 return "CoreAudio unknown error";
\r
1820 //******************** End of __MACOSX_CORE__ *********************//
\r
1823 #if defined(__UNIX_JACK__)
\r
1825 // JACK is a low-latency audio server, originally written for the
\r
1826 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1827 // connect a number of different applications to an audio device, as
\r
1828 // well as allowing them to share audio between themselves.
\r
1830 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1831 // have ports connected to the server. The JACK server is typically
\r
1832 // started in a terminal as follows:
\r
1834 // .jackd -d alsa -d hw:0
\r
1836 // or through an interface program such as qjackctl. Many of the
\r
1837 // parameters normally set for a stream are fixed by the JACK server
\r
1838 // and can be specified when the JACK server is started. In
\r
1841 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1843 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1844 // frames, and number of buffers = 4. Once the server is running, it
\r
1845 // is not possible to override these values. If the values are not
\r
1846 // specified in the command-line, the JACK server uses default values.
\r
1848 // The JACK server does not have to be running when an instance of
\r
1849 // RtApiJack is created, though the function getDeviceCount() will
\r
1850 // report 0 devices found until JACK has been started. When no
\r
1851 // devices are available (i.e., the JACK server is not running), a
\r
1852 // stream cannot be opened.
\r
1854 #include <jack/jack.h>
\r
1855 #include <unistd.h>
\r
1858 // A structure to hold various information related to the Jack API
\r
1859 // implementation.
\r
1860 struct JackHandle {
\r
1861 jack_client_t *client;
\r
1862 jack_port_t **ports[2];
\r
1863 std::string deviceName[2];
\r
1865 pthread_cond_t condition;
\r
1866 int drainCounter; // Tracks callback counts when draining
\r
1867 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1870 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error-callback sink handed to jack_set_error_function() so Jack's
// internal error messages are suppressed in non-debug builds.
static void jackSilentError( const char * ) {}
1875 RtApiJack :: RtApiJack()
\r
1877 // Nothing to do here.
\r
1878 #if !defined(__RTAUDIO_DEBUG__)
\r
1879 // Turn off Jack's internal error reporting.
\r
1880 jack_set_error_function( &jackSilentError );
\r
1884 RtApiJack :: ~RtApiJack()
\r
1886 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1889 unsigned int RtApiJack :: getDeviceCount( void )
\r
1891 // See if we can become a jack client.
\r
1892 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1893 jack_status_t *status = NULL;
\r
1894 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1895 if ( client == 0 ) return 0;
\r
1897 const char **ports;
\r
1898 std::string port, previousPort;
\r
1899 unsigned int nChannels = 0, nDevices = 0;
\r
1900 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1902 // Parse the port names up to the first colon (:).
\r
1903 size_t iColon = 0;
\r
1905 port = (char *) ports[ nChannels ];
\r
1906 iColon = port.find(":");
\r
1907 if ( iColon != std::string::npos ) {
\r
1908 port = port.substr( 0, iColon + 1 );
\r
1909 if ( port != previousPort ) {
\r
1911 previousPort = port;
\r
1914 } while ( ports[++nChannels] );
\r
1918 jack_client_close( client );
\r
1922 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1924 RtAudio::DeviceInfo info;
\r
1925 info.probed = false;
\r
1927 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1928 jack_status_t *status = NULL;
\r
1929 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1930 if ( client == 0 ) {
\r
1931 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1932 error( RtAudioError::WARNING );
\r
1936 const char **ports;
\r
1937 std::string port, previousPort;
\r
1938 unsigned int nPorts = 0, nDevices = 0;
\r
1939 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1941 // Parse the port names up to the first colon (:).
\r
1942 size_t iColon = 0;
\r
1944 port = (char *) ports[ nPorts ];
\r
1945 iColon = port.find(":");
\r
1946 if ( iColon != std::string::npos ) {
\r
1947 port = port.substr( 0, iColon );
\r
1948 if ( port != previousPort ) {
\r
1949 if ( nDevices == device ) info.name = port;
\r
1951 previousPort = port;
\r
1954 } while ( ports[++nPorts] );
\r
1958 if ( device >= nDevices ) {
\r
1959 jack_client_close( client );
\r
1960 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1961 error( RtAudioError::INVALID_USE );
\r
1965 // Get the current jack server sample rate.
\r
1966 info.sampleRates.clear();
\r
1967 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1969 // Count the available ports containing the client name as device
\r
1970 // channels. Jack "input ports" equal RtAudio output channels.
\r
1971 unsigned int nChannels = 0;
\r
1972 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1974 while ( ports[ nChannels ] ) nChannels++;
\r
1976 info.outputChannels = nChannels;
\r
1979 // Jack "output ports" equal RtAudio input channels.
\r
1981 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1983 while ( ports[ nChannels ] ) nChannels++;
\r
1985 info.inputChannels = nChannels;
\r
1988 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1989 jack_client_close(client);
\r
1990 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1991 error( RtAudioError::WARNING );
\r
1995 // If device opens for both playback and capture, we determine the channels.
\r
1996 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1997 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1999 // Jack always uses 32-bit floats.
\r
2000 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2002 // Jack doesn't provide default devices so we'll use the first available one.
\r
2003 if ( device == 0 && info.outputChannels > 0 )
\r
2004 info.isDefaultOutput = true;
\r
2005 if ( device == 0 && info.inputChannels > 0 )
\r
2006 info.isDefaultInput = true;
\r
2008 jack_client_close(client);
\r
2009 info.probed = true;
\r
2013 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2015 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2017 RtApiJack *object = (RtApiJack *) info->object;
\r
2018 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2023 // This function will be called by a spawned thread when the Jack
\r
2024 // server signals that it is shutting down. It is necessary to handle
\r
2025 // it this way because the jackShutdown() function must return before
\r
2026 // the jack_deactivate() function (in closeStream()) will return.
\r
2027 static void *jackCloseStream( void *ptr )
\r
2029 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2030 RtApiJack *object = (RtApiJack *) info->object;
\r
2032 object->closeStream();
\r
2034 pthread_exit( NULL );
\r
2036 static void jackShutdown( void *infoPointer )
\r
2038 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2039 RtApiJack *object = (RtApiJack *) info->object;
\r
2041 // Check current stream state. If stopped, then we'll assume this
\r
2042 // was called as a result of a call to RtApiJack::stopStream (the
\r
2043 // deactivation of a client handle causes this function to be called).
\r
2044 // If not, we'll assume the Jack server is shutting down or some
\r
2045 // other problem occurred and we should close the stream.
\r
2046 if ( object->isStreamRunning() == false ) return;
\r
2048 ThreadHandle threadId;
\r
2049 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2050 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2053 static int jackXrun( void *infoPointer )
\r
2055 JackHandle *handle = (JackHandle *) infoPointer;
\r
2057 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2058 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2063 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2064 unsigned int firstChannel, unsigned int sampleRate,
\r
2065 RtAudioFormat format, unsigned int *bufferSize,
\r
2066 RtAudio::StreamOptions *options )
\r
2068 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2070 // Look for jack server and try to become a client (only do once per stream).
\r
2071 jack_client_t *client = 0;
\r
2072 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2073 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2074 jack_status_t *status = NULL;
\r
2075 if ( options && !options->streamName.empty() )
\r
2076 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2078 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2079 if ( client == 0 ) {
\r
2080 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2081 error( RtAudioError::WARNING );
\r
2086 // The handle must have been created on an earlier pass.
\r
2087 client = handle->client;
\r
2090 const char **ports;
\r
2091 std::string port, previousPort, deviceName;
\r
2092 unsigned int nPorts = 0, nDevices = 0;
\r
2093 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2095 // Parse the port names up to the first colon (:).
\r
2096 size_t iColon = 0;
\r
2098 port = (char *) ports[ nPorts ];
\r
2099 iColon = port.find(":");
\r
2100 if ( iColon != std::string::npos ) {
\r
2101 port = port.substr( 0, iColon );
\r
2102 if ( port != previousPort ) {
\r
2103 if ( nDevices == device ) deviceName = port;
\r
2105 previousPort = port;
\r
2108 } while ( ports[++nPorts] );
\r
2112 if ( device >= nDevices ) {
\r
2113 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2117 // Count the available ports containing the client name as device
\r
2118 // channels. Jack "input ports" equal RtAudio output channels.
\r
2119 unsigned int nChannels = 0;
\r
2120 unsigned long flag = JackPortIsInput;
\r
2121 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2122 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2124 while ( ports[ nChannels ] ) nChannels++;
\r
2128 // Compare the jack ports for specified client to the requested number of channels.
\r
2129 if ( nChannels < (channels + firstChannel) ) {
\r
2130 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2131 errorText_ = errorStream_.str();
\r
2135 // Check the jack server sample rate.
\r
2136 unsigned int jackRate = jack_get_sample_rate( client );
\r
2137 if ( sampleRate != jackRate ) {
\r
2138 jack_client_close( client );
\r
2139 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2140 errorText_ = errorStream_.str();
\r
2143 stream_.sampleRate = jackRate;
\r
2145 // Get the latency of the JACK port.
\r
2146 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2147 if ( ports[ firstChannel ] ) {
\r
2148 // Added by Ge Wang
\r
2149 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2150 // the range (usually the min and max are equal)
\r
2151 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2152 // get the latency range
\r
2153 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2154 // be optimistic, use the min!
\r
2155 stream_.latency[mode] = latrange.min;
\r
2156 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2160 // The jack server always uses 32-bit floating-point data.
\r
2161 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2162 stream_.userFormat = format;
\r
2164 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2165 else stream_.userInterleaved = true;
\r
2167 // Jack always uses non-interleaved buffers.
\r
2168 stream_.deviceInterleaved[mode] = false;
\r
2170 // Jack always provides host byte-ordered data.
\r
2171 stream_.doByteSwap[mode] = false;
\r
2173 // Get the buffer size. The buffer size and number of buffers
\r
2174 // (periods) is set when the jack server is started.
\r
2175 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2176 *bufferSize = stream_.bufferSize;
\r
2178 stream_.nDeviceChannels[mode] = channels;
\r
2179 stream_.nUserChannels[mode] = channels;
\r
2181 // Set flags for buffer conversion.
\r
2182 stream_.doConvertBuffer[mode] = false;
\r
2183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2184 stream_.doConvertBuffer[mode] = true;
\r
2185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2186 stream_.nUserChannels[mode] > 1 )
\r
2187 stream_.doConvertBuffer[mode] = true;
\r
2189 // Allocate our JackHandle structure for the stream.
\r
2190 if ( handle == 0 ) {
\r
2192 handle = new JackHandle;
\r
2194 catch ( std::bad_alloc& ) {
\r
2195 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2199 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2200 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2203 stream_.apiHandle = (void *) handle;
\r
2204 handle->client = client;
\r
2206 handle->deviceName[mode] = deviceName;
\r
2208 // Allocate necessary internal buffers.
\r
2209 unsigned long bufferBytes;
\r
2210 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2211 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2212 if ( stream_.userBuffer[mode] == NULL ) {
\r
2213 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2217 if ( stream_.doConvertBuffer[mode] ) {
\r
2219 bool makeBuffer = true;
\r
2220 if ( mode == OUTPUT )
\r
2221 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2222 else { // mode == INPUT
\r
2223 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2224 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2225 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2226 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2230 if ( makeBuffer ) {
\r
2231 bufferBytes *= *bufferSize;
\r
2232 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2233 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2234 if ( stream_.deviceBuffer == NULL ) {
\r
2235 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2241 // Allocate memory for the Jack ports (channels) identifiers.
\r
2242 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2243 if ( handle->ports[mode] == NULL ) {
\r
2244 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2248 stream_.device[mode] = device;
\r
2249 stream_.channelOffset[mode] = firstChannel;
\r
2250 stream_.state = STREAM_STOPPED;
\r
2251 stream_.callbackInfo.object = (void *) this;
\r
2253 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2254 // We had already set up the stream for output.
\r
2255 stream_.mode = DUPLEX;
\r
2257 stream_.mode = mode;
\r
2258 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2259 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2260 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2263 // Register our ports.
\r
2265 if ( mode == OUTPUT ) {
\r
2266 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2267 snprintf( label, 64, "outport %d", i );
\r
2268 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2269 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2273 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2274 snprintf( label, 64, "inport %d", i );
\r
2275 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2276 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2280 // Setup the buffer conversion information structure. We don't use
\r
2281 // buffers to do channel offsets, so we override that parameter
\r
2283 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2289 pthread_cond_destroy( &handle->condition );
\r
2290 jack_client_close( handle->client );
\r
2292 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2293 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2296 stream_.apiHandle = 0;
\r
2299 for ( int i=0; i<2; i++ ) {
\r
2300 if ( stream_.userBuffer[i] ) {
\r
2301 free( stream_.userBuffer[i] );
\r
2302 stream_.userBuffer[i] = 0;
\r
2306 if ( stream_.deviceBuffer ) {
\r
2307 free( stream_.deviceBuffer );
\r
2308 stream_.deviceBuffer = 0;
\r
2314 void RtApiJack :: closeStream( void )
\r
2316 if ( stream_.state == STREAM_CLOSED ) {
\r
2317 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2318 error( RtAudioError::WARNING );
\r
2322 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2325 if ( stream_.state == STREAM_RUNNING )
\r
2326 jack_deactivate( handle->client );
\r
2328 jack_client_close( handle->client );
\r
2332 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2333 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2334 pthread_cond_destroy( &handle->condition );
\r
2336 stream_.apiHandle = 0;
\r
2339 for ( int i=0; i<2; i++ ) {
\r
2340 if ( stream_.userBuffer[i] ) {
\r
2341 free( stream_.userBuffer[i] );
\r
2342 stream_.userBuffer[i] = 0;
\r
2346 if ( stream_.deviceBuffer ) {
\r
2347 free( stream_.deviceBuffer );
\r
2348 stream_.deviceBuffer = 0;
\r
2351 stream_.mode = UNINITIALIZED;
\r
2352 stream_.state = STREAM_CLOSED;
\r
2355 void RtApiJack :: startStream( void )
\r
2358 if ( stream_.state == STREAM_RUNNING ) {
\r
2359 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2360 error( RtAudioError::WARNING );
\r
2364 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2365 int result = jack_activate( handle->client );
\r
2367 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2371 const char **ports;
\r
2373 // Get the list of available ports.
\r
2374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2376 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2377 if ( ports == NULL) {
\r
2378 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2382 // Now make the port connections. Since RtAudio wasn't designed to
\r
2383 // allow the user to select particular channels of a device, we'll
\r
2384 // just open the first "nChannels" ports with offset.
\r
2385 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2387 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2388 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2391 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2398 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2400 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2401 if ( ports == NULL) {
\r
2402 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2406 // Now make the port connections. See note above.
\r
2407 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2409 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2410 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2413 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2420 handle->drainCounter = 0;
\r
2421 handle->internalDrain = false;
\r
2422 stream_.state = STREAM_RUNNING;
\r
2425 if ( result == 0 ) return;
\r
2426 error( RtAudioError::SYSTEM_ERROR );
\r
2429 void RtApiJack :: stopStream( void )
\r
2432 if ( stream_.state == STREAM_STOPPED ) {
\r
2433 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2434 error( RtAudioError::WARNING );
\r
2438 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2439 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2441 if ( handle->drainCounter == 0 ) {
\r
2442 handle->drainCounter = 2;
\r
2443 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2447 jack_deactivate( handle->client );
\r
2448 stream_.state = STREAM_STOPPED;
\r
2451 void RtApiJack :: abortStream( void )
\r
2454 if ( stream_.state == STREAM_STOPPED ) {
\r
2455 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2456 error( RtAudioError::WARNING );
\r
2460 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2461 handle->drainCounter = 2;
\r
2466 // This function will be called by a spawned thread when the user
\r
2467 // callback function signals that the stream should be stopped or
\r
2468 // aborted. It is necessary to handle it this way because the
\r
2469 // callbackEvent() function must return before the jack_deactivate()
\r
2470 // function will return.
\r
2471 static void *jackStopStream( void *ptr )
\r
2473 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2474 RtApiJack *object = (RtApiJack *) info->object;
\r
2476 object->stopStream();
\r
2477 pthread_exit( NULL );
\r
2480 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2482 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2483 if ( stream_.state == STREAM_CLOSED ) {
\r
2484 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2485 error( RtAudioError::WARNING );
\r
2488 if ( stream_.bufferSize != nframes ) {
\r
2489 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2490 error( RtAudioError::WARNING );
\r
2494 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2495 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2497 // Check if we were draining the stream and signal is finished.
\r
2498 if ( handle->drainCounter > 3 ) {
\r
2499 ThreadHandle threadId;
\r
2501 stream_.state = STREAM_STOPPING;
\r
2502 if ( handle->internalDrain == true )
\r
2503 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2505 pthread_cond_signal( &handle->condition );
\r
2509 // Invoke user callback first, to get fresh output data.
\r
2510 if ( handle->drainCounter == 0 ) {
\r
2511 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2512 double streamTime = getStreamTime();
\r
2513 RtAudioStreamStatus status = 0;
\r
2514 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2515 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2516 handle->xrun[0] = false;
\r
2518 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2519 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2520 handle->xrun[1] = false;
\r
2522 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2523 stream_.bufferSize, streamTime, status, info->userData );
\r
2524 if ( cbReturnValue == 2 ) {
\r
2525 stream_.state = STREAM_STOPPING;
\r
2526 handle->drainCounter = 2;
\r
2528 pthread_create( &id, NULL, jackStopStream, info );
\r
2531 else if ( cbReturnValue == 1 ) {
\r
2532 handle->drainCounter = 1;
\r
2533 handle->internalDrain = true;
\r
2537 jack_default_audio_sample_t *jackbuffer;
\r
2538 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2539 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2541 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2543 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2544 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2545 memset( jackbuffer, 0, bufferBytes );
\r
2549 else if ( stream_.doConvertBuffer[0] ) {
\r
2551 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2553 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2554 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2555 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2558 else { // no buffer conversion
\r
2559 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2560 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2561 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2565 if ( handle->drainCounter ) {
\r
2566 handle->drainCounter++;
\r
2571 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2573 if ( stream_.doConvertBuffer[1] ) {
\r
2574 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2575 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2576 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2578 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2580 else { // no buffer conversion
\r
2581 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2582 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2583 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2589 RtApi::tickStreamTime();
\r
2592 //******************** End of __UNIX_JACK__ *********************//
\r
2595 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2597 // The ASIO API is designed around a callback scheme, so this
\r
2598 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2599 // Jack. The primary constraint with ASIO is that it only allows
\r
2600 // access to a single driver at a time. Thus, it is not possible to
\r
2601 // have more than one simultaneous RtAudio stream.
\r
2603 // This implementation also requires a number of external ASIO files
\r
2604 // and a few global variables. The ASIO callback scheme does not
\r
2605 // allow for the passing of user data, so we must create a global
\r
2606 // pointer to our callbackInfo structure.
\r
2608 // On unix systems, we make use of a pthread condition variable.
\r
2609 // Since there is no equivalent in Windows, I hacked something based
\r
2610 // on information found in
\r
2611 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2613 #include "asiosys.h"
\r
2615 #include "iasiothiscallresolver.h"
\r
2616 #include "asiodrivers.h"
\r
2619 static AsioDrivers drivers;
\r
2620 static ASIOCallbacks asioCallbacks;
\r
2621 static ASIODriverInfo driverInfo;
\r
2622 static CallbackInfo *asioCallbackInfo;
\r
2623 static bool asioXRun;
\r
2625 struct AsioHandle {
\r
2626 int drainCounter; // Tracks callback counts when draining
\r
2627 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2628 ASIOBufferInfo *bufferInfos;
\r
2632 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2635 // Function declarations (definitions at end of section)
\r
2636 static const char* getAsioErrorString( ASIOError result );
\r
2637 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2638 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2640 RtApiAsio :: RtApiAsio()
\r
2642 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2643 // CoInitialize beforehand, but it must be for appartment threading
\r
2644 // (in which case, CoInitilialize will return S_FALSE here).
\r
2645 coInitialized_ = false;
\r
2646 HRESULT hr = CoInitialize( NULL );
\r
2647 if ( FAILED(hr) ) {
\r
2648 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2649 error( RtAudioError::WARNING );
\r
2651 coInitialized_ = true;
\r
2653 drivers.removeCurrentDriver();
\r
2654 driverInfo.asioVersion = 2;
\r
2656 // See note in DirectSound implementation about GetDesktopWindow().
\r
2657 driverInfo.sysRef = GetForegroundWindow();
\r
2660 RtApiAsio :: ~RtApiAsio()
\r
2662 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2663 if ( coInitialized_ ) CoUninitialize();
\r
2666 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2668 return (unsigned int) drivers.asioGetNumDev();
\r
2671 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2673 RtAudio::DeviceInfo info;
\r
2674 info.probed = false;
\r
2677 unsigned int nDevices = getDeviceCount();
\r
2678 if ( nDevices == 0 ) {
\r
2679 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2680 error( RtAudioError::INVALID_USE );
\r
2684 if ( device >= nDevices ) {
\r
2685 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2686 error( RtAudioError::INVALID_USE );
\r
2690 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2691 if ( stream_.state != STREAM_CLOSED ) {
\r
2692 if ( device >= devices_.size() ) {
\r
2693 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2694 error( RtAudioError::WARNING );
\r
2697 return devices_[ device ];
\r
2700 char driverName[32];
\r
2701 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2702 if ( result != ASE_OK ) {
\r
2703 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2704 errorText_ = errorStream_.str();
\r
2705 error( RtAudioError::WARNING );
\r
2709 info.name = driverName;
\r
2711 if ( !drivers.loadDriver( driverName ) ) {
\r
2712 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2713 errorText_ = errorStream_.str();
\r
2714 error( RtAudioError::WARNING );
\r
2718 result = ASIOInit( &driverInfo );
\r
2719 if ( result != ASE_OK ) {
\r
2720 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2721 errorText_ = errorStream_.str();
\r
2722 error( RtAudioError::WARNING );
\r
2726 // Determine the device channel information.
\r
2727 long inputChannels, outputChannels;
\r
2728 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2729 if ( result != ASE_OK ) {
\r
2730 drivers.removeCurrentDriver();
\r
2731 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2732 errorText_ = errorStream_.str();
\r
2733 error( RtAudioError::WARNING );
\r
2737 info.outputChannels = outputChannels;
\r
2738 info.inputChannels = inputChannels;
\r
2739 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2740 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2742 // Determine the supported sample rates.
\r
2743 info.sampleRates.clear();
\r
2744 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2745 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2746 if ( result == ASE_OK )
\r
2747 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2750 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2751 ASIOChannelInfo channelInfo;
\r
2752 channelInfo.channel = 0;
\r
2753 channelInfo.isInput = true;
\r
2754 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2755 result = ASIOGetChannelInfo( &channelInfo );
\r
2756 if ( result != ASE_OK ) {
\r
2757 drivers.removeCurrentDriver();
\r
2758 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2759 errorText_ = errorStream_.str();
\r
2760 error( RtAudioError::WARNING );
\r
2764 info.nativeFormats = 0;
\r
2765 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2766 info.nativeFormats |= RTAUDIO_SINT16;
\r
2767 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2768 info.nativeFormats |= RTAUDIO_SINT32;
\r
2769 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2770 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2771 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2772 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2773 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2774 info.nativeFormats |= RTAUDIO_SINT24;
\r
2776 if ( info.outputChannels > 0 )
\r
2777 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2778 if ( info.inputChannels > 0 )
\r
2779 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2781 info.probed = true;
\r
2782 drivers.removeCurrentDriver();
\r
2786 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2788 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2789 object->callbackEvent( index );
\r
2792 void RtApiAsio :: saveDeviceInfo( void )
\r
2796 unsigned int nDevices = getDeviceCount();
\r
2797 devices_.resize( nDevices );
\r
2798 for ( unsigned int i=0; i<nDevices; i++ )
\r
2799 devices_[i] = getDeviceInfo( i );
\r
2802 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2803 unsigned int firstChannel, unsigned int sampleRate,
\r
2804 RtAudioFormat format, unsigned int *bufferSize,
\r
2805 RtAudio::StreamOptions *options )
\r
2807 // For ASIO, a duplex stream MUST use the same driver.
\r
2808 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2809 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2813 char driverName[32];
\r
2814 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2815 if ( result != ASE_OK ) {
\r
2816 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2817 errorText_ = errorStream_.str();
\r
2821 // Only load the driver once for duplex stream.
\r
2822 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2823 // The getDeviceInfo() function will not work when a stream is open
\r
2824 // because ASIO does not allow multiple devices to run at the same
\r
2825 // time. Thus, we'll probe the system before opening a stream and
\r
2826 // save the results for use by getDeviceInfo().
\r
2827 this->saveDeviceInfo();
\r
2829 if ( !drivers.loadDriver( driverName ) ) {
\r
2830 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2831 errorText_ = errorStream_.str();
\r
2835 result = ASIOInit( &driverInfo );
\r
2836 if ( result != ASE_OK ) {
\r
2837 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2838 errorText_ = errorStream_.str();
\r
2843 // Check the device channel count.
\r
2844 long inputChannels, outputChannels;
\r
2845 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2846 if ( result != ASE_OK ) {
\r
2847 drivers.removeCurrentDriver();
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2853 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2854 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2855 drivers.removeCurrentDriver();
\r
2856 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2857 errorText_ = errorStream_.str();
\r
2860 stream_.nDeviceChannels[mode] = channels;
\r
2861 stream_.nUserChannels[mode] = channels;
\r
2862 stream_.channelOffset[mode] = firstChannel;
\r
2864 // Verify the sample rate is supported.
\r
2865 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2866 if ( result != ASE_OK ) {
\r
2867 drivers.removeCurrentDriver();
\r
2868 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2869 errorText_ = errorStream_.str();
\r
2873 // Get the current sample rate
\r
2874 ASIOSampleRate currentRate;
\r
2875 result = ASIOGetSampleRate( ¤tRate );
\r
2876 if ( result != ASE_OK ) {
\r
2877 drivers.removeCurrentDriver();
\r
2878 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2879 errorText_ = errorStream_.str();
\r
2883 // Set the sample rate only if necessary
\r
2884 if ( currentRate != sampleRate ) {
\r
2885 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2886 if ( result != ASE_OK ) {
\r
2887 drivers.removeCurrentDriver();
\r
2888 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2889 errorText_ = errorStream_.str();
\r
2894 // Determine the driver data type.
\r
2895 ASIOChannelInfo channelInfo;
\r
2896 channelInfo.channel = 0;
\r
2897 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2898 else channelInfo.isInput = true;
\r
2899 result = ASIOGetChannelInfo( &channelInfo );
\r
2900 if ( result != ASE_OK ) {
\r
2901 drivers.removeCurrentDriver();
\r
2902 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2903 errorText_ = errorStream_.str();
\r
2907 // Assuming WINDOWS host is always little-endian.
\r
2908 stream_.doByteSwap[mode] = false;
\r
2909 stream_.userFormat = format;
\r
2910 stream_.deviceFormat[mode] = 0;
\r
2911 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2912 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2913 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2915 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2916 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2917 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2919 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2920 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2921 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2923 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2924 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2925 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2927 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2928 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2929 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2932 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2933 drivers.removeCurrentDriver();
\r
2934 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2935 errorText_ = errorStream_.str();
\r
2939 // Set the buffer size. For a duplex stream, this will end up
\r
2940 // setting the buffer size based on the input constraints, which
\r
2942 long minSize, maxSize, preferSize, granularity;
\r
2943 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2944 if ( result != ASE_OK ) {
\r
2945 drivers.removeCurrentDriver();
\r
2946 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2947 errorText_ = errorStream_.str();
\r
2951 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2952 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2953 else if ( granularity == -1 ) {
\r
2954 // Make sure bufferSize is a power of two.
\r
2955 int log2_of_min_size = 0;
\r
2956 int log2_of_max_size = 0;
\r
2958 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2959 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2960 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2963 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2964 int min_delta_num = log2_of_min_size;
\r
2966 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2967 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2968 if (current_delta < min_delta) {
\r
2969 min_delta = current_delta;
\r
2970 min_delta_num = i;
\r
2974 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2975 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2976 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2978 else if ( granularity != 0 ) {
\r
2979 // Set to an even multiple of granularity, rounding up.
\r
2980 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2983 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2984 drivers.removeCurrentDriver();
\r
2985 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2989 stream_.bufferSize = *bufferSize;
\r
2990 stream_.nBuffers = 2;
\r
2992 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2993 else stream_.userInterleaved = true;
\r
2995 // ASIO always uses non-interleaved buffers.
\r
2996 stream_.deviceInterleaved[mode] = false;
\r
2998 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3000 if ( handle == 0 ) {
\r
3002 handle = new AsioHandle;
\r
3004 catch ( std::bad_alloc& ) {
\r
3005 //if ( handle == NULL ) {
\r
3006 drivers.removeCurrentDriver();
\r
3007 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3010 handle->bufferInfos = 0;
\r
3012 // Create a manual-reset event.
\r
3013 handle->condition = CreateEvent( NULL, // no security
\r
3014 TRUE, // manual-reset
\r
3015 FALSE, // non-signaled initially
\r
3016 NULL ); // unnamed
\r
3017 stream_.apiHandle = (void *) handle;
\r
3020 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3021 // and output separately, we'll have to dispose of previously
\r
3022 // created output buffers for a duplex stream.
\r
3023 long inputLatency, outputLatency;
\r
3024 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3025 ASIODisposeBuffers();
\r
3026 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3029 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3030 bool buffersAllocated = false;
\r
3031 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3032 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3033 if ( handle->bufferInfos == NULL ) {
\r
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3035 errorText_ = errorStream_.str();
\r
3039 ASIOBufferInfo *infos;
\r
3040 infos = handle->bufferInfos;
\r
3041 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3042 infos->isInput = ASIOFalse;
\r
3043 infos->channelNum = i + stream_.channelOffset[0];
\r
3044 infos->buffers[0] = infos->buffers[1] = 0;
\r
3046 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3047 infos->isInput = ASIOTrue;
\r
3048 infos->channelNum = i + stream_.channelOffset[1];
\r
3049 infos->buffers[0] = infos->buffers[1] = 0;
\r
3052 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3053 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3054 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3055 asioCallbacks.asioMessage = &asioMessages;
\r
3056 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3057 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3058 if ( result != ASE_OK ) {
\r
3059 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3060 errorText_ = errorStream_.str();
\r
3063 buffersAllocated = true;
\r
3065 // Set flags for buffer conversion.
\r
3066 stream_.doConvertBuffer[mode] = false;
\r
3067 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3068 stream_.doConvertBuffer[mode] = true;
\r
3069 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3070 stream_.nUserChannels[mode] > 1 )
\r
3071 stream_.doConvertBuffer[mode] = true;
\r
3073 // Allocate necessary internal buffers
\r
3074 unsigned long bufferBytes;
\r
3075 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3076 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3077 if ( stream_.userBuffer[mode] == NULL ) {
\r
3078 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3082 if ( stream_.doConvertBuffer[mode] ) {
\r
3084 bool makeBuffer = true;
\r
3085 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3086 if ( mode == INPUT ) {
\r
3087 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3088 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3089 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3093 if ( makeBuffer ) {
\r
3094 bufferBytes *= *bufferSize;
\r
3095 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3096 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3097 if ( stream_.deviceBuffer == NULL ) {
\r
3098 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3104 stream_.sampleRate = sampleRate;
\r
3105 stream_.device[mode] = device;
\r
3106 stream_.state = STREAM_STOPPED;
\r
3107 asioCallbackInfo = &stream_.callbackInfo;
\r
3108 stream_.callbackInfo.object = (void *) this;
\r
3109 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3110 // We had already set up an output stream.
\r
3111 stream_.mode = DUPLEX;
\r
3113 stream_.mode = mode;
\r
3115 // Determine device latencies
\r
3116 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3117 if ( result != ASE_OK ) {
\r
3118 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3119 errorText_ = errorStream_.str();
\r
3120 error( RtAudioError::WARNING); // warn but don't fail
\r
3123 stream_.latency[0] = outputLatency;
\r
3124 stream_.latency[1] = inputLatency;
\r
3127 // Setup the buffer conversion information structure. We don't use
\r
3128 // buffers to do channel offsets, so we override that parameter
\r
3130 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3135 if ( buffersAllocated )
\r
3136 ASIODisposeBuffers();
\r
3137 drivers.removeCurrentDriver();
\r
3140 CloseHandle( handle->condition );
\r
3141 if ( handle->bufferInfos )
\r
3142 free( handle->bufferInfos );
\r
3144 stream_.apiHandle = 0;
\r
3147 for ( int i=0; i<2; i++ ) {
\r
3148 if ( stream_.userBuffer[i] ) {
\r
3149 free( stream_.userBuffer[i] );
\r
3150 stream_.userBuffer[i] = 0;
\r
3154 if ( stream_.deviceBuffer ) {
\r
3155 free( stream_.deviceBuffer );
\r
3156 stream_.deviceBuffer = 0;
\r
3162 void RtApiAsio :: closeStream()
\r
3164 if ( stream_.state == STREAM_CLOSED ) {
\r
3165 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3166 error( RtAudioError::WARNING );
\r
3170 if ( stream_.state == STREAM_RUNNING ) {
\r
3171 stream_.state = STREAM_STOPPED;
\r
3174 ASIODisposeBuffers();
\r
3175 drivers.removeCurrentDriver();
\r
3177 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3179 CloseHandle( handle->condition );
\r
3180 if ( handle->bufferInfos )
\r
3181 free( handle->bufferInfos );
\r
3183 stream_.apiHandle = 0;
\r
3186 for ( int i=0; i<2; i++ ) {
\r
3187 if ( stream_.userBuffer[i] ) {
\r
3188 free( stream_.userBuffer[i] );
\r
3189 stream_.userBuffer[i] = 0;
\r
3193 if ( stream_.deviceBuffer ) {
\r
3194 free( stream_.deviceBuffer );
\r
3195 stream_.deviceBuffer = 0;
\r
3198 stream_.mode = UNINITIALIZED;
\r
3199 stream_.state = STREAM_CLOSED;
\r
3202 bool stopThreadCalled = false;
\r
3204 void RtApiAsio :: startStream()
\r
3207 if ( stream_.state == STREAM_RUNNING ) {
\r
3208 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3209 error( RtAudioError::WARNING );
\r
3213 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3214 ASIOError result = ASIOStart();
\r
3215 if ( result != ASE_OK ) {
\r
3216 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3217 errorText_ = errorStream_.str();
\r
3221 handle->drainCounter = 0;
\r
3222 handle->internalDrain = false;
\r
3223 ResetEvent( handle->condition );
\r
3224 stream_.state = STREAM_RUNNING;
\r
3228 stopThreadCalled = false;
\r
3230 if ( result == ASE_OK ) return;
\r
3231 error( RtAudioError::SYSTEM_ERROR );
\r
3234 void RtApiAsio :: stopStream()
\r
3237 if ( stream_.state == STREAM_STOPPED ) {
\r
3238 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3239 error( RtAudioError::WARNING );
\r
3243 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3244 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3245 if ( handle->drainCounter == 0 ) {
\r
3246 handle->drainCounter = 2;
\r
3247 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3251 stream_.state = STREAM_STOPPED;
\r
3253 ASIOError result = ASIOStop();
\r
3254 if ( result != ASE_OK ) {
\r
3255 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3256 errorText_ = errorStream_.str();
\r
3259 if ( result == ASE_OK ) return;
\r
3260 error( RtAudioError::SYSTEM_ERROR );
\r
3263 void RtApiAsio :: abortStream()
\r
3266 if ( stream_.state == STREAM_STOPPED ) {
\r
3267 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3268 error( RtAudioError::WARNING );
\r
3272 // The following lines were commented-out because some behavior was
\r
3273 // noted where the device buffers need to be zeroed to avoid
\r
3274 // continuing sound, even when the device buffers are completely
\r
3275 // disposed. So now, calling abort is the same as calling stop.
\r
3276 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3277 // handle->drainCounter = 2;
\r
3281 // This function will be called by a spawned thread when the user
\r
3282 // callback function signals that the stream should be stopped or
\r
3283 // aborted. It is necessary to handle it this way because the
\r
3284 // callbackEvent() function must return before the ASIOStop()
\r
3285 // function will return.
\r
3286 static unsigned __stdcall asioStopStream( void *ptr )
\r
3288 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3289 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3291 object->stopStream();
\r
3292 _endthreadex( 0 );
\r
3296 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3298 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3299 if ( stream_.state == STREAM_CLOSED ) {
\r
3300 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3301 error( RtAudioError::WARNING );
\r
3305 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3306 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3308 // Check if we were draining the stream and signal if finished.
\r
3309 if ( handle->drainCounter > 3 ) {
\r
3311 stream_.state = STREAM_STOPPING;
\r
3312 if ( handle->internalDrain == false )
\r
3313 SetEvent( handle->condition );
\r
3314 else { // spawn a thread to stop the stream
\r
3315 unsigned threadId;
\r
3316 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3317 &stream_.callbackInfo, 0, &threadId );
\r
3322 // Invoke user callback to get fresh output data UNLESS we are
\r
3323 // draining stream.
\r
3324 if ( handle->drainCounter == 0 ) {
\r
3325 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3326 double streamTime = getStreamTime();
\r
3327 RtAudioStreamStatus status = 0;
\r
3328 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3329 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3332 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3333 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3336 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3337 stream_.bufferSize, streamTime, status, info->userData );
\r
3338 if ( cbReturnValue == 2 ) {
\r
3339 stream_.state = STREAM_STOPPING;
\r
3340 handle->drainCounter = 2;
\r
3341 unsigned threadId;
\r
3342 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3343 &stream_.callbackInfo, 0, &threadId );
\r
3346 else if ( cbReturnValue == 1 ) {
\r
3347 handle->drainCounter = 1;
\r
3348 handle->internalDrain = true;
\r
3352 unsigned int nChannels, bufferBytes, i, j;
\r
3353 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3356 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3358 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3360 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3361 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3362 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3366 else if ( stream_.doConvertBuffer[0] ) {
\r
3368 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3369 if ( stream_.doByteSwap[0] )
\r
3370 byteSwapBuffer( stream_.deviceBuffer,
\r
3371 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3372 stream_.deviceFormat[0] );
\r
3374 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3375 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3376 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3377 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3383 if ( stream_.doByteSwap[0] )
\r
3384 byteSwapBuffer( stream_.userBuffer[0],
\r
3385 stream_.bufferSize * stream_.nUserChannels[0],
\r
3386 stream_.userFormat );
\r
3388 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3389 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3390 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3391 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3396 if ( handle->drainCounter ) {
\r
3397 handle->drainCounter++;
\r
3402 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3404 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3406 if (stream_.doConvertBuffer[1]) {
\r
3408 // Always interleave ASIO input data.
\r
3409 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3410 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3411 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3412 handle->bufferInfos[i].buffers[bufferIndex],
\r
3416 if ( stream_.doByteSwap[1] )
\r
3417 byteSwapBuffer( stream_.deviceBuffer,
\r
3418 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3419 stream_.deviceFormat[1] );
\r
3420 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3424 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3425 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3426 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3427 handle->bufferInfos[i].buffers[bufferIndex],
\r
3432 if ( stream_.doByteSwap[1] )
\r
3433 byteSwapBuffer( stream_.userBuffer[1],
\r
3434 stream_.bufferSize * stream_.nUserChannels[1],
\r
3435 stream_.userFormat );
\r
3440 // The following call was suggested by Malte Clasen. While the API
\r
3441 // documentation indicates it should not be required, some device
\r
3442 // drivers apparently do not function correctly without it.
\r
3443 ASIOOutputReady();
\r
3445 RtApi::tickStreamTime();
\r
3449 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3451 // The ASIO documentation says that this usually only happens during
\r
3452 // external sync. Audio processing is not stopped by the driver,
\r
3453 // actual sample rate might not have even changed, maybe only the
\r
3454 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3457 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3459 object->stopStream();
\r
3461 catch ( RtAudioError &exception ) {
\r
3462 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3466 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3469 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3473 switch( selector ) {
\r
3474 case kAsioSelectorSupported:
\r
3475 if ( value == kAsioResetRequest
\r
3476 || value == kAsioEngineVersion
\r
3477 || value == kAsioResyncRequest
\r
3478 || value == kAsioLatenciesChanged
\r
3479 // The following three were added for ASIO 2.0, you don't
\r
3480 // necessarily have to support them.
\r
3481 || value == kAsioSupportsTimeInfo
\r
3482 || value == kAsioSupportsTimeCode
\r
3483 || value == kAsioSupportsInputMonitor)
\r
3486 case kAsioResetRequest:
\r
3487 // Defer the task and perform the reset of the driver during the
\r
3488 // next "safe" situation. You cannot reset the driver right now,
\r
3489 // as this code is called from the driver. Reset the driver is
\r
3490 // done by completely destruct is. I.e. ASIOStop(),
\r
3491 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3493 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3496 case kAsioResyncRequest:
\r
3497 // This informs the application that the driver encountered some
\r
3498 // non-fatal data loss. It is used for synchronization purposes
\r
3499 // of different media. Added mainly to work around the Win16Mutex
\r
3500 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3501 // which could lose data because the Mutex was held too long by
\r
3502 // another thread. However a driver can issue it in other
\r
3503 // situations, too.
\r
3504 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3508 case kAsioLatenciesChanged:
\r
3509 // This will inform the host application that the drivers were
\r
3510 // latencies changed. Beware, it this does not mean that the
\r
3511 // buffer sizes have changed! You might need to update internal
\r
3513 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3516 case kAsioEngineVersion:
\r
3517 // Return the supported ASIO version of the host application. If
\r
3518 // a host application does not implement this selector, ASIO 1.0
\r
3519 // is assumed by the driver.
\r
3522 case kAsioSupportsTimeInfo:
\r
3523 // Informs the driver whether the
\r
3524 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3525 // For compatibility with ASIO 1.0 drivers the host application
\r
3526 // should always support the "old" bufferSwitch method, too.
\r
3529 case kAsioSupportsTimeCode:
\r
3530 // Informs the driver whether application is interested in time
\r
3531 // code info. If an application does not need to know about time
\r
3532 // code, the driver has less work to do.
\r
3539 static const char* getAsioErrorString( ASIOError result )
\r
3544 const char*message;
\r
3547 static const Messages m[] =
\r
3549 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3550 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3551 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3552 { ASE_InvalidMode, "Invalid mode." },
\r
3553 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3554 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3555 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3558 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3559 if ( m[i].value == result ) return m[i].message;
\r
3561 return "Unknown error.";
\r
3563 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3567 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3569 // Modified by Robin Davies, October 2005
\r
3570 // - Improvements to DirectX pointer chasing.
\r
3571 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3572 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3573 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3574 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3576 #include <dsound.h>
\r
3577 #include <assert.h>
\r
3578 #include <algorithm>
\r
3580 #if defined(__MINGW32__)
\r
3581 // missing from latest mingw winapi
\r
3582 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3583 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3584 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3585 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3588 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3590 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3591 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3594 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3596 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3597 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3598 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3599 return pointer >= earlierPointer && pointer < laterPointer;
\r
// A structure to hold various information related to the DirectSound
// API implementation.
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  // Per-direction state; index 0 is the playback side, index 1 the
  // capture side (see the LPDIRECTSOUND / LPDIRECTSOUNDCAPTURE casts
  // in probeDeviceOpen()'s cleanup and in closeStream()).
  UINT bufferPointer[2];
  DWORD dsBufferSize[2];      // actual device buffer size, in bytes
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  // Zero all counters, flags and per-direction bookkeeping on construction.
  :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; it
// records each reported device into a DsProbeData's device vector.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPVOID lpContext );

// Maps a DirectSound HRESULT/error code to a readable message.
static const char* getErrorString( int code );

// Entry point for the DirectSound callback thread (see _beginthreadex
// in probeDeviceOpen()).
static unsigned __stdcall callbackHandler( void *ptr );

// Default-construct a device record: not yet (re)found, with neither
// the playback (validId[0]) nor capture (validId[1]) GUID valid.
  : found(false) { validId[0] = false; validId[1] = false; }

// Context passed to deviceQueryCallback: which direction is being
// enumerated plus the device list to fill in.
struct DsProbeData {
  std::vector<struct DsDevice>* dsDevices;
\r
3645 RtApiDs :: RtApiDs()
\r
3647 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3648 // accept whatever the mainline chose for a threading model.
\r
3649 coInitialized_ = false;
\r
3650 HRESULT hr = CoInitialize( NULL );
\r
3651 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3654 RtApiDs :: ~RtApiDs()
\r
3656 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3657 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// The DirectSound default output is always the first device.
// Returns the index of the default playback device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )

// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
// Returns the index of the default capture device.
unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3673 unsigned int RtApiDs :: getDeviceCount( void )
\r
3675 // Set query flag for previously found devices to false, so that we
\r
3676 // can check for any devices that have disappeared.
\r
3677 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3678 dsDevices[i].found = false;
\r
3680 // Query DirectSound devices.
\r
3681 struct DsProbeData probeInfo;
\r
3682 probeInfo.isInput = false;
\r
3683 probeInfo.dsDevices = &dsDevices;
\r
3684 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3685 if ( FAILED( result ) ) {
\r
3686 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3687 errorText_ = errorStream_.str();
\r
3688 error( RtAudioError::WARNING );
\r
3691 // Query DirectSoundCapture devices.
\r
3692 probeInfo.isInput = true;
\r
3693 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3694 if ( FAILED( result ) ) {
\r
3695 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3696 errorText_ = errorStream_.str();
\r
3697 error( RtAudioError::WARNING );
\r
3700 // Clean out any devices that may have disappeared.
\r
3701 std::vector< int > indices;
\r
3702 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3703 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3704 unsigned int nErased = 0;
\r
3705 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3706 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3708 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe the DirectSound device at the given index and fill in a
// DeviceInfo structure: output channels (from the playback caps),
// input channels/sample rates/native formats (from the capture caps),
// duplex channel count, default-device flags and the device name.
// info.probed is set true only once probing succeeds; argument errors
// raise INVALID_USE, per-device probe failures raise WARNINGs.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Skip the playback probe entirely if this entry has no valid
  // playback GUID (capture-only device).
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
      info.sampleRates.push_back( SAMPLE_RATES[k] );

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // No capture GUID: this is a playback-only device, so we are done.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // Capture caps report fixed WAVE_FORMAT_* combinations of rate,
  // channel count and sample width, so rates and formats are derived
  // together from the dwFormats bitmask.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Prefer 16-bit rates when any 16-bit stereo format is supported.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
  else info.inputChannels = 0; // technically, this would be an error

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
\r
// Open the given DirectSound device for the requested mode (OUTPUT or
// INPUT), channel count/offset, sample rate, format and buffer size.
// Creates the DirectSound playback or capture buffer, allocates the
// user/device conversion buffers, fills in stream_ and the shared
// DsHandle, and starts the callback thread on the first open. Returns
// true on success; on failure, any partially-created DirectSound
// objects and buffers are released in the cleanup section at the end.
// NOTE: DirectSound supports at most 2 channels per device here.
bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                 unsigned int firstChannel, unsigned int sampleRate,
                                 RtAudioFormat format, unsigned int *bufferSize,
                                 RtAudio::StreamOptions *options )
  if ( channels + firstChannel > 2 ) {
    errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

  size_t nDevices = dsDevices.size();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

  if ( mode == OUTPUT ) {
    if ( dsDevices[ device ].validId[0] == false ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
      errorText_ = errorStream_.str();
  else { // mode == INPUT
    if ( dsDevices[ device ].validId[1] == false ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
      errorText_ = errorStream_.str();

  // According to a note in PortAudio, using GetDesktopWindow()
  // instead of GetForegroundWindow() is supposed to avoid problems
  // that occur when the application's window is not the foreground
  // window. Also, if the application window closes before the
  // DirectSound buffer, DirectSound can crash. In the past, I had
  // problems when using GetDesktopWindow() but it seems fine now
  // (January 2010). I'll leave it commented here.
  // HWND hWnd = GetForegroundWindow();
  HWND hWnd = GetDesktopWindow();

  // Check the numberOfBuffers parameter and limit the lowest value to
  // two. This is a judgement call and a value of two is probably too
  // low for capture, but it should work for playback.
  if ( options ) nBuffers = options->numberOfBuffers;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
  if ( nBuffers < 2 ) nBuffers = 3;

  // Check the lower range of the user-specified buffer size and set
  // (arbitrarily) to a lower bound of 32.
  if ( *bufferSize < 32 ) *bufferSize = 32;

  // Create the wave format structure. The data format setting will
  // be determined later.
  WAVEFORMATEX waveFormat;
  ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
  waveFormat.wFormatTag = WAVE_FORMAT_PCM;
  waveFormat.nChannels = channels + firstChannel;
  waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

  // Determine the device buffer size. By default, we'll use the value
  // defined above (32K), but we will grow it to make allowances for
  // very large software buffer sizes.
  DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
  DWORD dsPointerLeadTime = 0;

  void *ohandle = 0, *bhandle = 0;

  if ( mode == OUTPUT ) {

    LPDIRECTSOUND output;
    result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    outCaps.dwSize = sizeof( outCaps );
    result = output->GetCaps( &outCaps );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Check channel information.
    if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
      errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
      errorText_ = errorStream_.str();

    // Check format information. Use 16-bit format unless not
    // supported or user requests 8-bit.
    if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
         !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
      waveFormat.wBitsPerSample = 16;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      waveFormat.wBitsPerSample = 8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
    stream_.userFormat = format;

    // Update wave format structure and buffer information.
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
    while ( dsPointerLeadTime * 2U > dsBufferSize )
      dsBufferSize *= 2;

    // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
    // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
    // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
    result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Even though we will write to the secondary buffer, we need to
    // access the primary buffer to set the correct output format
    // (since the default is 8-bit, 22 kHz!). Setup the DS primary
    // buffer description.
    DSBUFFERDESC bufferDescription;
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
    bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

    // Obtain the primary buffer
    LPDIRECTSOUNDBUFFER buffer;
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Set the primary DS buffer sound format.
    result = buffer->SetFormat( &waveFormat );
    if ( FAILED( result ) ) {
      output->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Setup the secondary DS buffer description.
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
    bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
                                  DSBCAPS_GLOBALFOCUS |
                                  DSBCAPS_GETCURRENTPOSITION2 |
                                  DSBCAPS_LOCHARDWARE ); // Force hardware mixing
    bufferDescription.dwBufferBytes = dsBufferSize;
    bufferDescription.lpwfxFormat = &waveFormat;

    // Try to create the secondary DS buffer. If that doesn't work,
    // try to use software mixing. Otherwise, there's a problem.
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
                                    DSBCAPS_GLOBALFOCUS |
                                    DSBCAPS_GETCURRENTPOSITION2 |
                                    DSBCAPS_LOCSOFTWARE ); // Force software mixing
      result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
      if ( FAILED( result ) ) {
        output->Release();
        errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
        errorText_ = errorStream_.str();

    // Get the buffer size ... might be different from what we specified.
    dsbcaps.dwSize = sizeof( DSBCAPS );
    result = buffer->GetCaps( &dsbcaps );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    dsBufferSize = dsbcaps.dwBufferBytes;

    // Lock the DS buffer
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      output->Release();
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Keep the raw object/buffer pointers; they are stored in the
    // DsHandle below.
    ohandle = (void *) output;
    bhandle = (void *) buffer;

  if ( mode == INPUT ) {

    LPDIRECTSOUNDCAPTURE input;
    result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    inCaps.dwSize = sizeof( inCaps );
    result = input->GetCaps( &inCaps );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Check channel information.
    if ( inCaps.dwChannels < channels + firstChannel ) {
      errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

    // Check format information. Use 16-bit format unless user
    // requests 8-bit.
    DWORD deviceFormats;
    if ( channels + firstChannel == 2 ) {
      deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
        waveFormat.wBitsPerSample = 8;
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
      else { // assume 16-bit is supported
        waveFormat.wBitsPerSample = 16;
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    else { // channel == 1
      deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
        waveFormat.wBitsPerSample = 8;
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
      else { // assume 16-bit is supported
        waveFormat.wBitsPerSample = 16;
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    stream_.userFormat = format;

    // Update wave format structure and buffer information.
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
    while ( dsPointerLeadTime * 2U > dsBufferSize )
      dsBufferSize *= 2;

    // Setup the secondary DS buffer description.
    DSCBUFFERDESC bufferDescription;
    ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
    bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
    bufferDescription.dwFlags = 0;
    bufferDescription.dwReserved = 0;
    bufferDescription.dwBufferBytes = dsBufferSize;
    bufferDescription.lpwfxFormat = &waveFormat;

    // Create the capture buffer.
    LPDIRECTSOUNDCAPTUREBUFFER buffer;
    result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Get the buffer size ... might be different from what we specified.
    DSCBCAPS dscbcaps;
    dscbcaps.dwSize = sizeof( DSCBCAPS );
    result = buffer->GetCaps( &dscbcaps );
    if ( FAILED( result ) ) {
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    dsBufferSize = dscbcaps.dwBufferBytes;

    // NOTE: We could have a problem here if this is a duplex stream
    // and the play and capture hardware buffer sizes are different
    // (I'm actually not sure if that is a problem or not).
    // Currently, we are not verifying that.

    // Lock the capture buffer
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Zero the buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      buffer->Release();
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
      errorText_ = errorStream_.str();

    // Keep the raw object/buffer pointers; they are stored in the
    // DsHandle below.
    ohandle = (void *) input;
    bhandle = (void *) buffer;

  // Set various stream parameters
  DsHandle *handle = 0;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.nUserChannels[mode] = channels;
  stream_.bufferSize = *bufferSize;
  stream_.channelOffset[mode] = firstChannel;
  stream_.deviceInterleaved[mode] = true;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Set flag for buffer conversion
  stream_.doConvertBuffer[mode] = false;
  if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
    stream_.doConvertBuffer[mode] = true;
  if (stream_.userFormat != stream_.deviceFormat[mode])
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex streams the device buffer is shared; reuse the one
      // allocated for output if it is already large enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate our DsHandle structures for the stream.
  if ( stream_.apiHandle == 0 ) {
      handle = new DsHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
    handle = (DsHandle *) stream_.apiHandle;
  handle->id[mode] = ohandle;
  handle->buffer[mode] = bhandle;
  handle->dsBufferSize[mode] = dsBufferSize;
  handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
  stream_.nBuffers = nBuffers;
  stream_.sampleRate = sampleRate;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup the callback thread.
  if ( stream_.callbackInfo.isRunning == false ) {
    unsigned threadId;
    stream_.callbackInfo.isRunning = true;
    stream_.callbackInfo.object = (void *) this;
    stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
                                                  &stream_.callbackInfo, 0, &threadId );
    if ( stream_.callbackInfo.thread == 0 ) {
      errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

    // Boost DS thread priority
    SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

  // Failure cleanup: release any DirectSound COM objects and buffers
  // that were created above, free the internal buffers, and mark the
  // stream closed.
  if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
    LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    if ( buffer ) buffer->Release();
    object->Release();
  if ( handle->buffer[1] ) {
    LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    if ( buffer ) buffer->Release();
    object->Release();
  CloseHandle( handle->condition );
  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release
// the DirectSound playback/capture buffers and their parent COM
// objects, close the condition event, free the internal user/device
// buffers, and reset the stream state to STREAM_CLOSED. Emits only a
// WARNING if no stream is open.
void RtApiDs :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiDs::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  // Stop the callback thread.
  // Clearing isRunning signals the thread to exit; then wait for it.
  stream_.callbackInfo.isRunning = false;
  WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
  CloseHandle( (HANDLE) stream_.callbackInfo.thread );

  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  // Release the playback side (index 0), then the capture side (index 1).
  if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
    LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
      buffer->Release();
    object->Release();
  if ( handle->buffer[1] ) {
    LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
      buffer->Release();
    object->Release();
  CloseHandle( handle->condition );
  stream_.apiHandle = 0;

  // Free the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
\r
4442 void RtApiDs :: startStream()
\r
4445 if ( stream_.state == STREAM_RUNNING ) {
\r
4446 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4447 error( RtAudioError::WARNING );
\r
4451 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4453 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4454 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4455 // this is already in effect.
\r
4456 timeBeginPeriod( 1 );
\r
4458 buffersRolling = false;
\r
4459 duplexPrerollBytes = 0;
\r
4461 if ( stream_.mode == DUPLEX ) {
\r
4462 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4463 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4466 HRESULT result = 0;
\r
4467 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4469 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4470 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4471 if ( FAILED( result ) ) {
\r
4472 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4473 errorText_ = errorStream_.str();
\r
4478 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4480 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4481 result = buffer->Start( DSCBSTART_LOOPING );
\r
4482 if ( FAILED( result ) ) {
\r
4483 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4484 errorText_ = errorStream_.str();
\r
4489 handle->drainCounter = 0;
\r
4490 handle->internalDrain = false;
\r
4491 ResetEvent( handle->condition );
\r
4492 stream_.state = STREAM_RUNNING;
\r
4495 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4498 void RtApiDs :: stopStream()
\r
4501 if ( stream_.state == STREAM_STOPPED ) {
\r
4502 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4503 error( RtAudioError::WARNING );
\r
4507 HRESULT result = 0;
\r
4510 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4512 if ( handle->drainCounter == 0 ) {
\r
4513 handle->drainCounter = 2;
\r
4514 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4517 stream_.state = STREAM_STOPPED;
\r
4519 // Stop the buffer and clear memory
\r
4520 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4521 result = buffer->Stop();
\r
4522 if ( FAILED( result ) ) {
\r
4523 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4524 errorText_ = errorStream_.str();
\r
4528 // Lock the buffer and clear it so that if we start to play again,
\r
4529 // we won't have old data playing.
\r
4530 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4531 if ( FAILED( result ) ) {
\r
4532 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4533 errorText_ = errorStream_.str();
\r
4537 // Zero the DS buffer
\r
4538 ZeroMemory( audioPtr, dataLen );
\r
4540 // Unlock the DS buffer
\r
4541 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4542 if ( FAILED( result ) ) {
\r
4543 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4544 errorText_ = errorStream_.str();
\r
4548 // If we start playing again, we must begin at beginning of buffer.
\r
4549 handle->bufferPointer[0] = 0;
\r
4552 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4553 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4557 stream_.state = STREAM_STOPPED;
\r
4559 result = buffer->Stop();
\r
4560 if ( FAILED( result ) ) {
\r
4561 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4562 errorText_ = errorStream_.str();
\r
4566 // Lock the buffer and clear it so that if we start to play again,
\r
4567 // we won't have old data playing.
\r
4568 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4569 if ( FAILED( result ) ) {
\r
4570 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4571 errorText_ = errorStream_.str();
\r
4575 // Zero the DS buffer
\r
4576 ZeroMemory( audioPtr, dataLen );
\r
4578 // Unlock the DS buffer
\r
4579 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4580 if ( FAILED( result ) ) {
\r
4581 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4582 errorText_ = errorStream_.str();
\r
4586 // If we start recording again, we must begin at beginning of buffer.
\r
4587 handle->bufferPointer[1] = 0;
\r
4591 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4592 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4595 void RtApiDs :: abortStream()
\r
4598 if ( stream_.state == STREAM_STOPPED ) {
\r
4599 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4600 error( RtAudioError::WARNING );
\r
4604 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4605 handle->drainCounter = 2;
\r
4610 void RtApiDs :: callbackEvent()
\r
4612 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4613 Sleep( 50 ); // sleep 50 milliseconds
\r
4617 if ( stream_.state == STREAM_CLOSED ) {
\r
4618 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4619 error( RtAudioError::WARNING );
\r
4623 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4624 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4626 // Check if we were draining the stream and signal is finished.
\r
4627 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4629 stream_.state = STREAM_STOPPING;
\r
4630 if ( handle->internalDrain == false )
\r
4631 SetEvent( handle->condition );
\r
4637 // Invoke user callback to get fresh output data UNLESS we are
\r
4638 // draining stream.
\r
4639 if ( handle->drainCounter == 0 ) {
\r
4640 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4641 double streamTime = getStreamTime();
\r
4642 RtAudioStreamStatus status = 0;
\r
4643 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4644 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4645 handle->xrun[0] = false;
\r
4647 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4648 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4649 handle->xrun[1] = false;
\r
4651 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4652 stream_.bufferSize, streamTime, status, info->userData );
\r
4653 if ( cbReturnValue == 2 ) {
\r
4654 stream_.state = STREAM_STOPPING;
\r
4655 handle->drainCounter = 2;
\r
4659 else if ( cbReturnValue == 1 ) {
\r
4660 handle->drainCounter = 1;
\r
4661 handle->internalDrain = true;
\r
4666 DWORD currentWritePointer, safeWritePointer;
\r
4667 DWORD currentReadPointer, safeReadPointer;
\r
4668 UINT nextWritePointer;
\r
4670 LPVOID buffer1 = NULL;
\r
4671 LPVOID buffer2 = NULL;
\r
4672 DWORD bufferSize1 = 0;
\r
4673 DWORD bufferSize2 = 0;
\r
4678 if ( buffersRolling == false ) {
\r
4679 if ( stream_.mode == DUPLEX ) {
\r
4680 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4682 // It takes a while for the devices to get rolling. As a result,
\r
4683 // there's no guarantee that the capture and write device pointers
\r
4684 // will move in lockstep. Wait here for both devices to start
\r
4685 // rolling, and then set our buffer pointers accordingly.
\r
4686 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4687 // bytes later than the write buffer.
\r
4689 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4690 // take place between the two GetCurrentPosition calls... but I'm
\r
4691 // really not sure how to solve the problem. Temporarily boost to
\r
4692 // Realtime priority, maybe; but I'm not sure what priority the
\r
4693 // DirectSound service threads run at. We *should* be roughly
\r
4694 // within a ms or so of correct.
\r
4696 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4697 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4699 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4701 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4702 if ( FAILED( result ) ) {
\r
4703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4704 errorText_ = errorStream_.str();
\r
4705 error( RtAudioError::SYSTEM_ERROR );
\r
4708 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4709 if ( FAILED( result ) ) {
\r
4710 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4711 errorText_ = errorStream_.str();
\r
4712 error( RtAudioError::SYSTEM_ERROR );
\r
4716 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4717 if ( FAILED( result ) ) {
\r
4718 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4719 errorText_ = errorStream_.str();
\r
4720 error( RtAudioError::SYSTEM_ERROR );
\r
4723 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4724 if ( FAILED( result ) ) {
\r
4725 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4726 errorText_ = errorStream_.str();
\r
4727 error( RtAudioError::SYSTEM_ERROR );
\r
4730 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4734 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4736 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4737 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4738 handle->bufferPointer[1] = safeReadPointer;
\r
4740 else if ( stream_.mode == OUTPUT ) {
\r
4742 // Set the proper nextWritePosition after initial startup.
\r
4743 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4744 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4745 if ( FAILED( result ) ) {
\r
4746 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4747 errorText_ = errorStream_.str();
\r
4748 error( RtAudioError::SYSTEM_ERROR );
\r
4751 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4752 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4755 buffersRolling = true;
\r
4758 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4760 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4762 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4763 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4764 bufferBytes *= formatBytes( stream_.userFormat );
\r
4765 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4768 // Setup parameters and do buffer conversion if necessary.
\r
4769 if ( stream_.doConvertBuffer[0] ) {
\r
4770 buffer = stream_.deviceBuffer;
\r
4771 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4772 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4773 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4776 buffer = stream_.userBuffer[0];
\r
4777 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4778 bufferBytes *= formatBytes( stream_.userFormat );
\r
4781 // No byte swapping necessary in DirectSound implementation.
\r
4783 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4784 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4786 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4787 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4789 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4790 nextWritePointer = handle->bufferPointer[0];
\r
4792 DWORD endWrite, leadPointer;
\r
4794 // Find out where the read and "safe write" pointers are.
\r
4795 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4796 if ( FAILED( result ) ) {
\r
4797 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4798 errorText_ = errorStream_.str();
\r
4799 error( RtAudioError::SYSTEM_ERROR );
\r
4803 // We will copy our output buffer into the region between
\r
4804 // safeWritePointer and leadPointer. If leadPointer is not
\r
4805 // beyond the next endWrite position, wait until it is.
\r
4806 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4807 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4808 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4809 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4810 endWrite = nextWritePointer + bufferBytes;
\r
4812 // Check whether the entire write region is behind the play pointer.
\r
4813 if ( leadPointer >= endWrite ) break;
\r
4815 // If we are here, then we must wait until the leadPointer advances
\r
4816 // beyond the end of our next write region. We use the
\r
4817 // Sleep() function to suspend operation until that happens.
\r
4818 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4819 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4820 if ( millis < 1.0 ) millis = 1.0;
\r
4821 Sleep( (DWORD) millis );
\r
4824 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4825 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4826 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4827 handle->xrun[0] = true;
\r
4828 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4829 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4830 handle->bufferPointer[0] = nextWritePointer;
\r
4831 endWrite = nextWritePointer + bufferBytes;
\r
4834 // Lock free space in the buffer
\r
4835 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4836 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4837 if ( FAILED( result ) ) {
\r
4838 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4839 errorText_ = errorStream_.str();
\r
4840 error( RtAudioError::SYSTEM_ERROR );
\r
4844 // Copy our buffer into the DS buffer
\r
4845 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4846 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4848 // Update our buffer offset and unlock sound buffer
\r
4849 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4850 if ( FAILED( result ) ) {
\r
4851 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4852 errorText_ = errorStream_.str();
\r
4853 error( RtAudioError::SYSTEM_ERROR );
\r
4856 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4857 handle->bufferPointer[0] = nextWritePointer;
\r
4859 if ( handle->drainCounter ) {
\r
4860 handle->drainCounter++;
\r
4865 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4867 // Setup parameters.
\r
4868 if ( stream_.doConvertBuffer[1] ) {
\r
4869 buffer = stream_.deviceBuffer;
\r
4870 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4871 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4874 buffer = stream_.userBuffer[1];
\r
4875 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4876 bufferBytes *= formatBytes( stream_.userFormat );
\r
4879 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4880 long nextReadPointer = handle->bufferPointer[1];
\r
4881 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4883 // Find out where the write and "safe read" pointers are.
\r
4884 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4885 if ( FAILED( result ) ) {
\r
4886 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4887 errorText_ = errorStream_.str();
\r
4888 error( RtAudioError::SYSTEM_ERROR );
\r
4892 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4893 DWORD endRead = nextReadPointer + bufferBytes;
\r
4895 // Handling depends on whether we are INPUT or DUPLEX.
\r
4896 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4897 // then a wait here will drag the write pointers into the forbidden zone.
\r
4899 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4900 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4901 // practical way to sync up the read and write pointers reliably, given the
\r
4902 // the very complex relationship between phase and increment of the read and write
\r
4905 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4906 // provide a pre-roll period of 0.5 seconds in which we return
\r
4907 // zeros from the read buffer while the pointers sync up.
\r
4909 if ( stream_.mode == DUPLEX ) {
\r
4910 if ( safeReadPointer < endRead ) {
\r
4911 if ( duplexPrerollBytes <= 0 ) {
\r
4912 // Pre-roll time over. Be more agressive.
\r
4913 int adjustment = endRead-safeReadPointer;
\r
4915 handle->xrun[1] = true;
\r
4917 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4918 // and perform fine adjustments later.
\r
4919 // - small adjustments: back off by twice as much.
\r
4920 if ( adjustment >= 2*bufferBytes )
\r
4921 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4923 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4925 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4929 // In pre=roll time. Just do it.
\r
4930 nextReadPointer = safeReadPointer - bufferBytes;
\r
4931 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4933 endRead = nextReadPointer + bufferBytes;
\r
4936 else { // mode == INPUT
\r
4937 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4938 // See comments for playback.
\r
4939 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4940 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4941 if ( millis < 1.0 ) millis = 1.0;
\r
4942 Sleep( (DWORD) millis );
\r
4944 // Wake up and find out where we are now.
\r
4945 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4946 if ( FAILED( result ) ) {
\r
4947 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4948 errorText_ = errorStream_.str();
\r
4949 error( RtAudioError::SYSTEM_ERROR );
\r
4953 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4957 // Lock free space in the buffer
\r
4958 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4959 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4960 if ( FAILED( result ) ) {
\r
4961 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4962 errorText_ = errorStream_.str();
\r
4963 error( RtAudioError::SYSTEM_ERROR );
\r
4967 if ( duplexPrerollBytes <= 0 ) {
\r
4968 // Copy our buffer into the DS buffer
\r
4969 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4970 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4973 memset( buffer, 0, bufferSize1 );
\r
4974 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4975 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4978 // Update our buffer offset and unlock sound buffer
\r
4979 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4980 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4981 if ( FAILED( result ) ) {
\r
4982 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4983 errorText_ = errorStream_.str();
\r
4984 error( RtAudioError::SYSTEM_ERROR );
\r
4987 handle->bufferPointer[1] = nextReadPointer;
\r
4989 // No byte swapping necessary in DirectSound implementation.
\r
4991 // If necessary, convert 8-bit data from unsigned to signed.
\r
4992 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4993 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4995 // Do buffer conversion if necessary.
\r
4996 if ( stream_.doConvertBuffer[1] )
\r
4997 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
5001 RtApi::tickStreamTime();
\r
5004 // Definitions for utility functions and callbacks
\r
5005 // specific to the DirectSound implementation.
\r
5007 static unsigned __stdcall callbackHandler( void *ptr )
\r
5009 CallbackInfo *info = (CallbackInfo *) ptr;
\r
5010 RtApiDs *object = (RtApiDs *) info->object;
\r
5011 bool* isRunning = &info->isRunning;
\r
5013 while ( *isRunning == true ) {
\r
5014 object->callbackEvent();
\r
5017 _endthreadex( 0 );
\r
5021 #include "tchar.h"
\r
5023 static std::string convertTChar( LPCTSTR name )
\r
5025 #if defined( UNICODE ) || defined( _UNICODE )
\r
5026 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5027 std::string s( length-1, '\0' );
\r
5028 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
5030 std::string s( name );
\r
5036 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5037 LPCTSTR description,
\r
5038 LPCTSTR /*module*/,
\r
5039 LPVOID lpContext )
\r
5041 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5042 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5045 bool validDevice = false;
\r
5046 if ( probeInfo.isInput == true ) {
\r
5048 LPDIRECTSOUNDCAPTURE object;
\r
5050 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5051 if ( hr != DS_OK ) return TRUE;
\r
5053 caps.dwSize = sizeof(caps);
\r
5054 hr = object->GetCaps( &caps );
\r
5055 if ( hr == DS_OK ) {
\r
5056 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5057 validDevice = true;
\r
5059 object->Release();
\r
5063 LPDIRECTSOUND object;
\r
5064 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5065 if ( hr != DS_OK ) return TRUE;
\r
5067 caps.dwSize = sizeof(caps);
\r
5068 hr = object->GetCaps( &caps );
\r
5069 if ( hr == DS_OK ) {
\r
5070 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5071 validDevice = true;
\r
5073 object->Release();
\r
5076 // If good device, then save its name and guid.
\r
5077 std::string name = convertTChar( description );
\r
5078 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5079 if ( lpguid == NULL )
\r
5080 name = "Default Device";
\r
5081 if ( validDevice ) {
\r
5082 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5083 if ( dsDevices[i].name == name ) {
\r
5084 dsDevices[i].found = true;
\r
5085 if ( probeInfo.isInput ) {
\r
5086 dsDevices[i].id[1] = lpguid;
\r
5087 dsDevices[i].validId[1] = true;
\r
5090 dsDevices[i].id[0] = lpguid;
\r
5091 dsDevices[i].validId[0] = true;
\r
5098 device.name = name;
\r
5099 device.found = true;
\r
5100 if ( probeInfo.isInput ) {
\r
5101 device.id[1] = lpguid;
\r
5102 device.validId[1] = true;
\r
5105 device.id[0] = lpguid;
\r
5106 device.validId[0] = true;
\r
5108 dsDevices.push_back( device );
\r
5114 static const char* getErrorString( int code )
\r
5118 case DSERR_ALLOCATED:
\r
5119 return "Already allocated";
\r
5121 case DSERR_CONTROLUNAVAIL:
\r
5122 return "Control unavailable";
\r
5124 case DSERR_INVALIDPARAM:
\r
5125 return "Invalid parameter";
\r
5127 case DSERR_INVALIDCALL:
\r
5128 return "Invalid call";
\r
5130 case DSERR_GENERIC:
\r
5131 return "Generic error";
\r
5133 case DSERR_PRIOLEVELNEEDED:
\r
5134 return "Priority level needed";
\r
5136 case DSERR_OUTOFMEMORY:
\r
5137 return "Out of memory";
\r
5139 case DSERR_BADFORMAT:
\r
5140 return "The sample rate or the channel format is not supported";
\r
5142 case DSERR_UNSUPPORTED:
\r
5143 return "Not supported";
\r
5145 case DSERR_NODRIVER:
\r
5146 return "No driver";
\r
5148 case DSERR_ALREADYINITIALIZED:
\r
5149 return "Already initialized";
\r
5151 case DSERR_NOAGGREGATION:
\r
5152 return "No aggregation";
\r
5154 case DSERR_BUFFERLOST:
\r
5155 return "Buffer lost";
\r
5157 case DSERR_OTHERAPPHASPRIO:
\r
5158 return "Another application already has priority";
\r
5160 case DSERR_UNINITIALIZED:
\r
5161 return "Uninitialized";
\r
5164 return "DirectSound unknown error";
\r
5167 //******************** End of __WINDOWS_DS__ *********************//
\r
5171 #if defined(__LINUX_ALSA__)
\r
5173 #include <alsa/asoundlib.h>
\r
5174 #include <unistd.h>
\r
5176 // A structure to hold various information related to the ALSA API
\r
5177 // implementation.
\r
5178 struct AlsaHandle {
\r
5179 snd_pcm_t *handles[2];
\r
5180 bool synchronized;
\r
5182 pthread_cond_t runnable_cv;
\r
5186 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5189 static void *alsaCallbackHandler( void * ptr );
\r
5191 RtApiAlsa :: RtApiAlsa()
\r
5193 // Nothing to do here.
\r
5196 RtApiAlsa :: ~RtApiAlsa()
\r
5198 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5201 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5203 unsigned nDevices = 0;
\r
5204 int result, subdevice, card;
\r
5206 snd_ctl_t *handle;
\r
5208 // Count cards and devices
\r
5210 snd_card_next( &card );
\r
5211 while ( card >= 0 ) {
\r
5212 sprintf( name, "hw:%d", card );
\r
5213 result = snd_ctl_open( &handle, name, 0 );
\r
5214 if ( result < 0 ) {
\r
5215 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5216 errorText_ = errorStream_.str();
\r
5217 error( RtAudioError::WARNING );
\r
5222 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5223 if ( result < 0 ) {
\r
5224 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5225 errorText_ = errorStream_.str();
\r
5226 error( RtAudioError::WARNING );
\r
5229 if ( subdevice < 0 )
\r
5234 snd_ctl_close( handle );
\r
5235 snd_card_next( &card );
\r
5238 result = snd_ctl_open( &handle, "default", 0 );
\r
5239 if (result == 0) {
\r
5241 snd_ctl_close( handle );
\r
5247 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5249 RtAudio::DeviceInfo info;
\r
5250 info.probed = false;
\r
5252 unsigned nDevices = 0;
\r
5253 int result, subdevice, card;
\r
5255 snd_ctl_t *chandle;
\r
5257 // Count cards and devices
\r
5259 snd_card_next( &card );
\r
5260 while ( card >= 0 ) {
\r
5261 sprintf( name, "hw:%d", card );
\r
5262 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5263 if ( result < 0 ) {
\r
5264 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5265 errorText_ = errorStream_.str();
\r
5266 error( RtAudioError::WARNING );
\r
5271 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5272 if ( result < 0 ) {
\r
5273 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5274 errorText_ = errorStream_.str();
\r
5275 error( RtAudioError::WARNING );
\r
5278 if ( subdevice < 0 ) break;
\r
5279 if ( nDevices == device ) {
\r
5280 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5286 snd_ctl_close( chandle );
\r
5287 snd_card_next( &card );
\r
5290 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5291 if ( result == 0 ) {
\r
5292 if ( nDevices == device ) {
\r
5293 strcpy( name, "default" );
\r
5299 if ( nDevices == 0 ) {
\r
5300 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5301 error( RtAudioError::INVALID_USE );
\r
5305 if ( device >= nDevices ) {
\r
5306 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5307 error( RtAudioError::INVALID_USE );
\r
5313 // If a stream is already open, we cannot probe the stream devices.
\r
5314 // Thus, use the saved results.
\r
5315 if ( stream_.state != STREAM_CLOSED &&
\r
5316 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5317 snd_ctl_close( chandle );
\r
5318 if ( device >= devices_.size() ) {
\r
5319 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5320 error( RtAudioError::WARNING );
\r
5323 return devices_[ device ];
\r
5326 int openMode = SND_PCM_ASYNC;
\r
5327 snd_pcm_stream_t stream;
\r
5328 snd_pcm_info_t *pcminfo;
\r
5329 snd_pcm_info_alloca( &pcminfo );
\r
5330 snd_pcm_t *phandle;
\r
5331 snd_pcm_hw_params_t *params;
\r
5332 snd_pcm_hw_params_alloca( ¶ms );
\r
5334 // First try for playback unless default device (which has subdev -1)
\r
5335 stream = SND_PCM_STREAM_PLAYBACK;
\r
5336 snd_pcm_info_set_stream( pcminfo, stream );
\r
5337 if ( subdevice != -1 ) {
\r
5338 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5339 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5341 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5342 if ( result < 0 ) {
\r
5343 // Device probably doesn't support playback.
\r
5344 goto captureProbe;
\r
5348 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5349 if ( result < 0 ) {
\r
5350 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5351 errorText_ = errorStream_.str();
\r
5352 error( RtAudioError::WARNING );
\r
5353 goto captureProbe;
\r
5356 // The device is open ... fill the parameter structure.
\r
5357 result = snd_pcm_hw_params_any( phandle, params );
\r
5358 if ( result < 0 ) {
\r
5359 snd_pcm_close( phandle );
\r
5360 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5361 errorText_ = errorStream_.str();
\r
5362 error( RtAudioError::WARNING );
\r
5363 goto captureProbe;
\r
5366 // Get output channel information.
\r
5367 unsigned int value;
\r
5368 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5369 if ( result < 0 ) {
\r
5370 snd_pcm_close( phandle );
\r
5371 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5372 errorText_ = errorStream_.str();
\r
5373 error( RtAudioError::WARNING );
\r
5374 goto captureProbe;
\r
5376 info.outputChannels = value;
\r
5377 snd_pcm_close( phandle );
\r
5380 stream = SND_PCM_STREAM_CAPTURE;
\r
5381 snd_pcm_info_set_stream( pcminfo, stream );
\r
5383 // Now try for capture unless default device (with subdev = -1)
\r
5384 if ( subdevice != -1 ) {
\r
5385 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5386 snd_ctl_close( chandle );
\r
5387 if ( result < 0 ) {
\r
5388 // Device probably doesn't support capture.
\r
5389 if ( info.outputChannels == 0 ) return info;
\r
5390 goto probeParameters;
\r
5394 snd_ctl_close( chandle );
\r
5396 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5397 if ( result < 0 ) {
\r
5398 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5399 errorText_ = errorStream_.str();
\r
5400 error( RtAudioError::WARNING );
\r
5401 if ( info.outputChannels == 0 ) return info;
\r
5402 goto probeParameters;
\r
5405 // The device is open ... fill the parameter structure.
\r
5406 result = snd_pcm_hw_params_any( phandle, params );
\r
5407 if ( result < 0 ) {
\r
5408 snd_pcm_close( phandle );
\r
5409 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5410 errorText_ = errorStream_.str();
\r
5411 error( RtAudioError::WARNING );
\r
5412 if ( info.outputChannels == 0 ) return info;
\r
5413 goto probeParameters;
\r
5416 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5417 if ( result < 0 ) {
\r
5418 snd_pcm_close( phandle );
\r
5419 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5420 errorText_ = errorStream_.str();
\r
5421 error( RtAudioError::WARNING );
\r
5422 if ( info.outputChannels == 0 ) return info;
\r
5423 goto probeParameters;
\r
5425 info.inputChannels = value;
\r
5426 snd_pcm_close( phandle );
\r
5428 // If device opens for both playback and capture, we determine the channels.
\r
5429 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5430 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5432 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5433 if ( device == 0 && info.outputChannels > 0 )
\r
5434 info.isDefaultOutput = true;
\r
5435 if ( device == 0 && info.inputChannels > 0 )
\r
5436 info.isDefaultInput = true;
\r
5439 // At this point, we just need to figure out the supported data
\r
5440 // formats and sample rates. We'll proceed by opening the device in
\r
5441 // the direction with the maximum number of channels, or playback if
\r
5442 // they are equal. This might limit our sample rate options, but so
\r
5445 if ( info.outputChannels >= info.inputChannels )
\r
5446 stream = SND_PCM_STREAM_PLAYBACK;
\r
5448 stream = SND_PCM_STREAM_CAPTURE;
\r
5449 snd_pcm_info_set_stream( pcminfo, stream );
\r
5451 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5452 if ( result < 0 ) {
\r
5453 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5454 errorText_ = errorStream_.str();
\r
5455 error( RtAudioError::WARNING );
\r
5459 // The device is open ... fill the parameter structure.
\r
5460 result = snd_pcm_hw_params_any( phandle, params );
\r
5461 if ( result < 0 ) {
\r
5462 snd_pcm_close( phandle );
\r
5463 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5464 errorText_ = errorStream_.str();
\r
5465 error( RtAudioError::WARNING );
\r
5469 // Test our discrete set of sample rate values.
\r
5470 info.sampleRates.clear();
\r
5471 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5472 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5473 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5475 if ( info.sampleRates.size() == 0 ) {
\r
5476 snd_pcm_close( phandle );
\r
5477 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5478 errorText_ = errorStream_.str();
\r
5479 error( RtAudioError::WARNING );
\r
5483 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5484 snd_pcm_format_t format;
\r
5485 info.nativeFormats = 0;
\r
5486 format = SND_PCM_FORMAT_S8;
\r
5487 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5488 info.nativeFormats |= RTAUDIO_SINT8;
\r
5489 format = SND_PCM_FORMAT_S16;
\r
5490 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5491 info.nativeFormats |= RTAUDIO_SINT16;
\r
5492 format = SND_PCM_FORMAT_S24;
\r
5493 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5494 info.nativeFormats |= RTAUDIO_SINT24;
\r
5495 format = SND_PCM_FORMAT_S32;
\r
5496 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5497 info.nativeFormats |= RTAUDIO_SINT32;
\r
5498 format = SND_PCM_FORMAT_FLOAT;
\r
5499 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5500 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5501 format = SND_PCM_FORMAT_FLOAT64;
\r
5502 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5503 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5505 // Check that we have at least one supported format
\r
5506 if ( info.nativeFormats == 0 ) {
\r
5507 snd_pcm_close( phandle );
\r
5508 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5509 errorText_ = errorStream_.str();
\r
5510 error( RtAudioError::WARNING );
\r
5514 // Get the device name
\r
5516 result = snd_card_get_name( card, &cardname );
\r
5517 if ( result >= 0 ) {
\r
5518 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5523 // That's all ... close the device and return
\r
5524 snd_pcm_close( phandle );
\r
5525 info.probed = true;
\r
5529 void RtApiAlsa :: saveDeviceInfo( void )
\r
5533 unsigned int nDevices = getDeviceCount();
\r
5534 devices_.resize( nDevices );
\r
5535 for ( unsigned int i=0; i<nDevices; i++ )
\r
5536 devices_[i] = getDeviceInfo( i );
\r
5539 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5540 unsigned int firstChannel, unsigned int sampleRate,
\r
5541 RtAudioFormat format, unsigned int *bufferSize,
\r
5542 RtAudio::StreamOptions *options )
\r
5545 #if defined(__RTAUDIO_DEBUG__)
\r
5546 snd_output_t *out;
\r
5547 snd_output_stdio_attach(&out, stderr, 0);
\r
5550 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5552 unsigned nDevices = 0;
\r
5553 int result, subdevice, card;
\r
5555 snd_ctl_t *chandle;
\r
5557 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5558 snprintf(name, sizeof(name), "%s", "default");
\r
5560 // Count cards and devices
\r
5562 snd_card_next( &card );
\r
5563 while ( card >= 0 ) {
\r
5564 sprintf( name, "hw:%d", card );
\r
5565 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5566 if ( result < 0 ) {
\r
5567 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5568 errorText_ = errorStream_.str();
\r
5573 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5574 if ( result < 0 ) break;
\r
5575 if ( subdevice < 0 ) break;
\r
5576 if ( nDevices == device ) {
\r
5577 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5578 snd_ctl_close( chandle );
\r
5583 snd_ctl_close( chandle );
\r
5584 snd_card_next( &card );
\r
5587 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5588 if ( result == 0 ) {
\r
5589 if ( nDevices == device ) {
\r
5590 strcpy( name, "default" );
\r
5596 if ( nDevices == 0 ) {
\r
5597 // This should not happen because a check is made before this function is called.
\r
5598 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5602 if ( device >= nDevices ) {
\r
5603 // This should not happen because a check is made before this function is called.
\r
5604 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5611 // The getDeviceInfo() function will not work for a device that is
\r
5612 // already open. Thus, we'll probe the system before opening a
\r
5613 // stream and save the results for use by getDeviceInfo().
\r
5614 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5615 this->saveDeviceInfo();
\r
5617 snd_pcm_stream_t stream;
\r
5618 if ( mode == OUTPUT )
\r
5619 stream = SND_PCM_STREAM_PLAYBACK;
\r
5621 stream = SND_PCM_STREAM_CAPTURE;
\r
5623 snd_pcm_t *phandle;
\r
5624 int openMode = SND_PCM_ASYNC;
\r
5625 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5626 if ( result < 0 ) {
\r
5627 if ( mode == OUTPUT )
\r
5628 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5630 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5631 errorText_ = errorStream_.str();
\r
5635 // Fill the parameter structure.
\r
5636 snd_pcm_hw_params_t *hw_params;
\r
5637 snd_pcm_hw_params_alloca( &hw_params );
\r
5638 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5639 if ( result < 0 ) {
\r
5640 snd_pcm_close( phandle );
\r
5641 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5642 errorText_ = errorStream_.str();
\r
5646 #if defined(__RTAUDIO_DEBUG__)
\r
5647 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5648 snd_pcm_hw_params_dump( hw_params, out );
\r
5651 // Set access ... check user preference.
\r
5652 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5653 stream_.userInterleaved = false;
\r
5654 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5655 if ( result < 0 ) {
\r
5656 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5657 stream_.deviceInterleaved[mode] = true;
\r
5660 stream_.deviceInterleaved[mode] = false;
\r
5663 stream_.userInterleaved = true;
\r
5664 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5665 if ( result < 0 ) {
\r
5666 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5667 stream_.deviceInterleaved[mode] = false;
\r
5670 stream_.deviceInterleaved[mode] = true;
\r
5673 if ( result < 0 ) {
\r
5674 snd_pcm_close( phandle );
\r
5675 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5676 errorText_ = errorStream_.str();
\r
5680 // Determine how to set the device format.
\r
5681 stream_.userFormat = format;
\r
5682 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5684 if ( format == RTAUDIO_SINT8 )
\r
5685 deviceFormat = SND_PCM_FORMAT_S8;
\r
5686 else if ( format == RTAUDIO_SINT16 )
\r
5687 deviceFormat = SND_PCM_FORMAT_S16;
\r
5688 else if ( format == RTAUDIO_SINT24 )
\r
5689 deviceFormat = SND_PCM_FORMAT_S24;
\r
5690 else if ( format == RTAUDIO_SINT32 )
\r
5691 deviceFormat = SND_PCM_FORMAT_S32;
\r
5692 else if ( format == RTAUDIO_FLOAT32 )
\r
5693 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5694 else if ( format == RTAUDIO_FLOAT64 )
\r
5695 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5697 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5698 stream_.deviceFormat[mode] = format;
\r
5702 // The user requested format is not natively supported by the device.
\r
5703 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5704 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5705 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5709 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5710 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5711 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5715 deviceFormat = SND_PCM_FORMAT_S32;
\r
5716 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5717 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5721 deviceFormat = SND_PCM_FORMAT_S24;
\r
5722 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5723 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5727 deviceFormat = SND_PCM_FORMAT_S16;
\r
5728 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5729 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5733 deviceFormat = SND_PCM_FORMAT_S8;
\r
5734 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5735 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5739 // If we get here, no supported format was found.
\r
5740 snd_pcm_close( phandle );
\r
5741 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5742 errorText_ = errorStream_.str();
\r
5746 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5747 if ( result < 0 ) {
\r
5748 snd_pcm_close( phandle );
\r
5749 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5750 errorText_ = errorStream_.str();
\r
5754 // Determine whether byte-swaping is necessary.
\r
5755 stream_.doByteSwap[mode] = false;
\r
5756 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5757 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5758 if ( result == 0 )
\r
5759 stream_.doByteSwap[mode] = true;
\r
5760 else if (result < 0) {
\r
5761 snd_pcm_close( phandle );
\r
5762 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5763 errorText_ = errorStream_.str();
\r
5768 // Set the sample rate.
\r
5769 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5770 if ( result < 0 ) {
\r
5771 snd_pcm_close( phandle );
\r
5772 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5773 errorText_ = errorStream_.str();
\r
5777 // Determine the number of channels for this device. We support a possible
\r
5778 // minimum device channel number > than the value requested by the user.
\r
5779 stream_.nUserChannels[mode] = channels;
\r
5780 unsigned int value;
\r
5781 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5782 unsigned int deviceChannels = value;
\r
5783 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5784 snd_pcm_close( phandle );
\r
5785 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5786 errorText_ = errorStream_.str();
\r
5790 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5791 if ( result < 0 ) {
\r
5792 snd_pcm_close( phandle );
\r
5793 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5794 errorText_ = errorStream_.str();
\r
5797 deviceChannels = value;
\r
5798 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5799 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5801 // Set the device channels.
\r
5802 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5803 if ( result < 0 ) {
\r
5804 snd_pcm_close( phandle );
\r
5805 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5806 errorText_ = errorStream_.str();
\r
5810 // Set the buffer (or period) size.
\r
5812 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5813 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5814 if ( result < 0 ) {
\r
5815 snd_pcm_close( phandle );
\r
5816 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5817 errorText_ = errorStream_.str();
\r
5820 *bufferSize = periodSize;
\r
5822 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5823 unsigned int periods = 0;
\r
5824 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5825 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5826 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5827 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5828 if ( result < 0 ) {
\r
5829 snd_pcm_close( phandle );
\r
5830 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5831 errorText_ = errorStream_.str();
\r
5835 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5836 // MUST be the same in both directions!
\r
5837 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5838 snd_pcm_close( phandle );
\r
5839 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5840 errorText_ = errorStream_.str();
\r
5844 stream_.bufferSize = *bufferSize;
\r
5846 // Install the hardware configuration
\r
5847 result = snd_pcm_hw_params( phandle, hw_params );
\r
5848 if ( result < 0 ) {
\r
5849 snd_pcm_close( phandle );
\r
5850 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5851 errorText_ = errorStream_.str();
\r
5855 #if defined(__RTAUDIO_DEBUG__)
\r
5856 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5857 snd_pcm_hw_params_dump( hw_params, out );
\r
5860 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5861 snd_pcm_sw_params_t *sw_params = NULL;
\r
5862 snd_pcm_sw_params_alloca( &sw_params );
\r
5863 snd_pcm_sw_params_current( phandle, sw_params );
\r
5864 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5865 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5866 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5868 // The following two settings were suggested by Theo Veenker
\r
5869 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5870 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5872 // here are two options for a fix
\r
5873 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5874 snd_pcm_uframes_t val;
\r
5875 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5876 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5878 result = snd_pcm_sw_params( phandle, sw_params );
\r
5879 if ( result < 0 ) {
\r
5880 snd_pcm_close( phandle );
\r
5881 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5882 errorText_ = errorStream_.str();
\r
5886 #if defined(__RTAUDIO_DEBUG__)
\r
5887 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5888 snd_pcm_sw_params_dump( sw_params, out );
\r
5891 // Set flags for buffer conversion
\r
5892 stream_.doConvertBuffer[mode] = false;
\r
5893 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5894 stream_.doConvertBuffer[mode] = true;
\r
5895 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5896 stream_.doConvertBuffer[mode] = true;
\r
5897 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5898 stream_.nUserChannels[mode] > 1 )
\r
5899 stream_.doConvertBuffer[mode] = true;
\r
5901 // Allocate the ApiHandle if necessary and then save.
\r
5902 AlsaHandle *apiInfo = 0;
\r
5903 if ( stream_.apiHandle == 0 ) {
\r
5905 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5907 catch ( std::bad_alloc& ) {
\r
5908 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5912 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5913 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5917 stream_.apiHandle = (void *) apiInfo;
\r
5918 apiInfo->handles[0] = 0;
\r
5919 apiInfo->handles[1] = 0;
\r
5922 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5924 apiInfo->handles[mode] = phandle;
\r
5927 // Allocate necessary internal buffers.
\r
5928 unsigned long bufferBytes;
\r
5929 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5930 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5931 if ( stream_.userBuffer[mode] == NULL ) {
\r
5932 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5936 if ( stream_.doConvertBuffer[mode] ) {
\r
5938 bool makeBuffer = true;
\r
5939 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5940 if ( mode == INPUT ) {
\r
5941 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5942 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5943 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5947 if ( makeBuffer ) {
\r
5948 bufferBytes *= *bufferSize;
\r
5949 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5950 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5951 if ( stream_.deviceBuffer == NULL ) {
\r
5952 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5958 stream_.sampleRate = sampleRate;
\r
5959 stream_.nBuffers = periods;
\r
5960 stream_.device[mode] = device;
\r
5961 stream_.state = STREAM_STOPPED;
\r
5963 // Setup the buffer conversion information structure.
\r
5964 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5966 // Setup thread if necessary.
\r
5967 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5968 // We had already set up an output stream.
\r
5969 stream_.mode = DUPLEX;
\r
5970 // Link the streams if possible.
\r
5971 apiInfo->synchronized = false;
\r
5972 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5973 apiInfo->synchronized = true;
\r
5975 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5976 error( RtAudioError::WARNING );
\r
5980 stream_.mode = mode;
\r
5982 // Setup callback thread.
\r
5983 stream_.callbackInfo.object = (void *) this;
\r
5985 // Set the thread attributes for joinable and realtime scheduling
\r
5986 // priority (optional). The higher priority will only take affect
\r
5987 // if the program is run as root or suid. Note, under Linux
\r
5988 // processes with CAP_SYS_NICE privilege, a user can change
\r
5989 // scheduling policy and priority (thus need not be root). See
\r
5990 // POSIX "capabilities".
\r
5991 pthread_attr_t attr;
\r
5992 pthread_attr_init( &attr );
\r
5993 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5995 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5996 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5997 // We previously attempted to increase the audio callback priority
\r
5998 // to SCHED_RR here via the attributes. However, while no errors
\r
5999 // were reported in doing so, it did not work. So, now this is
\r
6000 // done in the alsaCallbackHandler function.
\r
6001 stream_.callbackInfo.doRealtime = true;
\r
6002 int priority = options->priority;
\r
6003 int min = sched_get_priority_min( SCHED_RR );
\r
6004 int max = sched_get_priority_max( SCHED_RR );
\r
6005 if ( priority < min ) priority = min;
\r
6006 else if ( priority > max ) priority = max;
\r
6007 stream_.callbackInfo.priority = priority;
\r
6011 stream_.callbackInfo.isRunning = true;
\r
6012 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
6013 pthread_attr_destroy( &attr );
\r
6015 stream_.callbackInfo.isRunning = false;
\r
6016 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
6025 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6026 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6027 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6029 stream_.apiHandle = 0;
\r
6032 if ( phandle) snd_pcm_close( phandle );
\r
6034 for ( int i=0; i<2; i++ ) {
\r
6035 if ( stream_.userBuffer[i] ) {
\r
6036 free( stream_.userBuffer[i] );
\r
6037 stream_.userBuffer[i] = 0;
\r
6041 if ( stream_.deviceBuffer ) {
\r
6042 free( stream_.deviceBuffer );
\r
6043 stream_.deviceBuffer = 0;
\r
6046 stream_.state = STREAM_CLOSED;
\r
6050 void RtApiAlsa :: closeStream()
\r
6052 if ( stream_.state == STREAM_CLOSED ) {
\r
6053 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6054 error( RtAudioError::WARNING );
\r
6058 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6059 stream_.callbackInfo.isRunning = false;
\r
6060 MUTEX_LOCK( &stream_.mutex );
\r
6061 if ( stream_.state == STREAM_STOPPED ) {
\r
6062 apiInfo->runnable = true;
\r
6063 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6065 MUTEX_UNLOCK( &stream_.mutex );
\r
6066 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6068 if ( stream_.state == STREAM_RUNNING ) {
\r
6069 stream_.state = STREAM_STOPPED;
\r
6070 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6071 snd_pcm_drop( apiInfo->handles[0] );
\r
6072 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6073 snd_pcm_drop( apiInfo->handles[1] );
\r
6077 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6078 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6079 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6081 stream_.apiHandle = 0;
\r
6084 for ( int i=0; i<2; i++ ) {
\r
6085 if ( stream_.userBuffer[i] ) {
\r
6086 free( stream_.userBuffer[i] );
\r
6087 stream_.userBuffer[i] = 0;
\r
6091 if ( stream_.deviceBuffer ) {
\r
6092 free( stream_.deviceBuffer );
\r
6093 stream_.deviceBuffer = 0;
\r
6096 stream_.mode = UNINITIALIZED;
\r
6097 stream_.state = STREAM_CLOSED;
\r
6100 void RtApiAlsa :: startStream()
\r
6102 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6105 if ( stream_.state == STREAM_RUNNING ) {
\r
6106 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6107 error( RtAudioError::WARNING );
\r
6111 MUTEX_LOCK( &stream_.mutex );
\r
6114 snd_pcm_state_t state;
\r
6115 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6116 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6117 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6118 state = snd_pcm_state( handle[0] );
\r
6119 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6120 result = snd_pcm_prepare( handle[0] );
\r
6121 if ( result < 0 ) {
\r
6122 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6123 errorText_ = errorStream_.str();
\r
6129 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6130 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
6131 state = snd_pcm_state( handle[1] );
\r
6132 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6133 result = snd_pcm_prepare( handle[1] );
\r
6134 if ( result < 0 ) {
\r
6135 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6136 errorText_ = errorStream_.str();
\r
6142 stream_.state = STREAM_RUNNING;
\r
6145 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6146 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6147 MUTEX_UNLOCK( &stream_.mutex );
\r
6149 if ( result >= 0 ) return;
\r
6150 error( RtAudioError::SYSTEM_ERROR );
\r
6153 void RtApiAlsa :: stopStream()
\r
6156 if ( stream_.state == STREAM_STOPPED ) {
\r
6157 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6158 error( RtAudioError::WARNING );
\r
6162 stream_.state = STREAM_STOPPED;
\r
6163 MUTEX_LOCK( &stream_.mutex );
\r
6166 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6167 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6168 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6169 if ( apiInfo->synchronized )
\r
6170 result = snd_pcm_drop( handle[0] );
\r
6172 result = snd_pcm_drain( handle[0] );
\r
6173 if ( result < 0 ) {
\r
6174 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6175 errorText_ = errorStream_.str();
\r
6180 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6181 result = snd_pcm_drop( handle[1] );
\r
6182 if ( result < 0 ) {
\r
6183 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6184 errorText_ = errorStream_.str();
\r
6190 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6191 MUTEX_UNLOCK( &stream_.mutex );
\r
6193 if ( result >= 0 ) return;
\r
6194 error( RtAudioError::SYSTEM_ERROR );
\r
6197 void RtApiAlsa :: abortStream()
\r
6200 if ( stream_.state == STREAM_STOPPED ) {
\r
6201 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6202 error( RtAudioError::WARNING );
\r
6206 stream_.state = STREAM_STOPPED;
\r
6207 MUTEX_LOCK( &stream_.mutex );
\r
6210 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6211 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6213 result = snd_pcm_drop( handle[0] );
\r
6214 if ( result < 0 ) {
\r
6215 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6216 errorText_ = errorStream_.str();
\r
6221 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6222 result = snd_pcm_drop( handle[1] );
\r
6223 if ( result < 0 ) {
\r
6224 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6225 errorText_ = errorStream_.str();
\r
6231 MUTEX_UNLOCK( &stream_.mutex );
\r
6233 if ( result >= 0 ) return;
\r
6234 error( RtAudioError::SYSTEM_ERROR );
\r
6237 void RtApiAlsa :: callbackEvent()
\r
6239 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6240 if ( stream_.state == STREAM_STOPPED ) {
\r
6241 MUTEX_LOCK( &stream_.mutex );
\r
6242 while ( !apiInfo->runnable )
\r
6243 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6245 if ( stream_.state != STREAM_RUNNING ) {
\r
6246 MUTEX_UNLOCK( &stream_.mutex );
\r
6249 MUTEX_UNLOCK( &stream_.mutex );
\r
6252 if ( stream_.state == STREAM_CLOSED ) {
\r
6253 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6254 error( RtAudioError::WARNING );
\r
6258 int doStopStream = 0;
\r
6259 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6260 double streamTime = getStreamTime();
\r
6261 RtAudioStreamStatus status = 0;
\r
6262 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6263 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6264 apiInfo->xrun[0] = false;
\r
6266 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6267 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6268 apiInfo->xrun[1] = false;
\r
6270 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6271 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6273 if ( doStopStream == 2 ) {
\r
6278 MUTEX_LOCK( &stream_.mutex );
\r
6280 // The state might change while waiting on a mutex.
\r
6281 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6286 snd_pcm_t **handle;
\r
6287 snd_pcm_sframes_t frames;
\r
6288 RtAudioFormat format;
\r
6289 handle = (snd_pcm_t **) apiInfo->handles;
\r
6291 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6293 // Setup parameters.
\r
6294 if ( stream_.doConvertBuffer[1] ) {
\r
6295 buffer = stream_.deviceBuffer;
\r
6296 channels = stream_.nDeviceChannels[1];
\r
6297 format = stream_.deviceFormat[1];
\r
6300 buffer = stream_.userBuffer[1];
\r
6301 channels = stream_.nUserChannels[1];
\r
6302 format = stream_.userFormat;
\r
6305 // Read samples from device in interleaved/non-interleaved format.
\r
6306 if ( stream_.deviceInterleaved[1] )
\r
6307 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6309 void *bufs[channels];
\r
6310 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6311 for ( int i=0; i<channels; i++ )
\r
6312 bufs[i] = (void *) (buffer + (i * offset));
\r
6313 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6316 if ( result < (int) stream_.bufferSize ) {
\r
6317 // Either an error or overrun occured.
\r
6318 if ( result == -EPIPE ) {
\r
6319 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6320 if ( state == SND_PCM_STATE_XRUN ) {
\r
6321 apiInfo->xrun[1] = true;
\r
6322 result = snd_pcm_prepare( handle[1] );
\r
6323 if ( result < 0 ) {
\r
6324 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6325 errorText_ = errorStream_.str();
\r
6329 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6330 errorText_ = errorStream_.str();
\r
6334 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6335 errorText_ = errorStream_.str();
\r
6337 error( RtAudioError::WARNING );
\r
6341 // Do byte swapping if necessary.
\r
6342 if ( stream_.doByteSwap[1] )
\r
6343 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6345 // Do buffer conversion if necessary.
\r
6346 if ( stream_.doConvertBuffer[1] )
\r
6347 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6349 // Check stream latency
\r
6350 result = snd_pcm_delay( handle[1], &frames );
\r
6351 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6358 // Setup parameters and do buffer conversion if necessary.
\r
6359 if ( stream_.doConvertBuffer[0] ) {
\r
6360 buffer = stream_.deviceBuffer;
\r
6361 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6362 channels = stream_.nDeviceChannels[0];
\r
6363 format = stream_.deviceFormat[0];
\r
6366 buffer = stream_.userBuffer[0];
\r
6367 channels = stream_.nUserChannels[0];
\r
6368 format = stream_.userFormat;
\r
6371 // Do byte swapping if necessary.
\r
6372 if ( stream_.doByteSwap[0] )
\r
6373 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6375 // Write samples to device in interleaved/non-interleaved format.
\r
6376 if ( stream_.deviceInterleaved[0] )
\r
6377 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6379 void *bufs[channels];
\r
6380 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6381 for ( int i=0; i<channels; i++ )
\r
6382 bufs[i] = (void *) (buffer + (i * offset));
\r
6383 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6386 if ( result < (int) stream_.bufferSize ) {
\r
6387 // Either an error or underrun occured.
\r
6388 if ( result == -EPIPE ) {
\r
6389 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6390 if ( state == SND_PCM_STATE_XRUN ) {
\r
6391 apiInfo->xrun[0] = true;
\r
6392 result = snd_pcm_prepare( handle[0] );
\r
6393 if ( result < 0 ) {
\r
6394 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6395 errorText_ = errorStream_.str();
\r
6399 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6400 errorText_ = errorStream_.str();
\r
6404 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6405 errorText_ = errorStream_.str();
\r
6407 error( RtAudioError::WARNING );
\r
6411 // Check stream latency
\r
6412 result = snd_pcm_delay( handle[0], &frames );
\r
6413 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6417 MUTEX_UNLOCK( &stream_.mutex );
\r
6419 RtApi::tickStreamTime();
\r
6420 if ( doStopStream == 1 ) this->stopStream();
\r
6423 static void *alsaCallbackHandler( void *ptr )
\r
6425 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6426 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6427 bool *isRunning = &info->isRunning;
\r
6429 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6430 if ( &info->doRealtime ) {
\r
6431 pthread_t tID = pthread_self(); // ID of this thread
\r
6432 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6433 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6437 while ( *isRunning == true ) {
\r
6438 pthread_testcancel();
\r
6439 object->callbackEvent();
\r
6442 pthread_exit( NULL );
\r
6445 //******************** End of __LINUX_ALSA__ *********************//
\r
6448 #if defined(__LINUX_PULSE__)
\r
6450 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6451 // and Tristan Matthews.
\r
6453 #include <pulse/error.h>
\r
6454 #include <pulse/simple.h>
\r
6457 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
6458 44100, 48000, 96000, 0};
\r
6460 struct rtaudio_pa_format_mapping_t {
\r
6461 RtAudioFormat rtaudio_format;
\r
6462 pa_sample_format_t pa_format;
\r
6465 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6466 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6467 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6468 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6469 {0, PA_SAMPLE_INVALID}};
\r
6471 struct PulseAudioHandle {
\r
6472 pa_simple *s_play;
\r
6475 pthread_cond_t runnable_cv;
\r
6477 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6480 RtApiPulse::~RtApiPulse()
\r
6482 if ( stream_.state != STREAM_CLOSED )
\r
6486 unsigned int RtApiPulse::getDeviceCount( void )
\r
6491 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6493 RtAudio::DeviceInfo info;
\r
6494 info.probed = true;
\r
6495 info.name = "PulseAudio";
\r
6496 info.outputChannels = 2;
\r
6497 info.inputChannels = 2;
\r
6498 info.duplexChannels = 2;
\r
6499 info.isDefaultOutput = true;
\r
6500 info.isDefaultInput = true;
\r
6502 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6503 info.sampleRates.push_back( *sr );
\r
6505 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6510 static void *pulseaudio_callback( void * user )
\r
6512 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6513 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6514 volatile bool *isRunning = &cbi->isRunning;
\r
6516 while ( *isRunning ) {
\r
6517 pthread_testcancel();
\r
6518 context->callbackEvent();
\r
6521 pthread_exit( NULL );
\r
6524 void RtApiPulse::closeStream( void )
\r
6526 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6528 stream_.callbackInfo.isRunning = false;
\r
6530 MUTEX_LOCK( &stream_.mutex );
\r
6531 if ( stream_.state == STREAM_STOPPED ) {
\r
6532 pah->runnable = true;
\r
6533 pthread_cond_signal( &pah->runnable_cv );
\r
6535 MUTEX_UNLOCK( &stream_.mutex );
\r
6537 pthread_join( pah->thread, 0 );
\r
6538 if ( pah->s_play ) {
\r
6539 pa_simple_flush( pah->s_play, NULL );
\r
6540 pa_simple_free( pah->s_play );
\r
6543 pa_simple_free( pah->s_rec );
\r
6545 pthread_cond_destroy( &pah->runnable_cv );
\r
6547 stream_.apiHandle = 0;
\r
6550 if ( stream_.userBuffer[0] ) {
\r
6551 free( stream_.userBuffer[0] );
\r
6552 stream_.userBuffer[0] = 0;
\r
6554 if ( stream_.userBuffer[1] ) {
\r
6555 free( stream_.userBuffer[1] );
\r
6556 stream_.userBuffer[1] = 0;
\r
6559 stream_.state = STREAM_CLOSED;
\r
6560 stream_.mode = UNINITIALIZED;
\r
6563 void RtApiPulse::callbackEvent( void )
\r
6565 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6567 if ( stream_.state == STREAM_STOPPED ) {
\r
6568 MUTEX_LOCK( &stream_.mutex );
\r
6569 while ( !pah->runnable )
\r
6570 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6572 if ( stream_.state != STREAM_RUNNING ) {
\r
6573 MUTEX_UNLOCK( &stream_.mutex );
\r
6576 MUTEX_UNLOCK( &stream_.mutex );
\r
6579 if ( stream_.state == STREAM_CLOSED ) {
\r
6580 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6581 "this shouldn't happen!";
\r
6582 error( RtAudioError::WARNING );
\r
6586 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6587 double streamTime = getStreamTime();
\r
6588 RtAudioStreamStatus status = 0;
\r
6589 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6590 stream_.bufferSize, streamTime, status,
\r
6591 stream_.callbackInfo.userData );
\r
6593 if ( doStopStream == 2 ) {
\r
6598 MUTEX_LOCK( &stream_.mutex );
\r
6599 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6600 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6602 if ( stream_.state != STREAM_RUNNING )
\r
6607 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6608 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6609 convertBuffer( stream_.deviceBuffer,
\r
6610 stream_.userBuffer[OUTPUT],
\r
6611 stream_.convertInfo[OUTPUT] );
\r
6612 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6613 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6615 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6616 formatBytes( stream_.userFormat );
\r
6618 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6619 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6620 pa_strerror( pa_error ) << ".";
\r
6621 errorText_ = errorStream_.str();
\r
6622 error( RtAudioError::WARNING );
\r
6626 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6627 if ( stream_.doConvertBuffer[INPUT] )
\r
6628 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6629 formatBytes( stream_.deviceFormat[INPUT] );
\r
6631 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6632 formatBytes( stream_.userFormat );
\r
6634 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6635 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6636 pa_strerror( pa_error ) << ".";
\r
6637 errorText_ = errorStream_.str();
\r
6638 error( RtAudioError::WARNING );
\r
6640 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6641 convertBuffer( stream_.userBuffer[INPUT],
\r
6642 stream_.deviceBuffer,
\r
6643 stream_.convertInfo[INPUT] );
\r
6648 MUTEX_UNLOCK( &stream_.mutex );
\r
6649 RtApi::tickStreamTime();
\r
6651 if ( doStopStream == 1 )
\r
6655 void RtApiPulse::startStream( void )
\r
6657 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6659 if ( stream_.state == STREAM_CLOSED ) {
\r
6660 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6661 error( RtAudioError::INVALID_USE );
\r
6664 if ( stream_.state == STREAM_RUNNING ) {
\r
6665 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6666 error( RtAudioError::WARNING );
\r
6670 MUTEX_LOCK( &stream_.mutex );
\r
6672 stream_.state = STREAM_RUNNING;
\r
6674 pah->runnable = true;
\r
6675 pthread_cond_signal( &pah->runnable_cv );
\r
6676 MUTEX_UNLOCK( &stream_.mutex );
\r
6679 void RtApiPulse::stopStream( void )
\r
6681 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6683 if ( stream_.state == STREAM_CLOSED ) {
\r
6684 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6685 error( RtAudioError::INVALID_USE );
\r
6688 if ( stream_.state == STREAM_STOPPED ) {
\r
6689 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6690 error( RtAudioError::WARNING );
\r
6694 stream_.state = STREAM_STOPPED;
\r
6695 MUTEX_LOCK( &stream_.mutex );
\r
6697 if ( pah && pah->s_play ) {
\r
6699 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6700 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6701 pa_strerror( pa_error ) << ".";
\r
6702 errorText_ = errorStream_.str();
\r
6703 MUTEX_UNLOCK( &stream_.mutex );
\r
6704 error( RtAudioError::SYSTEM_ERROR );
\r
6709 stream_.state = STREAM_STOPPED;
\r
6710 MUTEX_UNLOCK( &stream_.mutex );
\r
6713 void RtApiPulse::abortStream( void )
\r
6715 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6717 if ( stream_.state == STREAM_CLOSED ) {
\r
6718 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6719 error( RtAudioError::INVALID_USE );
\r
6722 if ( stream_.state == STREAM_STOPPED ) {
\r
6723 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6724 error( RtAudioError::WARNING );
\r
6728 stream_.state = STREAM_STOPPED;
\r
6729 MUTEX_LOCK( &stream_.mutex );
\r
6731 if ( pah && pah->s_play ) {
\r
6733 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6734 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6735 pa_strerror( pa_error ) << ".";
\r
6736 errorText_ = errorStream_.str();
\r
6737 MUTEX_UNLOCK( &stream_.mutex );
\r
6738 error( RtAudioError::SYSTEM_ERROR );
\r
6743 stream_.state = STREAM_STOPPED;
\r
6744 MUTEX_UNLOCK( &stream_.mutex );
\r
6747 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6748 unsigned int channels, unsigned int firstChannel,
\r
6749 unsigned int sampleRate, RtAudioFormat format,
\r
6750 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6752 PulseAudioHandle *pah = 0;
\r
6753 unsigned long bufferBytes = 0;
\r
6754 pa_sample_spec ss;
\r
6756 if ( device != 0 ) return false;
\r
6757 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6758 if ( channels != 1 && channels != 2 ) {
\r
6759 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6762 ss.channels = channels;
\r
6764 if ( firstChannel != 0 ) return false;
\r
6766 bool sr_found = false;
\r
6767 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6768 if ( sampleRate == *sr ) {
\r
6770 stream_.sampleRate = sampleRate;
\r
6771 ss.rate = sampleRate;
\r
6775 if ( !sr_found ) {
\r
6776 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6780 bool sf_found = 0;
\r
6781 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6782 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6783 if ( format == sf->rtaudio_format ) {
\r
6785 stream_.userFormat = sf->rtaudio_format;
\r
6786 ss.format = sf->pa_format;
\r
6790 if ( !sf_found ) {
\r
6791 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6795 // Set interleaving parameters.
\r
6796 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6797 else stream_.userInterleaved = true;
\r
6798 stream_.deviceInterleaved[mode] = true;
\r
6799 stream_.nBuffers = 1;
\r
6800 stream_.doByteSwap[mode] = false;
\r
6801 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6802 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6803 stream_.nUserChannels[mode] = channels;
\r
6804 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6805 stream_.channelOffset[mode] = 0;
\r
6807 // Allocate necessary internal buffers.
\r
6808 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6809 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6810 if ( stream_.userBuffer[mode] == NULL ) {
\r
6811 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6814 stream_.bufferSize = *bufferSize;
\r
6816 if ( stream_.doConvertBuffer[mode] ) {
\r
6818 bool makeBuffer = true;
\r
6819 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6820 if ( mode == INPUT ) {
\r
6821 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6822 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6823 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6827 if ( makeBuffer ) {
\r
6828 bufferBytes *= *bufferSize;
\r
6829 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6830 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6831 if ( stream_.deviceBuffer == NULL ) {
\r
6832 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6838 stream_.device[mode] = device;
\r
6840 // Setup the buffer conversion information structure.
\r
6841 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6843 if ( !stream_.apiHandle ) {
\r
6844 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6846 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6850 stream_.apiHandle = pah;
\r
6851 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6852 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6856 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6859 std::string streamName = "RtAudio";
\r
6860 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
6863 pa_buffer_attr buffer_attr;
\r
6864 buffer_attr.fragsize = bufferBytes;
\r
6865 buffer_attr.maxlength = -1;
\r
6867 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
6868 if ( !pah->s_rec ) {
\r
6869 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6874 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6875 if ( !pah->s_play ) {
\r
6876 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6884 if ( stream_.mode == UNINITIALIZED )
\r
6885 stream_.mode = mode;
\r
6886 else if ( stream_.mode == mode )
\r
6889 stream_.mode = DUPLEX;
\r
6891 if ( !stream_.callbackInfo.isRunning ) {
\r
6892 stream_.callbackInfo.object = this;
\r
6893 stream_.callbackInfo.isRunning = true;
\r
6894 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6895 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6900 stream_.state = STREAM_STOPPED;
\r
6904 if ( pah && stream_.callbackInfo.isRunning ) {
\r
6905 pthread_cond_destroy( &pah->runnable_cv );
\r
6907 stream_.apiHandle = 0;
\r
6910 for ( int i=0; i<2; i++ ) {
\r
6911 if ( stream_.userBuffer[i] ) {
\r
6912 free( stream_.userBuffer[i] );
\r
6913 stream_.userBuffer[i] = 0;
\r
6917 if ( stream_.deviceBuffer ) {
\r
6918 free( stream_.deviceBuffer );
\r
6919 stream_.deviceBuffer = 0;
\r
6925 //******************** End of __LINUX_PULSE__ *********************//
\r
6928 #if defined(__LINUX_OSS__)
\r
6930 #include <unistd.h>
\r
6931 #include <sys/ioctl.h>
\r
6932 #include <unistd.h>
\r
6933 #include <fcntl.h>
\r
6934 #include <sys/soundcard.h>
\r
6935 #include <errno.h>
\r
6938 static void *ossCallbackHandler(void * ptr);
\r
6940 // A structure to hold various information related to the OSS API
\r
6941 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback, capture)
  bool xrun[2];            // under/overrun flags per direction
  bool triggered;          // device trigger state
  pthread_cond_t runnable; // start/stop handshake for the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6952 RtApiOss :: RtApiOss()
\r
6954 // Nothing to do here.
\r
6957 RtApiOss :: ~RtApiOss()
\r
6959 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6962 unsigned int RtApiOss :: getDeviceCount( void )
\r
6964 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6965 if ( mixerfd == -1 ) {
\r
6966 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6967 error( RtAudioError::WARNING );
\r
6971 oss_sysinfo sysinfo;
\r
6972 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6974 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6975 error( RtAudioError::WARNING );
\r
6980 return sysinfo.numaudios;
\r
6983 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6985 RtAudio::DeviceInfo info;
\r
6986 info.probed = false;
\r
6988 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6989 if ( mixerfd == -1 ) {
\r
6990 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6991 error( RtAudioError::WARNING );
\r
6995 oss_sysinfo sysinfo;
\r
6996 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6997 if ( result == -1 ) {
\r
6999 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7000 error( RtAudioError::WARNING );
\r
7004 unsigned nDevices = sysinfo.numaudios;
\r
7005 if ( nDevices == 0 ) {
\r
7007 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
7008 error( RtAudioError::INVALID_USE );
\r
7012 if ( device >= nDevices ) {
\r
7014 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
7015 error( RtAudioError::INVALID_USE );
\r
7019 oss_audioinfo ainfo;
\r
7020 ainfo.dev = device;
\r
7021 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7023 if ( result == -1 ) {
\r
7024 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7025 errorText_ = errorStream_.str();
\r
7026 error( RtAudioError::WARNING );
\r
7031 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
7032 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
7033 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
7034 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
7035 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7038 // Probe data formats ... do for input
\r
7039 unsigned long mask = ainfo.iformats;
\r
7040 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
7041 info.nativeFormats |= RTAUDIO_SINT16;
\r
7042 if ( mask & AFMT_S8 )
\r
7043 info.nativeFormats |= RTAUDIO_SINT8;
\r
7044 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
7045 info.nativeFormats |= RTAUDIO_SINT32;
\r
7046 if ( mask & AFMT_FLOAT )
\r
7047 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7048 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
7049 info.nativeFormats |= RTAUDIO_SINT24;
\r
7051 // Check that we have at least one supported format
\r
7052 if ( info.nativeFormats == 0 ) {
\r
7053 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7054 errorText_ = errorStream_.str();
\r
7055 error( RtAudioError::WARNING );
\r
7059 // Probe the supported sample rates.
\r
7060 info.sampleRates.clear();
\r
7061 if ( ainfo.nrates ) {
\r
7062 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
7063 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7064 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
7065 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7072 // Check min and max rate values;
\r
7073 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7074 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
7075 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7079 if ( info.sampleRates.size() == 0 ) {
\r
7080 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
7081 errorText_ = errorStream_.str();
\r
7082 error( RtAudioError::WARNING );
\r
7085 info.probed = true;
\r
7086 info.name = ainfo.name;
\r
7093 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7094 unsigned int firstChannel, unsigned int sampleRate,
\r
7095 RtAudioFormat format, unsigned int *bufferSize,
\r
7096 RtAudio::StreamOptions *options )
\r
7098 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7099 if ( mixerfd == -1 ) {
\r
7100 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7104 oss_sysinfo sysinfo;
\r
7105 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7106 if ( result == -1 ) {
\r
7108 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7112 unsigned nDevices = sysinfo.numaudios;
\r
7113 if ( nDevices == 0 ) {
\r
7114 // This should not happen because a check is made before this function is called.
\r
7116 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7120 if ( device >= nDevices ) {
\r
7121 // This should not happen because a check is made before this function is called.
\r
7123 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7127 oss_audioinfo ainfo;
\r
7128 ainfo.dev = device;
\r
7129 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7131 if ( result == -1 ) {
\r
7132 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7133 errorText_ = errorStream_.str();
\r
7137 // Check if device supports input or output
\r
7138 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7139 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7140 if ( mode == OUTPUT )
\r
7141 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7143 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7144 errorText_ = errorStream_.str();
\r
7149 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7150 if ( mode == OUTPUT )
\r
7151 flags |= O_WRONLY;
\r
7152 else { // mode == INPUT
\r
7153 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7154 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7155 close( handle->id[0] );
\r
7156 handle->id[0] = 0;
\r
7157 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7158 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7159 errorText_ = errorStream_.str();
\r
7162 // Check that the number previously set channels is the same.
\r
7163 if ( stream_.nUserChannels[0] != channels ) {
\r
7164 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7165 errorText_ = errorStream_.str();
\r
7171 flags |= O_RDONLY;
\r
7174 // Set exclusive access if specified.
\r
7175 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7177 // Try to open the device.
\r
7179 fd = open( ainfo.devnode, flags, 0 );
\r
7181 if ( errno == EBUSY )
\r
7182 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7184 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7185 errorText_ = errorStream_.str();
\r
7189 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7191 if ( flags | O_RDWR ) {
\r
7192 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7193 if ( result == -1) {
\r
7194 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7195 errorText_ = errorStream_.str();
\r
7201 // Check the device channel support.
\r
7202 stream_.nUserChannels[mode] = channels;
\r
7203 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7205 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7206 errorText_ = errorStream_.str();
\r
7210 // Set the number of channels.
\r
7211 int deviceChannels = channels + firstChannel;
\r
7212 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7213 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7215 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7216 errorText_ = errorStream_.str();
\r
7219 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7221 // Get the data format mask
\r
7223 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7224 if ( result == -1 ) {
\r
7226 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7227 errorText_ = errorStream_.str();
\r
7231 // Determine how to set the device format.
\r
7232 stream_.userFormat = format;
\r
7233 int deviceFormat = -1;
\r
7234 stream_.doByteSwap[mode] = false;
\r
7235 if ( format == RTAUDIO_SINT8 ) {
\r
7236 if ( mask & AFMT_S8 ) {
\r
7237 deviceFormat = AFMT_S8;
\r
7238 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7241 else if ( format == RTAUDIO_SINT16 ) {
\r
7242 if ( mask & AFMT_S16_NE ) {
\r
7243 deviceFormat = AFMT_S16_NE;
\r
7244 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7246 else if ( mask & AFMT_S16_OE ) {
\r
7247 deviceFormat = AFMT_S16_OE;
\r
7248 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7249 stream_.doByteSwap[mode] = true;
\r
7252 else if ( format == RTAUDIO_SINT24 ) {
\r
7253 if ( mask & AFMT_S24_NE ) {
\r
7254 deviceFormat = AFMT_S24_NE;
\r
7255 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7257 else if ( mask & AFMT_S24_OE ) {
\r
7258 deviceFormat = AFMT_S24_OE;
\r
7259 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7260 stream_.doByteSwap[mode] = true;
\r
7263 else if ( format == RTAUDIO_SINT32 ) {
\r
7264 if ( mask & AFMT_S32_NE ) {
\r
7265 deviceFormat = AFMT_S32_NE;
\r
7266 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7268 else if ( mask & AFMT_S32_OE ) {
\r
7269 deviceFormat = AFMT_S32_OE;
\r
7270 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7271 stream_.doByteSwap[mode] = true;
\r
7275 if ( deviceFormat == -1 ) {
\r
7276 // The user requested format is not natively supported by the device.
\r
7277 if ( mask & AFMT_S16_NE ) {
\r
7278 deviceFormat = AFMT_S16_NE;
\r
7279 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7281 else if ( mask & AFMT_S32_NE ) {
\r
7282 deviceFormat = AFMT_S32_NE;
\r
7283 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7285 else if ( mask & AFMT_S24_NE ) {
\r
7286 deviceFormat = AFMT_S24_NE;
\r
7287 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7289 else if ( mask & AFMT_S16_OE ) {
\r
7290 deviceFormat = AFMT_S16_OE;
\r
7291 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7292 stream_.doByteSwap[mode] = true;
\r
7294 else if ( mask & AFMT_S32_OE ) {
\r
7295 deviceFormat = AFMT_S32_OE;
\r
7296 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7297 stream_.doByteSwap[mode] = true;
\r
7299 else if ( mask & AFMT_S24_OE ) {
\r
7300 deviceFormat = AFMT_S24_OE;
\r
7301 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7302 stream_.doByteSwap[mode] = true;
\r
7304 else if ( mask & AFMT_S8) {
\r
7305 deviceFormat = AFMT_S8;
\r
7306 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7310 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7311 // This really shouldn't happen ...
\r
7313 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7314 errorText_ = errorStream_.str();
\r
7318 // Set the data format.
\r
7319 int temp = deviceFormat;
\r
7320 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7321 if ( result == -1 || deviceFormat != temp ) {
\r
7323 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7324 errorText_ = errorStream_.str();
\r
7328 // Attempt to set the buffer size. According to OSS, the minimum
\r
7329 // number of buffers is two. The supposed minimum buffer size is 16
\r
7330 // bytes, so that will be our lower bound. The argument to this
\r
7331 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7332 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7333 // We'll check the actual value used near the end of the setup
\r
7335 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7336 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7338 if ( options ) buffers = options->numberOfBuffers;
\r
7339 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7340 if ( buffers < 2 ) buffers = 3;
\r
7341 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7342 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7343 if ( result == -1 ) {
\r
7345 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7346 errorText_ = errorStream_.str();
\r
7349 stream_.nBuffers = buffers;
\r
7351 // Save buffer size (in sample frames).
\r
7352 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7353 stream_.bufferSize = *bufferSize;
\r
7355 // Set the sample rate.
\r
7356 int srate = sampleRate;
\r
7357 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7358 if ( result == -1 ) {
\r
7360 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7361 errorText_ = errorStream_.str();
\r
7365 // Verify the sample rate setup worked.
\r
7366 if ( abs( srate - sampleRate ) > 100 ) {
\r
7368 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7369 errorText_ = errorStream_.str();
\r
7372 stream_.sampleRate = sampleRate;
\r
7374 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7375 // We're doing duplex setup here.
\r
7376 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7377 stream_.nDeviceChannels[0] = deviceChannels;
\r
7380 // Set interleaving parameters.
\r
7381 stream_.userInterleaved = true;
\r
7382 stream_.deviceInterleaved[mode] = true;
\r
7383 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7384 stream_.userInterleaved = false;
\r
7386 // Set flags for buffer conversion
\r
7387 stream_.doConvertBuffer[mode] = false;
\r
7388 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7389 stream_.doConvertBuffer[mode] = true;
\r
7390 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7391 stream_.doConvertBuffer[mode] = true;
\r
7392 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7393 stream_.nUserChannels[mode] > 1 )
\r
7394 stream_.doConvertBuffer[mode] = true;
\r
7396 // Allocate the stream handles if necessary and then save.
\r
7397 if ( stream_.apiHandle == 0 ) {
\r
7399 handle = new OssHandle;
\r
7401 catch ( std::bad_alloc& ) {
\r
7402 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7406 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7407 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7411 stream_.apiHandle = (void *) handle;
\r
7414 handle = (OssHandle *) stream_.apiHandle;
\r
7416 handle->id[mode] = fd;
\r
7418 // Allocate necessary internal buffers.
\r
7419 unsigned long bufferBytes;
\r
7420 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7421 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7422 if ( stream_.userBuffer[mode] == NULL ) {
\r
7423 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7427 if ( stream_.doConvertBuffer[mode] ) {
\r
7429 bool makeBuffer = true;
\r
7430 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7431 if ( mode == INPUT ) {
\r
7432 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7433 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7434 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7438 if ( makeBuffer ) {
\r
7439 bufferBytes *= *bufferSize;
\r
7440 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7441 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7442 if ( stream_.deviceBuffer == NULL ) {
\r
7443 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7449 stream_.device[mode] = device;
\r
7450 stream_.state = STREAM_STOPPED;
\r
7452 // Setup the buffer conversion information structure.
\r
7453 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7455 // Setup thread if necessary.
\r
7456 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7457 // We had already set up an output stream.
\r
7458 stream_.mode = DUPLEX;
\r
7459 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7462 stream_.mode = mode;
\r
7464 // Setup callback thread.
\r
7465 stream_.callbackInfo.object = (void *) this;
\r
7467 // Set the thread attributes for joinable and realtime scheduling
\r
7468 // priority. The higher priority will only take affect if the
\r
7469 // program is run as root or suid.
\r
7470 pthread_attr_t attr;
\r
7471 pthread_attr_init( &attr );
\r
7472 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7473 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7474 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7475 struct sched_param param;
\r
7476 int priority = options->priority;
\r
7477 int min = sched_get_priority_min( SCHED_RR );
\r
7478 int max = sched_get_priority_max( SCHED_RR );
\r
7479 if ( priority < min ) priority = min;
\r
7480 else if ( priority > max ) priority = max;
\r
7481 param.sched_priority = priority;
\r
7482 pthread_attr_setschedparam( &attr, ¶m );
\r
7483 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7486 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7488 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7491 stream_.callbackInfo.isRunning = true;
\r
7492 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7493 pthread_attr_destroy( &attr );
\r
7495 stream_.callbackInfo.isRunning = false;
\r
7496 errorText_ = "RtApiOss::error creating callback thread!";
\r
7505 pthread_cond_destroy( &handle->runnable );
\r
7506 if ( handle->id[0] ) close( handle->id[0] );
\r
7507 if ( handle->id[1] ) close( handle->id[1] );
\r
7509 stream_.apiHandle = 0;
\r
7512 for ( int i=0; i<2; i++ ) {
\r
7513 if ( stream_.userBuffer[i] ) {
\r
7514 free( stream_.userBuffer[i] );
\r
7515 stream_.userBuffer[i] = 0;
\r
7519 if ( stream_.deviceBuffer ) {
\r
7520 free( stream_.deviceBuffer );
\r
7521 stream_.deviceBuffer = 0;
\r
7527 void RtApiOss :: closeStream()
\r
7529 if ( stream_.state == STREAM_CLOSED ) {
\r
7530 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7531 error( RtAudioError::WARNING );
\r
7535 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7536 stream_.callbackInfo.isRunning = false;
\r
7537 MUTEX_LOCK( &stream_.mutex );
\r
7538 if ( stream_.state == STREAM_STOPPED )
\r
7539 pthread_cond_signal( &handle->runnable );
\r
7540 MUTEX_UNLOCK( &stream_.mutex );
\r
7541 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7543 if ( stream_.state == STREAM_RUNNING ) {
\r
7544 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7545 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7547 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7548 stream_.state = STREAM_STOPPED;
\r
7552 pthread_cond_destroy( &handle->runnable );
\r
7553 if ( handle->id[0] ) close( handle->id[0] );
\r
7554 if ( handle->id[1] ) close( handle->id[1] );
\r
7556 stream_.apiHandle = 0;
\r
7559 for ( int i=0; i<2; i++ ) {
\r
7560 if ( stream_.userBuffer[i] ) {
\r
7561 free( stream_.userBuffer[i] );
\r
7562 stream_.userBuffer[i] = 0;
\r
7566 if ( stream_.deviceBuffer ) {
\r
7567 free( stream_.deviceBuffer );
\r
7568 stream_.deviceBuffer = 0;
\r
7571 stream_.mode = UNINITIALIZED;
\r
7572 stream_.state = STREAM_CLOSED;
\r
7575 void RtApiOss :: startStream()
\r
7578 if ( stream_.state == STREAM_RUNNING ) {
\r
7579 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7580 error( RtAudioError::WARNING );
\r
7584 MUTEX_LOCK( &stream_.mutex );
\r
7586 stream_.state = STREAM_RUNNING;
\r
7588 // No need to do anything else here ... OSS automatically starts
\r
7589 // when fed samples.
\r
7591 MUTEX_UNLOCK( &stream_.mutex );
\r
7593 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7594 pthread_cond_signal( &handle->runnable );
\r
7597 void RtApiOss :: stopStream()
\r
7600 if ( stream_.state == STREAM_STOPPED ) {
\r
7601 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7602 error( RtAudioError::WARNING );
\r
7606 MUTEX_LOCK( &stream_.mutex );
\r
7608 // The state might change while waiting on a mutex.
\r
7609 if ( stream_.state == STREAM_STOPPED ) {
\r
7610 MUTEX_UNLOCK( &stream_.mutex );
\r
7615 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7616 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7618 // Flush the output with zeros a few times.
\r
7621 RtAudioFormat format;
\r
7623 if ( stream_.doConvertBuffer[0] ) {
\r
7624 buffer = stream_.deviceBuffer;
\r
7625 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7626 format = stream_.deviceFormat[0];
\r
7629 buffer = stream_.userBuffer[0];
\r
7630 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7631 format = stream_.userFormat;
\r
7634 memset( buffer, 0, samples * formatBytes(format) );
\r
7635 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7636 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7637 if ( result == -1 ) {
\r
7638 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7639 error( RtAudioError::WARNING );
\r
7643 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7644 if ( result == -1 ) {
\r
7645 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7646 errorText_ = errorStream_.str();
\r
7649 handle->triggered = false;
\r
7652 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7653 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7654 if ( result == -1 ) {
\r
7655 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7656 errorText_ = errorStream_.str();
\r
7662 stream_.state = STREAM_STOPPED;
\r
7663 MUTEX_UNLOCK( &stream_.mutex );
\r
7665 if ( result != -1 ) return;
\r
7666 error( RtAudioError::SYSTEM_ERROR );
\r
7669 void RtApiOss :: abortStream()
\r
7672 if ( stream_.state == STREAM_STOPPED ) {
\r
7673 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7674 error( RtAudioError::WARNING );
\r
7678 MUTEX_LOCK( &stream_.mutex );
\r
7680 // The state might change while waiting on a mutex.
\r
7681 if ( stream_.state == STREAM_STOPPED ) {
\r
7682 MUTEX_UNLOCK( &stream_.mutex );
\r
7687 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7688 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7689 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7690 if ( result == -1 ) {
\r
7691 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7692 errorText_ = errorStream_.str();
\r
7695 handle->triggered = false;
\r
7698 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7699 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7700 if ( result == -1 ) {
\r
7701 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7702 errorText_ = errorStream_.str();
\r
7708 stream_.state = STREAM_STOPPED;
\r
7709 MUTEX_UNLOCK( &stream_.mutex );
\r
7711 if ( result != -1 ) return;
\r
7712 error( RtAudioError::SYSTEM_ERROR );
\r
7715 void RtApiOss :: callbackEvent()
\r
7717 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7718 if ( stream_.state == STREAM_STOPPED ) {
\r
7719 MUTEX_LOCK( &stream_.mutex );
\r
7720 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7721 if ( stream_.state != STREAM_RUNNING ) {
\r
7722 MUTEX_UNLOCK( &stream_.mutex );
\r
7725 MUTEX_UNLOCK( &stream_.mutex );
\r
7728 if ( stream_.state == STREAM_CLOSED ) {
\r
7729 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7730 error( RtAudioError::WARNING );
\r
7734 // Invoke user callback to get fresh output data.
\r
7735 int doStopStream = 0;
\r
7736 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7737 double streamTime = getStreamTime();
\r
7738 RtAudioStreamStatus status = 0;
\r
7739 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7740 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7741 handle->xrun[0] = false;
\r
7743 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7744 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7745 handle->xrun[1] = false;
\r
7747 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7748 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7749 if ( doStopStream == 2 ) {
\r
7750 this->abortStream();
\r
7754 MUTEX_LOCK( &stream_.mutex );
\r
7756 // The state might change while waiting on a mutex.
\r
7757 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7762 RtAudioFormat format;
\r
7764 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7766 // Setup parameters and do buffer conversion if necessary.
\r
7767 if ( stream_.doConvertBuffer[0] ) {
\r
7768 buffer = stream_.deviceBuffer;
\r
7769 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7770 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7771 format = stream_.deviceFormat[0];
\r
7774 buffer = stream_.userBuffer[0];
\r
7775 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7776 format = stream_.userFormat;
\r
7779 // Do byte swapping if necessary.
\r
7780 if ( stream_.doByteSwap[0] )
\r
7781 byteSwapBuffer( buffer, samples, format );
\r
7783 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7785 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7786 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7787 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7788 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7789 handle->triggered = true;
\r
7792 // Write samples to device.
\r
7793 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7795 if ( result == -1 ) {
\r
7796 // We'll assume this is an underrun, though there isn't a
\r
7797 // specific means for determining that.
\r
7798 handle->xrun[0] = true;
\r
7799 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7800 error( RtAudioError::WARNING );
\r
7801 // Continue on to input section.
\r
7805 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7807 // Setup parameters.
\r
7808 if ( stream_.doConvertBuffer[1] ) {
\r
7809 buffer = stream_.deviceBuffer;
\r
7810 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7811 format = stream_.deviceFormat[1];
\r
7814 buffer = stream_.userBuffer[1];
\r
7815 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7816 format = stream_.userFormat;
\r
7819 // Read samples from device.
\r
7820 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7822 if ( result == -1 ) {
\r
7823 // We'll assume this is an overrun, though there isn't a
\r
7824 // specific means for determining that.
\r
7825 handle->xrun[1] = true;
\r
7826 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7827 error( RtAudioError::WARNING );
\r
7831 // Do byte swapping if necessary.
\r
7832 if ( stream_.doByteSwap[1] )
\r
7833 byteSwapBuffer( buffer, samples, format );
\r
7835 // Do buffer conversion if necessary.
\r
7836 if ( stream_.doConvertBuffer[1] )
\r
7837 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7841 MUTEX_UNLOCK( &stream_.mutex );
\r
7843 RtApi::tickStreamTime();
\r
7844 if ( doStopStream == 1 ) this->stopStream();
\r
7847 static void *ossCallbackHandler( void *ptr )
\r
7849 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7850 RtApiOss *object = (RtApiOss *) info->object;
\r
7851 bool *isRunning = &info->isRunning;
\r
7853 while ( *isRunning == true ) {
\r
7854 pthread_testcancel();
\r
7855 object->callbackEvent();
\r
7858 pthread_exit( NULL );
\r
7861 //******************** End of __LINUX_OSS__ *********************//
\r
7865 // *************************************************** //
\r
7867 // Protected common (OS-independent) RtAudio methods.
\r
7869 // *************************************************** //
\r
7871 // This method can be modified to control the behavior of error
\r
7872 // message printing.
\r
7873 void RtApi :: error( RtAudioError::Type type )
\r
7875 errorStream_.str(""); // clear the ostringstream
\r
7877 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7878 if ( errorCallback ) {
\r
7879 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7881 if ( firstErrorOccurred_ )
\r
7884 firstErrorOccurred_ = true;
\r
7885 const std::string errorMessage = errorText_;
\r
7887 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7888 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7892 errorCallback( type, errorMessage );
\r
7893 firstErrorOccurred_ = false;
\r
7897 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
7898 std::cerr << '\n' << errorText_ << "\n\n";
\r
7899 else if ( type != RtAudioError::WARNING )
\r
7900 throw( RtAudioError( errorText_, type ) );
\r
7903 void RtApi :: verifyStream()
\r
7905 if ( stream_.state == STREAM_CLOSED ) {
\r
7906 errorText_ = "RtApi:: a stream is not open!";
\r
7907 error( RtAudioError::INVALID_USE );
\r
7911 void RtApi :: clearStreamInfo()
\r
7913 stream_.mode = UNINITIALIZED;
\r
7914 stream_.state = STREAM_CLOSED;
\r
7915 stream_.sampleRate = 0;
\r
7916 stream_.bufferSize = 0;
\r
7917 stream_.nBuffers = 0;
\r
7918 stream_.userFormat = 0;
\r
7919 stream_.userInterleaved = true;
\r
7920 stream_.streamTime = 0.0;
\r
7921 stream_.apiHandle = 0;
\r
7922 stream_.deviceBuffer = 0;
\r
7923 stream_.callbackInfo.callback = 0;
\r
7924 stream_.callbackInfo.userData = 0;
\r
7925 stream_.callbackInfo.isRunning = false;
\r
7926 stream_.callbackInfo.errorCallback = 0;
\r
7927 for ( int i=0; i<2; i++ ) {
\r
7928 stream_.device[i] = 11111;
\r
7929 stream_.doConvertBuffer[i] = false;
\r
7930 stream_.deviceInterleaved[i] = true;
\r
7931 stream_.doByteSwap[i] = false;
\r
7932 stream_.nUserChannels[i] = 0;
\r
7933 stream_.nDeviceChannels[i] = 0;
\r
7934 stream_.channelOffset[i] = 0;
\r
7935 stream_.deviceFormat[i] = 0;
\r
7936 stream_.latency[i] = 0;
\r
7937 stream_.userBuffer[i] = 0;
\r
7938 stream_.convertInfo[i].channels = 0;
\r
7939 stream_.convertInfo[i].inJump = 0;
\r
7940 stream_.convertInfo[i].outJump = 0;
\r
7941 stream_.convertInfo[i].inFormat = 0;
\r
7942 stream_.convertInfo[i].outFormat = 0;
\r
7943 stream_.convertInfo[i].inOffset.clear();
\r
7944 stream_.convertInfo[i].outOffset.clear();
\r
7948 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7950 if ( format == RTAUDIO_SINT16 )
\r
7952 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7954 else if ( format == RTAUDIO_FLOAT64 )
\r
7956 else if ( format == RTAUDIO_SINT24 )
\r
7958 else if ( format == RTAUDIO_SINT8 )
\r
7961 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7962 error( RtAudioError::WARNING );
\r
7967 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7969 if ( mode == INPUT ) { // convert device to user buffer
\r
7970 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7971 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7972 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7973 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7975 else { // convert user to device buffer
\r
7976 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7977 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7978 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7979 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7982 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7983 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7985 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7987 // Set up the interleave/deinterleave offsets.
\r
7988 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7989 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7990 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7991 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7992 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7993 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7994 stream_.convertInfo[mode].inJump = 1;
\r
7998 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7999 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8000 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8001 stream_.convertInfo[mode].outJump = 1;
\r
8005 else { // no (de)interleaving
\r
8006 if ( stream_.userInterleaved ) {
\r
8007 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8008 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8009 stream_.convertInfo[mode].outOffset.push_back( k );
\r
8013 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8014 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
8015 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8016 stream_.convertInfo[mode].inJump = 1;
\r
8017 stream_.convertInfo[mode].outJump = 1;
\r
8022 // Add channel offset.
\r
8023 if ( firstChannel > 0 ) {
\r
8024 if ( stream_.deviceInterleaved[mode] ) {
\r
8025 if ( mode == OUTPUT ) {
\r
8026 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8027 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
8030 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8031 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
8035 if ( mode == OUTPUT ) {
\r
8036 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8037 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8040 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8041 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8047 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8049 // This function does format conversion, input/output channel compensation, and
\r
8050 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8051 // the lower three bytes of a 32-bit integer.
\r
8053 // Clear our device buffer when in/out duplex device channels are different
\r
8054 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8055 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8056 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8059 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8061 Float64 *out = (Float64 *)outBuffer;
\r
8063 if (info.inFormat == RTAUDIO_SINT8) {
\r
8064 signed char *in = (signed char *)inBuffer;
\r
8065 scale = 1.0 / 127.5;
\r
8066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8067 for (j=0; j<info.channels; j++) {
\r
8068 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8069 out[info.outOffset[j]] += 0.5;
\r
8070 out[info.outOffset[j]] *= scale;
\r
8072 in += info.inJump;
\r
8073 out += info.outJump;
\r
8076 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8077 Int16 *in = (Int16 *)inBuffer;
\r
8078 scale = 1.0 / 32767.5;
\r
8079 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8080 for (j=0; j<info.channels; j++) {
\r
8081 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8082 out[info.outOffset[j]] += 0.5;
\r
8083 out[info.outOffset[j]] *= scale;
\r
8085 in += info.inJump;
\r
8086 out += info.outJump;
\r
8089 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8090 Int24 *in = (Int24 *)inBuffer;
\r
8091 scale = 1.0 / 8388607.5;
\r
8092 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8093 for (j=0; j<info.channels; j++) {
\r
8094 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8095 out[info.outOffset[j]] += 0.5;
\r
8096 out[info.outOffset[j]] *= scale;
\r
8098 in += info.inJump;
\r
8099 out += info.outJump;
\r
8102 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8103 Int32 *in = (Int32 *)inBuffer;
\r
8104 scale = 1.0 / 2147483647.5;
\r
8105 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8106 for (j=0; j<info.channels; j++) {
\r
8107 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8108 out[info.outOffset[j]] += 0.5;
\r
8109 out[info.outOffset[j]] *= scale;
\r
8111 in += info.inJump;
\r
8112 out += info.outJump;
\r
8115 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8116 Float32 *in = (Float32 *)inBuffer;
\r
8117 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8118 for (j=0; j<info.channels; j++) {
\r
8119 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8121 in += info.inJump;
\r
8122 out += info.outJump;
\r
8125 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8126 // Channel compensation and/or (de)interleaving only.
\r
8127 Float64 *in = (Float64 *)inBuffer;
\r
8128 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8129 for (j=0; j<info.channels; j++) {
\r
8130 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8132 in += info.inJump;
\r
8133 out += info.outJump;
\r
8137 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8139 Float32 *out = (Float32 *)outBuffer;
\r
8141 if (info.inFormat == RTAUDIO_SINT8) {
\r
8142 signed char *in = (signed char *)inBuffer;
\r
8143 scale = (Float32) ( 1.0 / 127.5 );
\r
8144 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8145 for (j=0; j<info.channels; j++) {
\r
8146 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8147 out[info.outOffset[j]] += 0.5;
\r
8148 out[info.outOffset[j]] *= scale;
\r
8150 in += info.inJump;
\r
8151 out += info.outJump;
\r
8154 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8155 Int16 *in = (Int16 *)inBuffer;
\r
8156 scale = (Float32) ( 1.0 / 32767.5 );
\r
8157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8158 for (j=0; j<info.channels; j++) {
\r
8159 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8160 out[info.outOffset[j]] += 0.5;
\r
8161 out[info.outOffset[j]] *= scale;
\r
8163 in += info.inJump;
\r
8164 out += info.outJump;
\r
8167 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8168 Int24 *in = (Int24 *)inBuffer;
\r
8169 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8170 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8171 for (j=0; j<info.channels; j++) {
\r
8172 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8173 out[info.outOffset[j]] += 0.5;
\r
8174 out[info.outOffset[j]] *= scale;
\r
8176 in += info.inJump;
\r
8177 out += info.outJump;
\r
8180 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8181 Int32 *in = (Int32 *)inBuffer;
\r
8182 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8184 for (j=0; j<info.channels; j++) {
\r
8185 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8186 out[info.outOffset[j]] += 0.5;
\r
8187 out[info.outOffset[j]] *= scale;
\r
8189 in += info.inJump;
\r
8190 out += info.outJump;
\r
8193 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8194 // Channel compensation and/or (de)interleaving only.
\r
8195 Float32 *in = (Float32 *)inBuffer;
\r
8196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8197 for (j=0; j<info.channels; j++) {
\r
8198 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8200 in += info.inJump;
\r
8201 out += info.outJump;
\r
8204 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8205 Float64 *in = (Float64 *)inBuffer;
\r
8206 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8207 for (j=0; j<info.channels; j++) {
\r
8208 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8210 in += info.inJump;
\r
8211 out += info.outJump;
\r
8215 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8216 Int32 *out = (Int32 *)outBuffer;
\r
8217 if (info.inFormat == RTAUDIO_SINT8) {
\r
8218 signed char *in = (signed char *)inBuffer;
\r
8219 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8220 for (j=0; j<info.channels; j++) {
\r
8221 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8222 out[info.outOffset[j]] <<= 24;
\r
8224 in += info.inJump;
\r
8225 out += info.outJump;
\r
8228 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8229 Int16 *in = (Int16 *)inBuffer;
\r
8230 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8231 for (j=0; j<info.channels; j++) {
\r
8232 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8233 out[info.outOffset[j]] <<= 16;
\r
8235 in += info.inJump;
\r
8236 out += info.outJump;
\r
8239 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8240 Int24 *in = (Int24 *)inBuffer;
\r
8241 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8242 for (j=0; j<info.channels; j++) {
\r
8243 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8244 out[info.outOffset[j]] <<= 8;
\r
8246 in += info.inJump;
\r
8247 out += info.outJump;
\r
8250 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8251 // Channel compensation and/or (de)interleaving only.
\r
8252 Int32 *in = (Int32 *)inBuffer;
\r
8253 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8254 for (j=0; j<info.channels; j++) {
\r
8255 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8257 in += info.inJump;
\r
8258 out += info.outJump;
\r
8261 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8262 Float32 *in = (Float32 *)inBuffer;
\r
8263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8264 for (j=0; j<info.channels; j++) {
\r
8265 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8267 in += info.inJump;
\r
8268 out += info.outJump;
\r
8271 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8272 Float64 *in = (Float64 *)inBuffer;
\r
8273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8274 for (j=0; j<info.channels; j++) {
\r
8275 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8277 in += info.inJump;
\r
8278 out += info.outJump;
\r
8282 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8283 Int24 *out = (Int24 *)outBuffer;
\r
8284 if (info.inFormat == RTAUDIO_SINT8) {
\r
8285 signed char *in = (signed char *)inBuffer;
\r
8286 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8287 for (j=0; j<info.channels; j++) {
\r
8288 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8289 //out[info.outOffset[j]] <<= 16;
\r
8291 in += info.inJump;
\r
8292 out += info.outJump;
\r
8295 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8296 Int16 *in = (Int16 *)inBuffer;
\r
8297 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8298 for (j=0; j<info.channels; j++) {
\r
8299 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8300 //out[info.outOffset[j]] <<= 8;
\r
8302 in += info.inJump;
\r
8303 out += info.outJump;
\r
8306 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8307 // Channel compensation and/or (de)interleaving only.
\r
8308 Int24 *in = (Int24 *)inBuffer;
\r
8309 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8310 for (j=0; j<info.channels; j++) {
\r
8311 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8313 in += info.inJump;
\r
8314 out += info.outJump;
\r
8317 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8318 Int32 *in = (Int32 *)inBuffer;
\r
8319 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8320 for (j=0; j<info.channels; j++) {
\r
8321 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8322 //out[info.outOffset[j]] >>= 8;
\r
8324 in += info.inJump;
\r
8325 out += info.outJump;
\r
8328 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8329 Float32 *in = (Float32 *)inBuffer;
\r
8330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8331 for (j=0; j<info.channels; j++) {
\r
8332 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8334 in += info.inJump;
\r
8335 out += info.outJump;
\r
8338 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8339 Float64 *in = (Float64 *)inBuffer;
\r
8340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8341 for (j=0; j<info.channels; j++) {
\r
8342 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8344 in += info.inJump;
\r
8345 out += info.outJump;
\r
8349 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8350 Int16 *out = (Int16 *)outBuffer;
\r
8351 if (info.inFormat == RTAUDIO_SINT8) {
\r
8352 signed char *in = (signed char *)inBuffer;
\r
8353 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8354 for (j=0; j<info.channels; j++) {
\r
8355 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8356 out[info.outOffset[j]] <<= 8;
\r
8358 in += info.inJump;
\r
8359 out += info.outJump;
\r
8362 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8363 // Channel compensation and/or (de)interleaving only.
\r
8364 Int16 *in = (Int16 *)inBuffer;
\r
8365 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8366 for (j=0; j<info.channels; j++) {
\r
8367 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8369 in += info.inJump;
\r
8370 out += info.outJump;
\r
8373 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8374 Int24 *in = (Int24 *)inBuffer;
\r
8375 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8376 for (j=0; j<info.channels; j++) {
\r
8377 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8379 in += info.inJump;
\r
8380 out += info.outJump;
\r
8383 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8384 Int32 *in = (Int32 *)inBuffer;
\r
8385 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8386 for (j=0; j<info.channels; j++) {
\r
8387 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8389 in += info.inJump;
\r
8390 out += info.outJump;
\r
8393 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8394 Float32 *in = (Float32 *)inBuffer;
\r
8395 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8396 for (j=0; j<info.channels; j++) {
\r
8397 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8399 in += info.inJump;
\r
8400 out += info.outJump;
\r
8403 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8404 Float64 *in = (Float64 *)inBuffer;
\r
8405 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8406 for (j=0; j<info.channels; j++) {
\r
8407 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8409 in += info.inJump;
\r
8410 out += info.outJump;
\r
8414 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8415 signed char *out = (signed char *)outBuffer;
\r
8416 if (info.inFormat == RTAUDIO_SINT8) {
\r
8417 // Channel compensation and/or (de)interleaving only.
\r
8418 signed char *in = (signed char *)inBuffer;
\r
8419 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8420 for (j=0; j<info.channels; j++) {
\r
8421 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8423 in += info.inJump;
\r
8424 out += info.outJump;
\r
8427 if (info.inFormat == RTAUDIO_SINT16) {
\r
8428 Int16 *in = (Int16 *)inBuffer;
\r
8429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8430 for (j=0; j<info.channels; j++) {
\r
8431 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8433 in += info.inJump;
\r
8434 out += info.outJump;
\r
8437 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8438 Int24 *in = (Int24 *)inBuffer;
\r
8439 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8440 for (j=0; j<info.channels; j++) {
\r
8441 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8443 in += info.inJump;
\r
8444 out += info.outJump;
\r
8447 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8448 Int32 *in = (Int32 *)inBuffer;
\r
8449 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8450 for (j=0; j<info.channels; j++) {
\r
8451 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8453 in += info.inJump;
\r
8454 out += info.outJump;
\r
8457 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8458 Float32 *in = (Float32 *)inBuffer;
\r
8459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8460 for (j=0; j<info.channels; j++) {
\r
8461 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8463 in += info.inJump;
\r
8464 out += info.outJump;
\r
8467 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8468 Float64 *in = (Float64 *)inBuffer;
\r
8469 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8470 for (j=0; j<info.channels; j++) {
\r
8471 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8473 in += info.inJump;
\r
8474 out += info.outJump;
\r
8480 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8481 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8482 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8484 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8486 register char val;
\r
8487 register char *ptr;
\r
8490 if ( format == RTAUDIO_SINT16 ) {
\r
8491 for ( unsigned int i=0; i<samples; i++ ) {
\r
8492 // Swap 1st and 2nd bytes.
\r
8494 *(ptr) = *(ptr+1);
\r
8497 // Increment 2 bytes.
\r
8501 else if ( format == RTAUDIO_SINT32 ||
\r
8502 format == RTAUDIO_FLOAT32 ) {
\r
8503 for ( unsigned int i=0; i<samples; i++ ) {
\r
8504 // Swap 1st and 4th bytes.
\r
8506 *(ptr) = *(ptr+3);
\r
8509 // Swap 2nd and 3rd bytes.
\r
8512 *(ptr) = *(ptr+1);
\r
8515 // Increment 3 more bytes.
\r
8519 else if ( format == RTAUDIO_SINT24 ) {
\r
8520 for ( unsigned int i=0; i<samples; i++ ) {
\r
8521 // Swap 1st and 3rd bytes.
\r
8523 *(ptr) = *(ptr+2);
\r
8526 // Increment 2 more bytes.
\r
8530 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8531 for ( unsigned int i=0; i<samples; i++ ) {
\r
8532 // Swap 1st and 8th bytes
\r
8534 *(ptr) = *(ptr+7);
\r
8537 // Swap 2nd and 7th bytes
\r
8540 *(ptr) = *(ptr+5);
\r
8543 // Swap 3rd and 6th bytes
\r
8546 *(ptr) = *(ptr+3);
\r
8549 // Swap 4th and 5th bytes
\r
8552 *(ptr) = *(ptr+1);
\r
8555 // Increment 5 more bytes.
\r
8561 // Indentation settings for Vim and Emacs
\r
8563 // Local Variables:
\r
8564 // c-basic-offset: 2
\r
8565 // indent-tabs-mode: nil
\r
8568 // vim: et sts=2 sw=2
\r