1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Windows critical sections, POSIX
// pthread mutexes elsewhere, and no-op dummies when no API is compiled.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return std::string( RTAUDIO_VERSION );
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_DS__)
\r
105 apis.push_back( WINDOWS_DS );
\r
107 #if defined(__MACOSX_CORE__)
\r
108 apis.push_back( MACOSX_CORE );
\r
110 #if defined(__RTAUDIO_DUMMY__)
\r
111 apis.push_back( RTAUDIO_DUMMY );
\r
115 void RtAudio :: openRtApi( RtAudio::Api api )
\r
121 #if defined(__UNIX_JACK__)
\r
122 if ( api == UNIX_JACK )
\r
123 rtapi_ = new RtApiJack();
\r
125 #if defined(__LINUX_ALSA__)
\r
126 if ( api == LINUX_ALSA )
\r
127 rtapi_ = new RtApiAlsa();
\r
129 #if defined(__LINUX_PULSE__)
\r
130 if ( api == LINUX_PULSE )
\r
131 rtapi_ = new RtApiPulse();
\r
133 #if defined(__LINUX_OSS__)
\r
134 if ( api == LINUX_OSS )
\r
135 rtapi_ = new RtApiOss();
\r
137 #if defined(__WINDOWS_ASIO__)
\r
138 if ( api == WINDOWS_ASIO )
\r
139 rtapi_ = new RtApiAsio();
\r
141 #if defined(__WINDOWS_DS__)
\r
142 if ( api == WINDOWS_DS )
\r
143 rtapi_ = new RtApiDs();
\r
145 #if defined(__MACOSX_CORE__)
\r
146 if ( api == MACOSX_CORE )
\r
147 rtapi_ = new RtApiCore();
\r
149 #if defined(__RTAUDIO_DUMMY__)
\r
150 if ( api == RTAUDIO_DUMMY )
\r
151 rtapi_ = new RtApiDummy();
\r
155 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
159 if ( api != UNSPECIFIED ) {
\r
160 // Attempt to open the specified API.
\r
162 if ( rtapi_ ) return;
\r
164 // No compiled support for specified API value. Issue a debug
\r
165 // warning and continue as if no API was specified.
\r
166 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
169 // Iterate through the compiled APIs and return as soon as we find
\r
170 // one with at least one device or we reach the end of the list.
\r
171 std::vector< RtAudio::Api > apis;
\r
172 getCompiledApi( apis );
\r
173 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
174 openRtApi( apis[i] );
\r
175 if ( rtapi_->getDeviceCount() ) break;
\r
178 if ( rtapi_ ) return;
\r
180 // It should not be possible to get here because the preprocessor
\r
181 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
182 // API-specific definitions are passed to the compiler. But just in
\r
183 // case something weird happens, we'll print out an error message.
\r
184 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
185 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
188 RtAudio :: ~RtAudio() throw()
\r
193 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
194 RtAudio::StreamParameters *inputParameters,
\r
195 RtAudioFormat format, unsigned int sampleRate,
\r
196 unsigned int *bufferFrames,
\r
197 RtAudioCallback callback, void *userData,
\r
198 RtAudio::StreamOptions *options,
\r
199 RtAudioErrorCallback errorCallback )
\r
201 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
202 sampleRate, bufferFrames, callback,
\r
203 userData, options, errorCallback );
\r
206 // *************************************************** //
\r
208 // Public RtApi definitions (see end of file for
\r
209 // private or protected utility functions).
\r
211 // *************************************************** //
\r
215 stream_.state = STREAM_CLOSED;
\r
216 stream_.mode = UNINITIALIZED;
\r
217 stream_.apiHandle = 0;
\r
218 stream_.userBuffer[0] = 0;
\r
219 stream_.userBuffer[1] = 0;
\r
220 MUTEX_INITIALIZE( &stream_.mutex );
\r
221 showWarnings_ = true;
\r
222 firstErrorOccurred_ = false;
\r
227 MUTEX_DESTROY( &stream_.mutex );
\r
230 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
231 RtAudio::StreamParameters *iParams,
\r
232 RtAudioFormat format, unsigned int sampleRate,
\r
233 unsigned int *bufferFrames,
\r
234 RtAudioCallback callback, void *userData,
\r
235 RtAudio::StreamOptions *options,
\r
236 RtAudioErrorCallback errorCallback )
\r
238 if ( stream_.state != STREAM_CLOSED ) {
\r
239 errorText_ = "RtApi::openStream: a stream is already open!";
\r
240 error( RtAudioError::INVALID_USE );
\r
244 if ( oParams && oParams->nChannels < 1 ) {
\r
245 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
246 error( RtAudioError::INVALID_USE );
\r
250 if ( iParams && iParams->nChannels < 1 ) {
\r
251 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
252 error( RtAudioError::INVALID_USE );
\r
256 if ( oParams == NULL && iParams == NULL ) {
\r
257 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 if ( formatBytes(format) == 0 ) {
\r
263 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 unsigned int nDevices = getDeviceCount();
\r
269 unsigned int oChannels = 0;
\r
271 oChannels = oParams->nChannels;
\r
272 if ( oParams->deviceId >= nDevices ) {
\r
273 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
274 error( RtAudioError::INVALID_USE );
\r
279 unsigned int iChannels = 0;
\r
281 iChannels = iParams->nChannels;
\r
282 if ( iParams->deviceId >= nDevices ) {
\r
283 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
284 error( RtAudioError::INVALID_USE );
\r
292 if ( oChannels > 0 ) {
\r
294 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
295 sampleRate, format, bufferFrames, options );
\r
296 if ( result == false ) {
\r
297 error( RtAudioError::SYSTEM_ERROR );
\r
302 if ( iChannels > 0 ) {
\r
304 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 if ( oChannels > 0 ) closeStream();
\r
308 error( RtAudioError::SYSTEM_ERROR );
\r
313 stream_.callbackInfo.callback = (void *) callback;
\r
314 stream_.callbackInfo.userData = userData;
\r
315 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
317 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
318 stream_.state = STREAM_STOPPED;
\r
321 unsigned int RtApi :: getDefaultInputDevice( void )
\r
323 // Should be implemented in subclasses if possible.
\r
327 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
329 // Should be implemented in subclasses if possible.
\r
333 void RtApi :: closeStream( void )
\r
335 // MUST be implemented in subclasses!
\r
339 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
340 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
341 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
342 RtAudio::StreamOptions * /*options*/ )
\r
344 // MUST be implemented in subclasses!
\r
348 void RtApi :: tickStreamTime( void )
\r
350 // Subclasses that do not provide their own implementation of
\r
351 // getStreamTime should call this function once per buffer I/O to
\r
352 // provide basic stream time support.
\r
354 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
356 #if defined( HAVE_GETTIMEOFDAY )
\r
357 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
361 long RtApi :: getStreamLatency( void )
\r
365 long totalLatency = 0;
\r
366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
367 totalLatency = stream_.latency[0];
\r
368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
369 totalLatency += stream_.latency[1];
\r
371 return totalLatency;
\r
374 double RtApi :: getStreamTime( void )
\r
378 #if defined( HAVE_GETTIMEOFDAY )
\r
379 // Return a very accurate estimate of the stream time by
\r
380 // adding in the elapsed time since the last tick.
\r
381 struct timeval then;
\r
382 struct timeval now;
\r
384 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
385 return stream_.streamTime;
\r
387 gettimeofday( &now, NULL );
\r
388 then = stream_.lastTickTimestamp;
\r
389 return stream_.streamTime +
\r
390 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
391 (then.tv_sec + 0.000001 * then.tv_usec));
\r
393 return stream_.streamTime;
\r
397 unsigned int RtApi :: getStreamSampleRate( void )
\r
401 return stream_.sampleRate;
\r
405 // *************************************************** //
\r
407 // OS/API-specific methods.
\r
409 // *************************************************** //
\r
411 #if defined(__MACOSX_CORE__)
\r
413 // The OS X CoreAudio API is designed to use a separate callback
\r
414 // procedure for each of its audio devices. A single RtAudio duplex
\r
415 // stream using two different devices is supported here, though it
\r
416 // cannot be guaranteed to always behave correctly because we cannot
\r
417 // synchronize these two callbacks.
\r
419 // A property listener is installed for over/underrun information.
\r
420 // However, no functionality is currently provided to allow property
\r
421 // listeners to trigger user handlers because it is unclear what could
\r
422 // be done if a critical stream parameter (buffer size, sample rate,
\r
423 // device disconnect) notification arrived. The listeners entail
\r
424 // quite a bit of extra code and most likely, a user program wouldn't
\r
425 // be prepared for the result anyway. However, we do provide a flag
\r
426 // to the client callback function to inform of an over/underrun.
\r
428 // A structure to hold various information related to the CoreAudio API
\r
430 struct CoreHandle {
\r
431 AudioDeviceID id[2]; // device ids
\r
432 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
433 AudioDeviceIOProcID procId[2];
\r
435 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
436 UInt32 nStreams[2]; // number of streams to use
\r
438 char *deviceBuffer;
\r
439 pthread_cond_t condition;
\r
440 int drainCounter; // Tracks callback counts when draining
\r
441 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
444 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
447 RtApiCore:: RtApiCore()
\r
449 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
450 // This is a largely undocumented but absolutely necessary
\r
451 // requirement starting with OS-X 10.6. If not called, queries and
\r
452 // updates to various audio device properties are not handled
\r
454 CFRunLoopRef theRunLoop = NULL;
\r
455 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
456 kAudioObjectPropertyScopeGlobal,
\r
457 kAudioObjectPropertyElementMaster };
\r
458 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
459 if ( result != noErr ) {
\r
460 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
461 error( RtAudioError::WARNING );
\r
466 RtApiCore :: ~RtApiCore()
\r
468 // The subclass destructor gets called before the base class
\r
469 // destructor, so close an existing stream before deallocating
\r
470 // apiDeviceId memory.
\r
471 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
474 unsigned int RtApiCore :: getDeviceCount( void )
\r
476 // Find out how many audio devices there are, if any.
\r
478 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
479 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
480 if ( result != noErr ) {
\r
481 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
482 error( RtAudioError::WARNING );
\r
486 return dataSize / sizeof( AudioDeviceID );
\r
489 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
491 unsigned int nDevices = getDeviceCount();
\r
492 if ( nDevices <= 1 ) return 0;
\r
495 UInt32 dataSize = sizeof( AudioDeviceID );
\r
496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
500 error( RtAudioError::WARNING );
\r
504 dataSize *= nDevices;
\r
505 AudioDeviceID deviceList[ nDevices ];
\r
506 property.mSelector = kAudioHardwarePropertyDevices;
\r
507 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
510 error( RtAudioError::WARNING );
\r
514 for ( unsigned int i=0; i<nDevices; i++ )
\r
515 if ( id == deviceList[i] ) return i;
\r
517 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
518 error( RtAudioError::WARNING );
\r
522 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
524 unsigned int nDevices = getDeviceCount();
\r
525 if ( nDevices <= 1 ) return 0;
\r
528 UInt32 dataSize = sizeof( AudioDeviceID );
\r
529 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
530 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
531 if ( result != noErr ) {
\r
532 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
533 error( RtAudioError::WARNING );
\r
537 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
538 AudioDeviceID deviceList[ nDevices ];
\r
539 property.mSelector = kAudioHardwarePropertyDevices;
\r
540 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
543 error( RtAudioError::WARNING );
\r
547 for ( unsigned int i=0; i<nDevices; i++ )
\r
548 if ( id == deviceList[i] ) return i;
\r
550 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
551 error( RtAudioError::WARNING );
\r
555 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
557 RtAudio::DeviceInfo info;
\r
558 info.probed = false;
\r
561 unsigned int nDevices = getDeviceCount();
\r
562 if ( nDevices == 0 ) {
\r
563 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
564 error( RtAudioError::INVALID_USE );
\r
568 if ( device >= nDevices ) {
\r
569 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
570 error( RtAudioError::INVALID_USE );
\r
574 AudioDeviceID deviceList[ nDevices ];
\r
575 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
576 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
577 kAudioObjectPropertyScopeGlobal,
\r
578 kAudioObjectPropertyElementMaster };
\r
579 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
580 0, NULL, &dataSize, (void *) &deviceList );
\r
581 if ( result != noErr ) {
\r
582 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
583 error( RtAudioError::WARNING );
\r
587 AudioDeviceID id = deviceList[ device ];
\r
589 // Get the device name.
\r
591 CFStringRef cfname;
\r
592 dataSize = sizeof( CFStringRef );
\r
593 property.mSelector = kAudioObjectPropertyManufacturer;
\r
594 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
595 if ( result != noErr ) {
\r
596 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
597 errorText_ = errorStream_.str();
\r
598 error( RtAudioError::WARNING );
\r
602 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
603 int length = CFStringGetLength(cfname);
\r
604 char *mname = (char *)malloc(length * 3 + 1);
\r
605 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
606 info.name.append( (const char *)mname, strlen(mname) );
\r
607 info.name.append( ": " );
\r
608 CFRelease( cfname );
\r
611 property.mSelector = kAudioObjectPropertyName;
\r
612 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
613 if ( result != noErr ) {
\r
614 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
615 errorText_ = errorStream_.str();
\r
616 error( RtAudioError::WARNING );
\r
620 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
621 length = CFStringGetLength(cfname);
\r
622 char *name = (char *)malloc(length * 3 + 1);
\r
623 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
624 info.name.append( (const char *)name, strlen(name) );
\r
625 CFRelease( cfname );
\r
628 // Get the output stream "configuration".
\r
629 AudioBufferList *bufferList = nil;
\r
630 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
631 property.mScope = kAudioDevicePropertyScopeOutput;
\r
632 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
634 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
635 if ( result != noErr || dataSize == 0 ) {
\r
636 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
637 errorText_ = errorStream_.str();
\r
638 error( RtAudioError::WARNING );
\r
642 // Allocate the AudioBufferList.
\r
643 bufferList = (AudioBufferList *) malloc( dataSize );
\r
644 if ( bufferList == NULL ) {
\r
645 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
646 error( RtAudioError::WARNING );
\r
650 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
651 if ( result != noErr || dataSize == 0 ) {
\r
652 free( bufferList );
\r
653 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 // Get output channel information.
\r
660 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
661 for ( i=0; i<nStreams; i++ )
\r
662 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
663 free( bufferList );
\r
665 // Get the input stream "configuration".
\r
666 property.mScope = kAudioDevicePropertyScopeInput;
\r
667 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
668 if ( result != noErr || dataSize == 0 ) {
\r
669 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
670 errorText_ = errorStream_.str();
\r
671 error( RtAudioError::WARNING );
\r
675 // Allocate the AudioBufferList.
\r
676 bufferList = (AudioBufferList *) malloc( dataSize );
\r
677 if ( bufferList == NULL ) {
\r
678 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
679 error( RtAudioError::WARNING );
\r
683 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
684 if (result != noErr || dataSize == 0) {
\r
685 free( bufferList );
\r
686 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
687 errorText_ = errorStream_.str();
\r
688 error( RtAudioError::WARNING );
\r
692 // Get input channel information.
\r
693 nStreams = bufferList->mNumberBuffers;
\r
694 for ( i=0; i<nStreams; i++ )
\r
695 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
696 free( bufferList );
\r
698 // If device opens for both playback and capture, we determine the channels.
\r
699 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
700 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
702 // Probe the device sample rates.
\r
703 bool isInput = false;
\r
704 if ( info.outputChannels == 0 ) isInput = true;
\r
706 // Determine the supported sample rates.
\r
707 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
708 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
709 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
710 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
711 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
712 errorText_ = errorStream_.str();
\r
713 error( RtAudioError::WARNING );
\r
717 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
718 AudioValueRange rangeList[ nRanges ];
\r
719 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
720 if ( result != kAudioHardwareNoError ) {
\r
721 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
722 errorText_ = errorStream_.str();
\r
723 error( RtAudioError::WARNING );
\r
727 // The sample rate reporting mechanism is a bit of a mystery. It
\r
728 // seems that it can either return individual rates or a range of
\r
729 // rates. I assume that if the min / max range values are the same,
\r
730 // then that represents a single supported rate and if the min / max
\r
731 // range values are different, the device supports an arbitrary
\r
732 // range of values (though there might be multiple ranges, so we'll
\r
733 // use the most conservative range).
\r
734 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
735 bool haveValueRange = false;
\r
736 info.sampleRates.clear();
\r
737 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
738 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
739 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
741 haveValueRange = true;
\r
742 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
743 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
747 if ( haveValueRange ) {
\r
748 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
749 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
750 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
754 // Sort and remove any redundant values
\r
755 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
756 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
758 if ( info.sampleRates.size() == 0 ) {
\r
759 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
760 errorText_ = errorStream_.str();
\r
761 error( RtAudioError::WARNING );
\r
765 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
766 // Thus, any other "physical" formats supported by the device are of
\r
767 // no interest to the client.
\r
768 info.nativeFormats = RTAUDIO_FLOAT32;
\r
770 if ( info.outputChannels > 0 )
\r
771 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
772 if ( info.inputChannels > 0 )
\r
773 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
775 info.probed = true;
\r
779 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
780 const AudioTimeStamp* /*inNow*/,
\r
781 const AudioBufferList* inInputData,
\r
782 const AudioTimeStamp* /*inInputTime*/,
\r
783 AudioBufferList* outOutputData,
\r
784 const AudioTimeStamp* /*inOutputTime*/,
\r
785 void* infoPointer )
\r
787 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
789 RtApiCore *object = (RtApiCore *) info->object;
\r
790 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
791 return kAudioHardwareUnspecifiedError;
\r
793 return kAudioHardwareNoError;
\r
796 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
798 const AudioObjectPropertyAddress properties[],
\r
799 void* handlePointer )
\r
801 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
802 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
803 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
804 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
805 handle->xrun[1] = true;
\r
807 handle->xrun[0] = true;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus rateListener( AudioObjectID inDevice,
\r
815 UInt32 /*nAddresses*/,
\r
816 const AudioObjectPropertyAddress /*properties*/[],
\r
817 void* ratePointer )
\r
819 Float64 *rate = (Float64 *) ratePointer;
\r
820 UInt32 dataSize = sizeof( Float64 );
\r
821 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
822 kAudioObjectPropertyScopeGlobal,
\r
823 kAudioObjectPropertyElementMaster };
\r
824 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
825 return kAudioHardwareNoError;
\r
828 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
829 unsigned int firstChannel, unsigned int sampleRate,
\r
830 RtAudioFormat format, unsigned int *bufferSize,
\r
831 RtAudio::StreamOptions *options )
\r
834 unsigned int nDevices = getDeviceCount();
\r
835 if ( nDevices == 0 ) {
\r
836 // This should not happen because a check is made before this function is called.
\r
837 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
841 if ( device >= nDevices ) {
\r
842 // This should not happen because a check is made before this function is called.
\r
843 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
847 AudioDeviceID deviceList[ nDevices ];
\r
848 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
849 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
850 kAudioObjectPropertyScopeGlobal,
\r
851 kAudioObjectPropertyElementMaster };
\r
852 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
853 0, NULL, &dataSize, (void *) &deviceList );
\r
854 if ( result != noErr ) {
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
859 AudioDeviceID id = deviceList[ device ];
\r
861 // Setup for stream mode.
\r
862 bool isInput = false;
\r
863 if ( mode == INPUT ) {
\r
865 property.mScope = kAudioDevicePropertyScopeInput;
\r
868 property.mScope = kAudioDevicePropertyScopeOutput;
\r
870 // Get the stream "configuration".
\r
871 AudioBufferList *bufferList = nil;
\r
873 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
874 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
875 if ( result != noErr || dataSize == 0 ) {
\r
876 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
877 errorText_ = errorStream_.str();
\r
881 // Allocate the AudioBufferList.
\r
882 bufferList = (AudioBufferList *) malloc( dataSize );
\r
883 if ( bufferList == NULL ) {
\r
884 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
888 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
889 if (result != noErr || dataSize == 0) {
\r
890 free( bufferList );
\r
891 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
892 errorText_ = errorStream_.str();
\r
896 // Search for one or more streams that contain the desired number of
\r
897 // channels. CoreAudio devices can have an arbitrary number of
\r
898 // streams and each stream can have an arbitrary number of channels.
\r
899 // For each stream, a single buffer of interleaved samples is
\r
900 // provided. RtAudio prefers the use of one stream of interleaved
\r
901 // data or multiple consecutive single-channel streams. However, we
\r
902 // now support multiple consecutive multi-channel streams of
\r
903 // interleaved data as well.
\r
904 UInt32 iStream, offsetCounter = firstChannel;
\r
905 UInt32 nStreams = bufferList->mNumberBuffers;
\r
906 bool monoMode = false;
\r
907 bool foundStream = false;
\r
909 // First check that the device supports the requested number of
\r
911 UInt32 deviceChannels = 0;
\r
912 for ( iStream=0; iStream<nStreams; iStream++ )
\r
913 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
915 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
916 free( bufferList );
\r
917 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
918 errorText_ = errorStream_.str();
\r
922 // Look for a single stream meeting our needs.
\r
923 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
924 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
925 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
926 if ( streamChannels >= channels + offsetCounter ) {
\r
927 firstStream = iStream;
\r
928 channelOffset = offsetCounter;
\r
929 foundStream = true;
\r
932 if ( streamChannels > offsetCounter ) break;
\r
933 offsetCounter -= streamChannels;
\r
936 // If we didn't find a single stream above, then we should be able
\r
937 // to meet the channel specification with multiple streams.
\r
938 if ( foundStream == false ) {
\r
940 offsetCounter = firstChannel;
\r
941 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
942 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
943 if ( streamChannels > offsetCounter ) break;
\r
944 offsetCounter -= streamChannels;
\r
947 firstStream = iStream;
\r
948 channelOffset = offsetCounter;
\r
949 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
951 if ( streamChannels > 1 ) monoMode = false;
\r
952 while ( channelCounter > 0 ) {
\r
953 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
954 if ( streamChannels > 1 ) monoMode = false;
\r
955 channelCounter -= streamChannels;
\r
960 free( bufferList );
\r
962 // Determine the buffer size.
\r
963 AudioValueRange bufferRange;
\r
964 dataSize = sizeof( AudioValueRange );
\r
965 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
966 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
968 if ( result != noErr ) {
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
970 errorText_ = errorStream_.str();
\r
974 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
975 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
976 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
978 // Set the buffer size. For multiple streams, I'm assuming we only
\r
979 // need to make this setting for the master channel.
\r
980 UInt32 theSize = (UInt32) *bufferSize;
\r
981 dataSize = sizeof( UInt32 );
\r
982 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
983 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
985 if ( result != noErr ) {
\r
986 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
987 errorText_ = errorStream_.str();
\r
991 // If attempting to setup a duplex stream, the bufferSize parameter
\r
992 // MUST be the same in both directions!
\r
993 *bufferSize = theSize;
\r
994 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
995 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
996 errorText_ = errorStream_.str();
\r
1000 stream_.bufferSize = *bufferSize;
\r
1001 stream_.nBuffers = 1;
\r
1003 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1004 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1006 dataSize = sizeof( hog_pid );
\r
1007 property.mSelector = kAudioDevicePropertyHogMode;
\r
1008 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1009 if ( result != noErr ) {
\r
1010 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1011 errorText_ = errorStream_.str();
\r
1015 if ( hog_pid != getpid() ) {
\r
1016 hog_pid = getpid();
\r
1017 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1018 if ( result != noErr ) {
\r
1019 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1020 errorText_ = errorStream_.str();
\r
1026 // Check and if necessary, change the sample rate for the device.
\r
1027 Float64 nominalRate;
\r
1028 dataSize = sizeof( Float64 );
\r
1029 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1030 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1031 if ( result != noErr ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1033 errorText_ = errorStream_.str();
\r
1037 // Only change the sample rate if off by more than 1 Hz.
\r
1038 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1040 // Set a property listener for the sample rate change
\r
1041 Float64 reportedRate = 0.0;
\r
1042 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1043 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1044 if ( result != noErr ) {
\r
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1046 errorText_ = errorStream_.str();
\r
1050 nominalRate = (Float64) sampleRate;
\r
1051 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1052 if ( result != noErr ) {
\r
1053 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1054 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1055 errorText_ = errorStream_.str();
\r
1059 // Now wait until the reported nominal rate is what we just set.
\r
1060 UInt32 microCounter = 0;
\r
1061 while ( reportedRate != nominalRate ) {
\r
1062 microCounter += 5000;
\r
1063 if ( microCounter > 5000000 ) break;
\r
1067 // Remove the property listener.
\r
1068 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1070 if ( microCounter > 5000000 ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1072 errorText_ = errorStream_.str();
\r
1077 // Now set the stream format for all streams. Also, check the
\r
1078 // physical format of the device and change that if necessary.
\r
1079 AudioStreamBasicDescription description;
\r
1080 dataSize = sizeof( AudioStreamBasicDescription );
\r
1081 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Set the sample rate and data format id. However, only make the
\r
1090 // change if the sample rate is not within 1.0 of the desired
\r
1091 // rate and the format is not linear pcm.
\r
1092 bool updateFormat = false;
\r
1093 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1094 description.mSampleRate = (Float64) sampleRate;
\r
1095 updateFormat = true;
\r
1098 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1099 description.mFormatID = kAudioFormatLinearPCM;
\r
1100 updateFormat = true;
\r
1103 if ( updateFormat ) {
\r
1104 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1105 if ( result != noErr ) {
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1112 // Now check the physical format.
\r
1113 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1114 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1115 if ( result != noErr ) {
\r
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1117 errorText_ = errorStream_.str();
\r
1121 //std::cout << "Current physical stream format:" << std::endl;
\r
1122 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1123 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1124 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1125 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1127 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1128 description.mFormatID = kAudioFormatLinearPCM;
\r
1129 //description.mSampleRate = (Float64) sampleRate;
\r
1130 AudioStreamBasicDescription testDescription = description;
\r
1131 UInt32 formatFlags;
\r
1133 // We'll try higher bit rates first and then work our way down.
\r
1134 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1135 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1136 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1137 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1138 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1139 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1140 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1141 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1142 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1143 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1144 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1145 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1146 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1148 bool setPhysicalFormat = false;
\r
1149 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1150 testDescription = description;
\r
1151 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1152 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1153 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1154 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1156 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1157 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1158 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1159 if ( result == noErr ) {
\r
1160 setPhysicalFormat = true;
\r
1161 //std::cout << "Updated physical stream format:" << std::endl;
\r
1162 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1163 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1164 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1165 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1170 if ( !setPhysicalFormat ) {
\r
1171 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1172 errorText_ = errorStream_.str();
\r
1175 } // done setting virtual/physical formats.
\r
1177 // Get the stream / device latency.
\r
1179 dataSize = sizeof( UInt32 );
\r
1180 property.mSelector = kAudioDevicePropertyLatency;
\r
1181 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1182 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1183 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1185 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1186 errorText_ = errorStream_.str();
\r
1187 error( RtAudioError::WARNING );
\r
1191 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1192 // always be presented in native-endian format, so we should never
\r
1193 // need to byte swap.
\r
1194 stream_.doByteSwap[mode] = false;
\r
1196 // From the CoreAudio documentation, PCM data must be supplied as
\r
1198 stream_.userFormat = format;
\r
1199 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1201 if ( streamCount == 1 )
\r
1202 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1203 else // multiple streams
\r
1204 stream_.nDeviceChannels[mode] = channels;
\r
1205 stream_.nUserChannels[mode] = channels;
\r
1206 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1207 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1208 else stream_.userInterleaved = true;
\r
1209 stream_.deviceInterleaved[mode] = true;
\r
1210 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1212 // Set flags for buffer conversion.
\r
1213 stream_.doConvertBuffer[mode] = false;
\r
1214 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1215 stream_.doConvertBuffer[mode] = true;
\r
1216 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1217 stream_.doConvertBuffer[mode] = true;
\r
1218 if ( streamCount == 1 ) {
\r
1219 if ( stream_.nUserChannels[mode] > 1 &&
\r
1220 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1221 stream_.doConvertBuffer[mode] = true;
\r
1223 else if ( monoMode && stream_.userInterleaved )
\r
1224 stream_.doConvertBuffer[mode] = true;
\r
1226 // Allocate our CoreHandle structure for the stream.
\r
1227 CoreHandle *handle = 0;
\r
1228 if ( stream_.apiHandle == 0 ) {
\r
1230 handle = new CoreHandle;
\r
1232 catch ( std::bad_alloc& ) {
\r
1233 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1237 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1238 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1241 stream_.apiHandle = (void *) handle;
\r
1244 handle = (CoreHandle *) stream_.apiHandle;
\r
1245 handle->iStream[mode] = firstStream;
\r
1246 handle->nStreams[mode] = streamCount;
\r
1247 handle->id[mode] = id;
\r
1249 // Allocate necessary internal buffers.
\r
1250 unsigned long bufferBytes;
\r
1251 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1252 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1253 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1254 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1255 if ( stream_.userBuffer[mode] == NULL ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1260 // If possible, we will make use of the CoreAudio stream buffers as
\r
1261 // "device buffers". However, we can't do this if using multiple
\r
1263 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1265 bool makeBuffer = true;
\r
1266 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1267 if ( mode == INPUT ) {
\r
1268 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1274 if ( makeBuffer ) {
\r
1275 bufferBytes *= *bufferSize;
\r
1276 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1277 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1278 if ( stream_.deviceBuffer == NULL ) {
\r
1279 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1285 stream_.sampleRate = sampleRate;
\r
1286 stream_.device[mode] = device;
\r
1287 stream_.state = STREAM_STOPPED;
\r
1288 stream_.callbackInfo.object = (void *) this;
\r
1290 // Setup the buffer conversion information structure.
\r
1291 if ( stream_.doConvertBuffer[mode] ) {
\r
1292 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1293 else setConvertInfo( mode, channelOffset );
\r
1296 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1297 // Only one callback procedure per device.
\r
1298 stream_.mode = DUPLEX;
\r
1300 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1301 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1303 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1304 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1306 if ( result != noErr ) {
\r
1307 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1308 errorText_ = errorStream_.str();
\r
1311 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1312 stream_.mode = DUPLEX;
\r
1314 stream_.mode = mode;
\r
1317 // Setup the device property listener for over/underload.
\r
1318 property.mSelector = kAudioDeviceProcessorOverload;
\r
1319 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1325 pthread_cond_destroy( &handle->condition );
\r
1327 stream_.apiHandle = 0;
\r
1330 for ( int i=0; i<2; i++ ) {
\r
1331 if ( stream_.userBuffer[i] ) {
\r
1332 free( stream_.userBuffer[i] );
\r
1333 stream_.userBuffer[i] = 0;
\r
1337 if ( stream_.deviceBuffer ) {
\r
1338 free( stream_.deviceBuffer );
\r
1339 stream_.deviceBuffer = 0;
\r
1342 stream_.state = STREAM_CLOSED;
\r
1346 void RtApiCore :: closeStream( void )
\r
1348 if ( stream_.state == STREAM_CLOSED ) {
\r
1349 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1350 error( RtAudioError::WARNING );
\r
1354 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1355 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1356 if ( stream_.state == STREAM_RUNNING )
\r
1357 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1358 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1359 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1361 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1362 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1366 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1367 if ( stream_.state == STREAM_RUNNING )
\r
1368 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1369 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1370 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1372 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1373 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1377 for ( int i=0; i<2; i++ ) {
\r
1378 if ( stream_.userBuffer[i] ) {
\r
1379 free( stream_.userBuffer[i] );
\r
1380 stream_.userBuffer[i] = 0;
\r
1384 if ( stream_.deviceBuffer ) {
\r
1385 free( stream_.deviceBuffer );
\r
1386 stream_.deviceBuffer = 0;
\r
1389 // Destroy pthread condition variable.
\r
1390 pthread_cond_destroy( &handle->condition );
\r
1392 stream_.apiHandle = 0;
\r
1394 stream_.mode = UNINITIALIZED;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1398 void RtApiCore :: startStream( void )
\r
1401 if ( stream_.state == STREAM_RUNNING ) {
\r
1402 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 OSStatus result = noErr;
\r
1408 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1409 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1411 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1412 if ( result != noErr ) {
\r
1413 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1414 errorText_ = errorStream_.str();
\r
1419 if ( stream_.mode == INPUT ||
\r
1420 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1422 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1423 if ( result != noErr ) {
\r
1424 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1425 errorText_ = errorStream_.str();
\r
1430 handle->drainCounter = 0;
\r
1431 handle->internalDrain = false;
\r
1432 stream_.state = STREAM_RUNNING;
\r
1435 if ( result == noErr ) return;
\r
1436 error( RtAudioError::SYSTEM_ERROR );
\r
1439 void RtApiCore :: stopStream( void )
\r
1442 if ( stream_.state == STREAM_STOPPED ) {
\r
1443 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1444 error( RtAudioError::WARNING );
\r
1448 OSStatus result = noErr;
\r
1449 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1452 if ( handle->drainCounter == 0 ) {
\r
1453 handle->drainCounter = 2;
\r
1454 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1457 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1458 if ( result != noErr ) {
\r
1459 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1460 errorText_ = errorStream_.str();
\r
1465 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1467 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1468 if ( result != noErr ) {
\r
1469 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1470 errorText_ = errorStream_.str();
\r
1475 stream_.state = STREAM_STOPPED;
\r
1478 if ( result == noErr ) return;
\r
1479 error( RtAudioError::SYSTEM_ERROR );
\r
1482 void RtApiCore :: abortStream( void )
\r
1485 if ( stream_.state == STREAM_STOPPED ) {
\r
1486 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1487 error( RtAudioError::WARNING );
\r
1491 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1492 handle->drainCounter = 2;
\r
1497 // This function will be called by a spawned thread when the user
\r
1498 // callback function signals that the stream should be stopped or
\r
1499 // aborted. It is better to handle it this way because the
\r
1500 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1501 // function is called.
\r
1502 static void *coreStopStream( void *ptr )
\r
1504 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1505 RtApiCore *object = (RtApiCore *) info->object;
\r
1507 object->stopStream();
\r
1508 pthread_exit( NULL );
\r
// Per-buffer I/O callback dispatched by CoreAudio for each device cycle.
// Invokes the user callback to obtain fresh output data, maps it into the
// device's AudioBufferList (handling mono-mode and multi-stream channel
// layouts), and copies/converts captured input back to the user buffer.
// Also manages the output "drain" handshake used by stopStream().
// NOTE(review): the embedded original line numbering below shows gaps
// (elided braces/else/pointer-rebase lines); code tokens are preserved
// byte-identical to this dump — reconcile against the canonical source.
1511 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,

1512 const AudioBufferList *inBufferList,

1513 const AudioBufferList *outBufferList )

1515 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;

1516 if ( stream_.state == STREAM_CLOSED ) {

1517 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";

1518 error( RtAudioError::WARNING );

1522 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

1523 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1525 // Check if we were draining the stream and signal is finished.

1526 if ( handle->drainCounter > 3 ) {

1527 ThreadHandle threadId;

1529 stream_.state = STREAM_STOPPING;

1530 if ( handle->internalDrain == true )

1531 pthread_create( &threadId, NULL, coreStopStream, info );

1532 else // external call to stopStream()

1533 pthread_cond_signal( &handle->condition );

1537 AudioDeviceID outputDevice = handle->id[0];

1539 // Invoke user callback to get fresh output data UNLESS we are

1540 // draining stream or duplex mode AND the input/output devices are

1541 // different AND this function is called for the input device.

1542 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {

1543 RtAudioCallback callback = (RtAudioCallback) info->callback;

1544 double streamTime = getStreamTime();

1545 RtAudioStreamStatus status = 0;

1546 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

1547 status |= RTAUDIO_OUTPUT_UNDERFLOW;

1548 handle->xrun[0] = false;

1550 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

1551 status |= RTAUDIO_INPUT_OVERFLOW;

1552 handle->xrun[1] = false;

// User callback return value: 1 requests an internal drain-then-stop,
// 2 requests an immediate stop (abort) of the stream.
1555 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],

1556 stream_.bufferSize, streamTime, status, info->userData );

1557 if ( cbReturnValue == 2 ) {

1558 stream_.state = STREAM_STOPPING;

1559 handle->drainCounter = 2;

1563 else if ( cbReturnValue == 1 ) {

1564 handle->drainCounter = 1;

1565 handle->internalDrain = true;

// ---- Output side: fill the CoreAudio output buffer list. ----
1569 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

1571 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

1573 if ( handle->nStreams[0] == 1 ) {

1574 memset( outBufferList->mBuffers[handle->iStream[0]].mData,

1576 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );

1578 else { // fill multiple streams with zeros

1579 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {

1580 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,

1582 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );

1586 else if ( handle->nStreams[0] == 1 ) {

1587 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer

1588 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,

1589 stream_.userBuffer[0], stream_.convertInfo[0] );

1591 else { // copy from user buffer

1592 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,

1593 stream_.userBuffer[0],

1594 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );

1597 else { // fill multiple streams

1598 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];

1599 if ( stream_.doConvertBuffer[0] ) {

1600 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

1601 inBuffer = (Float32 *) stream_.deviceBuffer;

1604 if ( stream_.deviceInterleaved[0] == false ) { // mono mode

1605 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;

1606 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {

1607 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,

1608 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );

1611 else { // fill multiple multi-channel streams with interleaved data

1612 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;

1613 Float32 *out, *in;

1615 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;

1616 UInt32 inChannels = stream_.nUserChannels[0];

1617 if ( stream_.doConvertBuffer[0] ) {

1618 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode

1619 inChannels = stream_.nDeviceChannels[0];

1622 if ( inInterleaved ) inOffset = 1;

1623 else inOffset = stream_.bufferSize;

1625 channelsLeft = inChannels;

1626 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {

// NOTE(review): the (re)initialization of 'in' from inBuffer appears to
// be on a line elided from this dump — confirm against canonical source.
1628 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;

1629 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

1632 // Account for possible channel offset in first stream

1633 if ( i == 0 && stream_.channelOffset[0] > 0 ) {

1634 streamChannels -= stream_.channelOffset[0];

1635 outJump = stream_.channelOffset[0];

1639 // Account for possible unfilled channels at end of the last stream

1640 if ( streamChannels > channelsLeft ) {

1641 outJump = streamChannels - channelsLeft;

1642 streamChannels = channelsLeft;

1645 // Determine input buffer offsets and skips

1646 if ( inInterleaved ) {

1647 inJump = inChannels;

1648 in += inChannels - channelsLeft;

1652 in += (inChannels - channelsLeft) * inOffset;

1655 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {

1656 for ( unsigned int j=0; j<streamChannels; j++ ) {

1657 *out++ = in[j*inOffset];

1662 channelsLeft -= streamChannels;

// While draining, count callbacks until the output has flushed
// (the "> 3" check at the top of this function ends the drain).
1667 if ( handle->drainCounter ) {

1668 handle->drainCounter++;

// ---- Input side: copy captured data to the user/device buffer. ----
1673 AudioDeviceID inputDevice;

1674 inputDevice = handle->id[1];

1675 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

1677 if ( handle->nStreams[1] == 1 ) {

1678 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer

1679 convertBuffer( stream_.userBuffer[1],

1680 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,

1681 stream_.convertInfo[1] );

1683 else { // copy to user buffer

1684 memcpy( stream_.userBuffer[1],

1685 inBufferList->mBuffers[handle->iStream[1]].mData,

1686 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );

1689 else { // read from multiple streams

1690 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];

1691 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

1693 if ( stream_.deviceInterleaved[1] == false ) { // mono mode

1694 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;

1695 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {

1696 memcpy( (void *)&outBuffer[i*stream_.bufferSize],

1697 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );

1700 else { // read from multiple multi-channel streams

1701 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;

1702 Float32 *out, *in;

1704 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;

1705 UInt32 outChannels = stream_.nUserChannels[1];

1706 if ( stream_.doConvertBuffer[1] ) {

1707 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode

1708 outChannels = stream_.nDeviceChannels[1];

1711 if ( outInterleaved ) outOffset = 1;

1712 else outOffset = stream_.bufferSize;

1714 channelsLeft = outChannels;

1715 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {

// NOTE(review): the (re)initialization of 'out' from outBuffer appears
// to be on a line elided from this dump — confirm against canonical source.
1717 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;

1718 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

1721 // Account for possible channel offset in first stream

1722 if ( i == 0 && stream_.channelOffset[1] > 0 ) {

1723 streamChannels -= stream_.channelOffset[1];

1724 inJump = stream_.channelOffset[1];

1728 // Account for possible unread channels at end of the last stream

1729 if ( streamChannels > channelsLeft ) {

1730 inJump = streamChannels - channelsLeft;

1731 streamChannels = channelsLeft;

1734 // Determine output buffer offsets and skips

1735 if ( outInterleaved ) {

1736 outJump = outChannels;

1737 out += outChannels - channelsLeft;

1741 out += (outChannels - channelsLeft) * outOffset;

1744 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {

1745 for ( unsigned int j=0; j<streamChannels; j++ ) {

1746 out[j*outOffset] = *in++;

1751 channelsLeft -= streamChannels;

// Deferred conversion from the internal device buffer into the user's
// format/layout, needed when reading from multiple streams.
1755 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer

1756 convertBuffer( stream_.userBuffer[1],

1757 stream_.deviceBuffer,

1758 stream_.convertInfo[1] );

1764 //MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream's running time by one buffer before returning.
1766 RtApi::tickStreamTime();

1770 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1774 case kAudioHardwareNotRunningError:
\r
1775 return "kAudioHardwareNotRunningError";
\r
1777 case kAudioHardwareUnspecifiedError:
\r
1778 return "kAudioHardwareUnspecifiedError";
\r
1780 case kAudioHardwareUnknownPropertyError:
\r
1781 return "kAudioHardwareUnknownPropertyError";
\r
1783 case kAudioHardwareBadPropertySizeError:
\r
1784 return "kAudioHardwareBadPropertySizeError";
\r
1786 case kAudioHardwareIllegalOperationError:
\r
1787 return "kAudioHardwareIllegalOperationError";
\r
1789 case kAudioHardwareBadObjectError:
\r
1790 return "kAudioHardwareBadObjectError";
\r
1792 case kAudioHardwareBadDeviceError:
\r
1793 return "kAudioHardwareBadDeviceError";
\r
1795 case kAudioHardwareBadStreamError:
\r
1796 return "kAudioHardwareBadStreamError";
\r
1798 case kAudioHardwareUnsupportedOperationError:
\r
1799 return "kAudioHardwareUnsupportedOperationError";
\r
1801 case kAudioDeviceUnsupportedFormatError:
\r
1802 return "kAudioDeviceUnsupportedFormatError";
\r
1804 case kAudioDevicePermissionsError:
\r
1805 return "kAudioDevicePermissionsError";
\r
1808 return "CoreAudio unknown error";
\r
1812 //******************** End of __MACOSX_CORE__ *********************//
\r
1815 #if defined(__UNIX_JACK__)
\r
1817 // JACK is a low-latency audio server, originally written for the
\r
1818 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1819 // connect a number of different applications to an audio device, as
\r
1820 // well as allowing them to share audio between themselves.
\r
1822 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1823 // have ports connected to the server. The JACK server is typically
\r
1824 // started in a terminal as follows:
\r
1826 // .jackd -d alsa -d hw:0
\r
1828 // or through an interface program such as qjackctl. Many of the
\r
1829 // parameters normally set for a stream are fixed by the JACK server
\r
1830 // and can be specified when the JACK server is started. In
\r
1833 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1835 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1836 // frames, and number of buffers = 4. Once the server is running, it
\r
1837 // is not possible to override these values. If the values are not
\r
1838 // specified in the command-line, the JACK server uses default values.
\r
1840 // The JACK server does not have to be running when an instance of
\r
1841 // RtApiJack is created, though the function getDeviceCount() will
\r
1842 // report 0 devices found until JACK has been started. When no
\r
1843 // devices are available (i.e., the JACK server is not running), a
\r
1844 // stream cannot be opened.
\r
1846 #include <jack/jack.h>
\r
1847 #include <unistd.h>
\r
1850 // A structure to hold various information related to the Jack API
\r
1851 // implementation.
\r
1852 struct JackHandle {
\r
1853 jack_client_t *client;
\r
1854 jack_port_t **ports[2];
\r
1855 std::string deviceName[2];
\r
1857 pthread_cond_t condition;
\r
1858 int drainCounter; // Tracks callback counts when draining
\r
1859 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1862 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error callback: discards the server's error text so it is
// not printed (installed when debug reporting is disabled).
static void jackSilentError( const char * /*message*/ ) {}
\r
1867 RtApiJack :: RtApiJack()
\r
1869 // Nothing to do here.
\r
1870 #if !defined(__RTAUDIO_DEBUG__)
\r
1871 // Turn off Jack's internal error reporting.
\r
1872 jack_set_error_function( &jackSilentError );
\r
1876 RtApiJack :: ~RtApiJack()
\r
1878 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1881 unsigned int RtApiJack :: getDeviceCount( void )
\r
1883 // See if we can become a jack client.
\r
1884 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1885 jack_status_t *status = NULL;
\r
1886 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1887 if ( client == 0 ) return 0;
\r
1889 const char **ports;
\r
1890 std::string port, previousPort;
\r
1891 unsigned int nChannels = 0, nDevices = 0;
\r
1892 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1894 // Parse the port names up to the first colon (:).
\r
1895 size_t iColon = 0;
\r
1897 port = (char *) ports[ nChannels ];
\r
1898 iColon = port.find(":");
\r
1899 if ( iColon != std::string::npos ) {
\r
1900 port = port.substr( 0, iColon + 1 );
\r
1901 if ( port != previousPort ) {
\r
1903 previousPort = port;
\r
1906 } while ( ports[++nChannels] );
\r
1910 jack_client_close( client );
\r
1914 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1916 RtAudio::DeviceInfo info;
\r
1917 info.probed = false;
\r
1919 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1920 jack_status_t *status = NULL;
\r
1921 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1922 if ( client == 0 ) {
\r
1923 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1924 error( RtAudioError::WARNING );
\r
1928 const char **ports;
\r
1929 std::string port, previousPort;
\r
1930 unsigned int nPorts = 0, nDevices = 0;
\r
1931 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1933 // Parse the port names up to the first colon (:).
\r
1934 size_t iColon = 0;
\r
1936 port = (char *) ports[ nPorts ];
\r
1937 iColon = port.find(":");
\r
1938 if ( iColon != std::string::npos ) {
\r
1939 port = port.substr( 0, iColon );
\r
1940 if ( port != previousPort ) {
\r
1941 if ( nDevices == device ) info.name = port;
\r
1943 previousPort = port;
\r
1946 } while ( ports[++nPorts] );
\r
1950 if ( device >= nDevices ) {
\r
1951 jack_client_close( client );
\r
1952 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1953 error( RtAudioError::INVALID_USE );
\r
1957 // Get the current jack server sample rate.
\r
1958 info.sampleRates.clear();
\r
1959 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1961 // Count the available ports containing the client name as device
\r
1962 // channels. Jack "input ports" equal RtAudio output channels.
\r
1963 unsigned int nChannels = 0;
\r
1964 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1966 while ( ports[ nChannels ] ) nChannels++;
\r
1968 info.outputChannels = nChannels;
\r
1971 // Jack "output ports" equal RtAudio input channels.
\r
1973 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1975 while ( ports[ nChannels ] ) nChannels++;
\r
1977 info.inputChannels = nChannels;
\r
1980 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1981 jack_client_close(client);
\r
1982 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1983 error( RtAudioError::WARNING );
\r
1987 // If device opens for both playback and capture, we determine the channels.
\r
1988 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1989 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1991 // Jack always uses 32-bit floats.
\r
1992 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1994 // Jack doesn't provide default devices so we'll use the first available one.
\r
1995 if ( device == 0 && info.outputChannels > 0 )
\r
1996 info.isDefaultOutput = true;
\r
1997 if ( device == 0 && info.inputChannels > 0 )
\r
1998 info.isDefaultInput = true;
\r
2000 jack_client_close(client);
\r
2001 info.probed = true;
\r
2005 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2007 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2009 RtApiJack *object = (RtApiJack *) info->object;
\r
2010 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2015 // This function will be called by a spawned thread when the Jack
\r
2016 // server signals that it is shutting down. It is necessary to handle
\r
2017 // it this way because the jackShutdown() function must return before
\r
2018 // the jack_deactivate() function (in closeStream()) will return.
\r
2019 static void *jackCloseStream( void *ptr )
\r
2021 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2022 RtApiJack *object = (RtApiJack *) info->object;
\r
2024 object->closeStream();
\r
2026 pthread_exit( NULL );
\r
2028 static void jackShutdown( void *infoPointer )
\r
2030 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2031 RtApiJack *object = (RtApiJack *) info->object;
\r
2033 // Check current stream state. If stopped, then we'll assume this
\r
2034 // was called as a result of a call to RtApiJack::stopStream (the
\r
2035 // deactivation of a client handle causes this function to be called).
\r
2036 // If not, we'll assume the Jack server is shutting down or some
\r
2037 // other problem occurred and we should close the stream.
\r
2038 if ( object->isStreamRunning() == false ) return;
\r
2040 ThreadHandle threadId;
\r
2041 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2042 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2045 static int jackXrun( void *infoPointer )
\r
2047 JackHandle *handle = (JackHandle *) infoPointer;
\r
2049 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2050 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2055 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2056 unsigned int firstChannel, unsigned int sampleRate,
\r
2057 RtAudioFormat format, unsigned int *bufferSize,
\r
2058 RtAudio::StreamOptions *options )
\r
2060 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2062 // Look for jack server and try to become a client (only do once per stream).
\r
2063 jack_client_t *client = 0;
\r
2064 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2065 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2066 jack_status_t *status = NULL;
\r
2067 if ( options && !options->streamName.empty() )
\r
2068 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2070 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2071 if ( client == 0 ) {
\r
2072 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2073 error( RtAudioError::WARNING );
\r
2078 // The handle must have been created on an earlier pass.
\r
2079 client = handle->client;
\r
2082 const char **ports;
\r
2083 std::string port, previousPort, deviceName;
\r
2084 unsigned int nPorts = 0, nDevices = 0;
\r
2085 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2087 // Parse the port names up to the first colon (:).
\r
2088 size_t iColon = 0;
\r
2090 port = (char *) ports[ nPorts ];
\r
2091 iColon = port.find(":");
\r
2092 if ( iColon != std::string::npos ) {
\r
2093 port = port.substr( 0, iColon );
\r
2094 if ( port != previousPort ) {
\r
2095 if ( nDevices == device ) deviceName = port;
\r
2097 previousPort = port;
\r
2100 } while ( ports[++nPorts] );
\r
2104 if ( device >= nDevices ) {
\r
2105 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2109 // Count the available ports containing the client name as device
\r
2110 // channels. Jack "input ports" equal RtAudio output channels.
\r
2111 unsigned int nChannels = 0;
\r
2112 unsigned long flag = JackPortIsInput;
\r
2113 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2114 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2116 while ( ports[ nChannels ] ) nChannels++;
\r
2120 // Compare the jack ports for specified client to the requested number of channels.
\r
2121 if ( nChannels < (channels + firstChannel) ) {
\r
2122 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2123 errorText_ = errorStream_.str();
\r
2127 // Check the jack server sample rate.
\r
2128 unsigned int jackRate = jack_get_sample_rate( client );
\r
2129 if ( sampleRate != jackRate ) {
\r
2130 jack_client_close( client );
\r
2131 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2132 errorText_ = errorStream_.str();
\r
2135 stream_.sampleRate = jackRate;
\r
2137 // Get the latency of the JACK port.
\r
2138 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2139 if ( ports[ firstChannel ] ) {
\r
2140 // Added by Ge Wang
\r
2141 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2142 // the range (usually the min and max are equal)
\r
2143 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2144 // get the latency range
\r
2145 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2146 // be optimistic, use the min!
\r
2147 stream_.latency[mode] = latrange.min;
\r
2148 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2152 // The jack server always uses 32-bit floating-point data.
\r
2153 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2154 stream_.userFormat = format;
\r
2156 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2157 else stream_.userInterleaved = true;
\r
2159 // Jack always uses non-interleaved buffers.
\r
2160 stream_.deviceInterleaved[mode] = false;
\r
2162 // Jack always provides host byte-ordered data.
\r
2163 stream_.doByteSwap[mode] = false;
\r
2165 // Get the buffer size. The buffer size and number of buffers
\r
2166 // (periods) is set when the jack server is started.
\r
2167 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2168 *bufferSize = stream_.bufferSize;
\r
2170 stream_.nDeviceChannels[mode] = channels;
\r
2171 stream_.nUserChannels[mode] = channels;
\r
2173 // Set flags for buffer conversion.
\r
2174 stream_.doConvertBuffer[mode] = false;
\r
2175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2176 stream_.doConvertBuffer[mode] = true;
\r
2177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2178 stream_.nUserChannels[mode] > 1 )
\r
2179 stream_.doConvertBuffer[mode] = true;
\r
2181 // Allocate our JackHandle structure for the stream.
\r
2182 if ( handle == 0 ) {
\r
2184 handle = new JackHandle;
\r
2186 catch ( std::bad_alloc& ) {
\r
2187 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2191 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2192 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2195 stream_.apiHandle = (void *) handle;
\r
2196 handle->client = client;
\r
2198 handle->deviceName[mode] = deviceName;
\r
2200 // Allocate necessary internal buffers.
\r
2201 unsigned long bufferBytes;
\r
2202 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2203 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2204 if ( stream_.userBuffer[mode] == NULL ) {
\r
2205 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2209 if ( stream_.doConvertBuffer[mode] ) {
\r
2211 bool makeBuffer = true;
\r
2212 if ( mode == OUTPUT )
\r
2213 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2214 else { // mode == INPUT
\r
2215 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2216 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2217 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2218 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2222 if ( makeBuffer ) {
\r
2223 bufferBytes *= *bufferSize;
\r
2224 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2225 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2226 if ( stream_.deviceBuffer == NULL ) {
\r
2227 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2233 // Allocate memory for the Jack ports (channels) identifiers.
\r
2234 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2235 if ( handle->ports[mode] == NULL ) {
\r
2236 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2240 stream_.device[mode] = device;
\r
2241 stream_.channelOffset[mode] = firstChannel;
\r
2242 stream_.state = STREAM_STOPPED;
\r
2243 stream_.callbackInfo.object = (void *) this;
\r
2245 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2246 // We had already set up the stream for output.
\r
2247 stream_.mode = DUPLEX;
\r
2249 stream_.mode = mode;
\r
2250 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2251 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2252 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2255 // Register our ports.
\r
2257 if ( mode == OUTPUT ) {
\r
2258 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2259 snprintf( label, 64, "outport %d", i );
\r
2260 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2261 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2265 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2266 snprintf( label, 64, "inport %d", i );
\r
2267 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2268 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2272 // Setup the buffer conversion information structure. We don't use
\r
2273 // buffers to do channel offsets, so we override that parameter
\r
2275 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2281 pthread_cond_destroy( &handle->condition );
\r
2282 jack_client_close( handle->client );
\r
2284 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2285 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2288 stream_.apiHandle = 0;
\r
2291 for ( int i=0; i<2; i++ ) {
\r
2292 if ( stream_.userBuffer[i] ) {
\r
2293 free( stream_.userBuffer[i] );
\r
2294 stream_.userBuffer[i] = 0;
\r
2298 if ( stream_.deviceBuffer ) {
\r
2299 free( stream_.deviceBuffer );
\r
2300 stream_.deviceBuffer = 0;
\r
2306 void RtApiJack :: closeStream( void )
\r
2308 if ( stream_.state == STREAM_CLOSED ) {
\r
2309 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2310 error( RtAudioError::WARNING );
\r
2314 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2317 if ( stream_.state == STREAM_RUNNING )
\r
2318 jack_deactivate( handle->client );
\r
2320 jack_client_close( handle->client );
\r
2324 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2325 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2326 pthread_cond_destroy( &handle->condition );
\r
2328 stream_.apiHandle = 0;
\r
2331 for ( int i=0; i<2; i++ ) {
\r
2332 if ( stream_.userBuffer[i] ) {
\r
2333 free( stream_.userBuffer[i] );
\r
2334 stream_.userBuffer[i] = 0;
\r
2338 if ( stream_.deviceBuffer ) {
\r
2339 free( stream_.deviceBuffer );
\r
2340 stream_.deviceBuffer = 0;
\r
2343 stream_.mode = UNINITIALIZED;
\r
2344 stream_.state = STREAM_CLOSED;
\r
2347 void RtApiJack :: startStream( void )
\r
2350 if ( stream_.state == STREAM_RUNNING ) {
\r
2351 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2352 error( RtAudioError::WARNING );
\r
2356 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2357 int result = jack_activate( handle->client );
\r
2359 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2363 const char **ports;
\r
2365 // Get the list of available ports.
\r
2366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2368 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2369 if ( ports == NULL) {
\r
2370 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2374 // Now make the port connections. Since RtAudio wasn't designed to
\r
2375 // allow the user to select particular channels of a device, we'll
\r
2376 // just open the first "nChannels" ports with offset.
\r
2377 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2379 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2380 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2383 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2390 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2392 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2393 if ( ports == NULL) {
\r
2394 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2398 // Now make the port connections. See note above.
\r
2399 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2401 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2402 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2405 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2412 handle->drainCounter = 0;
\r
2413 handle->internalDrain = false;
\r
2414 stream_.state = STREAM_RUNNING;
\r
2417 if ( result == 0 ) return;
\r
2418 error( RtAudioError::SYSTEM_ERROR );
\r
2421 void RtApiJack :: stopStream( void )
\r
2424 if ( stream_.state == STREAM_STOPPED ) {
\r
2425 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2426 error( RtAudioError::WARNING );
\r
2430 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2431 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2433 if ( handle->drainCounter == 0 ) {
\r
2434 handle->drainCounter = 2;
\r
2435 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2439 jack_deactivate( handle->client );
\r
2440 stream_.state = STREAM_STOPPED;
\r
2443 void RtApiJack :: abortStream( void )
\r
2446 if ( stream_.state == STREAM_STOPPED ) {
\r
2447 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2448 error( RtAudioError::WARNING );
\r
2452 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2453 handle->drainCounter = 2;
\r
2458 // This function will be called by a spawned thread when the user
\r
2459 // callback function signals that the stream should be stopped or
\r
2460 // aborted. It is necessary to handle it this way because the
\r
2461 // callbackEvent() function must return before the jack_deactivate()
\r
2462 // function will return.
\r
2463 static void *jackStopStream( void *ptr )
\r
2465 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2466 RtApiJack *object = (RtApiJack *) info->object;
\r
2468 object->stopStream();
\r
2469 pthread_exit( NULL );
\r
2472 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2474 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2475 if ( stream_.state == STREAM_CLOSED ) {
\r
2476 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2477 error( RtAudioError::WARNING );
\r
2480 if ( stream_.bufferSize != nframes ) {
\r
2481 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2482 error( RtAudioError::WARNING );
\r
2486 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2487 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2489 // Check if we were draining the stream and signal is finished.
\r
2490 if ( handle->drainCounter > 3 ) {
\r
2491 ThreadHandle threadId;
\r
2493 stream_.state = STREAM_STOPPING;
\r
2494 if ( handle->internalDrain == true )
\r
2495 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2497 pthread_cond_signal( &handle->condition );
\r
2501 // Invoke user callback first, to get fresh output data.
\r
2502 if ( handle->drainCounter == 0 ) {
\r
2503 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2504 double streamTime = getStreamTime();
\r
2505 RtAudioStreamStatus status = 0;
\r
2506 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2507 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2508 handle->xrun[0] = false;
\r
2510 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2511 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2512 handle->xrun[1] = false;
\r
2514 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2515 stream_.bufferSize, streamTime, status, info->userData );
\r
2516 if ( cbReturnValue == 2 ) {
\r
2517 stream_.state = STREAM_STOPPING;
\r
2518 handle->drainCounter = 2;
\r
2520 pthread_create( &id, NULL, jackStopStream, info );
\r
2523 else if ( cbReturnValue == 1 ) {
\r
2524 handle->drainCounter = 1;
\r
2525 handle->internalDrain = true;
\r
2529 jack_default_audio_sample_t *jackbuffer;
\r
2530 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2533 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2535 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2536 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2537 memset( jackbuffer, 0, bufferBytes );
\r
2541 else if ( stream_.doConvertBuffer[0] ) {
\r
2543 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2545 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2546 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2547 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2550 else { // no buffer conversion
\r
2551 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2552 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2553 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2557 if ( handle->drainCounter ) {
\r
2558 handle->drainCounter++;
\r
2563 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2565 if ( stream_.doConvertBuffer[1] ) {
\r
2566 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2567 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2568 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2570 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2572 else { // no buffer conversion
\r
2573 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2574 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2575 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2581 RtApi::tickStreamTime();
\r
2584 //******************** End of __UNIX_JACK__ *********************//
\r
2587 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2589 // The ASIO API is designed around a callback scheme, so this
\r
2590 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2591 // Jack. The primary constraint with ASIO is that it only allows
\r
2592 // access to a single driver at a time. Thus, it is not possible to
\r
2593 // have more than one simultaneous RtAudio stream.
\r
2595 // This implementation also requires a number of external ASIO files
\r
2596 // and a few global variables. The ASIO callback scheme does not
\r
2597 // allow for the passing of user data, so we must create a global
\r
2598 // pointer to our callbackInfo structure.
\r
2600 // On unix systems, we make use of a pthread condition variable.
\r
2601 // Since there is no equivalent in Windows, I hacked something based
\r
2602 // on information found in
\r
2603 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2605 #include "asiosys.h"
\r
2607 #include "iasiothiscallresolver.h"
\r
2608 #include "asiodrivers.h"
\r
2611 static AsioDrivers drivers;
\r
2612 static ASIOCallbacks asioCallbacks;
\r
2613 static ASIODriverInfo driverInfo;
\r
2614 static CallbackInfo *asioCallbackInfo;
\r
2615 static bool asioXRun;
\r
2617 struct AsioHandle {
\r
2618 int drainCounter; // Tracks callback counts when draining
\r
2619 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2620 ASIOBufferInfo *bufferInfos;
\r
2624 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2627 // Function declarations (definitions at end of section)
\r
2628 static const char* getAsioErrorString( ASIOError result );
\r
2629 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2630 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2632 RtApiAsio :: RtApiAsio()
\r
2634 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2635 // CoInitialize beforehand, but it must be for appartment threading
\r
2636 // (in which case, CoInitilialize will return S_FALSE here).
\r
2637 coInitialized_ = false;
\r
2638 HRESULT hr = CoInitialize( NULL );
\r
2639 if ( FAILED(hr) ) {
\r
2640 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2641 error( RtAudioError::WARNING );
\r
2643 coInitialized_ = true;
\r
2645 drivers.removeCurrentDriver();
\r
2646 driverInfo.asioVersion = 2;
\r
2648 // See note in DirectSound implementation about GetDesktopWindow().
\r
2649 driverInfo.sysRef = GetForegroundWindow();
\r
2652 RtApiAsio :: ~RtApiAsio()
\r
2654 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2655 if ( coInitialized_ ) CoUninitialize();
\r
2658 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2660 return (unsigned int) drivers.asioGetNumDev();
\r
2663 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2665 RtAudio::DeviceInfo info;
\r
2666 info.probed = false;
\r
2669 unsigned int nDevices = getDeviceCount();
\r
2670 if ( nDevices == 0 ) {
\r
2671 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2672 error( RtAudioError::INVALID_USE );
\r
2676 if ( device >= nDevices ) {
\r
2677 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2678 error( RtAudioError::INVALID_USE );
\r
2682 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2683 if ( stream_.state != STREAM_CLOSED ) {
\r
2684 if ( device >= devices_.size() ) {
\r
2685 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2686 error( RtAudioError::WARNING );
\r
2689 return devices_[ device ];
\r
2692 char driverName[32];
\r
2693 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2694 if ( result != ASE_OK ) {
\r
2695 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2696 errorText_ = errorStream_.str();
\r
2697 error( RtAudioError::WARNING );
\r
2701 info.name = driverName;
\r
2703 if ( !drivers.loadDriver( driverName ) ) {
\r
2704 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2705 errorText_ = errorStream_.str();
\r
2706 error( RtAudioError::WARNING );
\r
2710 result = ASIOInit( &driverInfo );
\r
2711 if ( result != ASE_OK ) {
\r
2712 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2713 errorText_ = errorStream_.str();
\r
2714 error( RtAudioError::WARNING );
\r
2718 // Determine the device channel information.
\r
2719 long inputChannels, outputChannels;
\r
2720 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2721 if ( result != ASE_OK ) {
\r
2722 drivers.removeCurrentDriver();
\r
2723 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2724 errorText_ = errorStream_.str();
\r
2725 error( RtAudioError::WARNING );
\r
2729 info.outputChannels = outputChannels;
\r
2730 info.inputChannels = inputChannels;
\r
2731 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2732 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2734 // Determine the supported sample rates.
\r
2735 info.sampleRates.clear();
\r
2736 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2737 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2738 if ( result == ASE_OK )
\r
2739 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2742 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2743 ASIOChannelInfo channelInfo;
\r
2744 channelInfo.channel = 0;
\r
2745 channelInfo.isInput = true;
\r
2746 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2747 result = ASIOGetChannelInfo( &channelInfo );
\r
2748 if ( result != ASE_OK ) {
\r
2749 drivers.removeCurrentDriver();
\r
2750 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2751 errorText_ = errorStream_.str();
\r
2752 error( RtAudioError::WARNING );
\r
2756 info.nativeFormats = 0;
\r
2757 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2758 info.nativeFormats |= RTAUDIO_SINT16;
\r
2759 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2760 info.nativeFormats |= RTAUDIO_SINT32;
\r
2761 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2762 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2763 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2764 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2765 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2766 info.nativeFormats |= RTAUDIO_SINT24;
\r
2768 if ( info.outputChannels > 0 )
\r
2769 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2770 if ( info.inputChannels > 0 )
\r
2771 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2773 info.probed = true;
\r
2774 drivers.removeCurrentDriver();
\r
2778 static void bufferSwitch( long index, ASIOBool processNow )
\r
2780 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2781 object->callbackEvent( index );
\r
2784 void RtApiAsio :: saveDeviceInfo( void )
\r
2788 unsigned int nDevices = getDeviceCount();
\r
2789 devices_.resize( nDevices );
\r
2790 for ( unsigned int i=0; i<nDevices; i++ )
\r
2791 devices_[i] = getDeviceInfo( i );
\r
2794 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2795 unsigned int firstChannel, unsigned int sampleRate,
\r
2796 RtAudioFormat format, unsigned int *bufferSize,
\r
2797 RtAudio::StreamOptions *options )
\r
2799 // For ASIO, a duplex stream MUST use the same driver.
\r
2800 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2801 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2805 char driverName[32];
\r
2806 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2807 if ( result != ASE_OK ) {
\r
2808 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2809 errorText_ = errorStream_.str();
\r
2813 // Only load the driver once for duplex stream.
\r
2814 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2815 // The getDeviceInfo() function will not work when a stream is open
\r
2816 // because ASIO does not allow multiple devices to run at the same
\r
2817 // time. Thus, we'll probe the system before opening a stream and
\r
2818 // save the results for use by getDeviceInfo().
\r
2819 this->saveDeviceInfo();
\r
2821 if ( !drivers.loadDriver( driverName ) ) {
\r
2822 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2823 errorText_ = errorStream_.str();
\r
2827 result = ASIOInit( &driverInfo );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2835 // Check the device channel count.
\r
2836 long inputChannels, outputChannels;
\r
2837 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2838 if ( result != ASE_OK ) {
\r
2839 drivers.removeCurrentDriver();
\r
2840 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2841 errorText_ = errorStream_.str();
\r
2845 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2846 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2847 drivers.removeCurrentDriver();
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2852 stream_.nDeviceChannels[mode] = channels;
\r
2853 stream_.nUserChannels[mode] = channels;
\r
2854 stream_.channelOffset[mode] = firstChannel;
\r
2856 // Verify the sample rate is supported.
\r
2857 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2858 if ( result != ASE_OK ) {
\r
2859 drivers.removeCurrentDriver();
\r
2860 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2861 errorText_ = errorStream_.str();
\r
2865 // Get the current sample rate
\r
2866 ASIOSampleRate currentRate;
\r
2867 result = ASIOGetSampleRate( ¤tRate );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2871 errorText_ = errorStream_.str();
\r
2875 // Set the sample rate only if necessary
\r
2876 if ( currentRate != sampleRate ) {
\r
2877 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2878 if ( result != ASE_OK ) {
\r
2879 drivers.removeCurrentDriver();
\r
2880 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2881 errorText_ = errorStream_.str();
\r
2886 // Determine the driver data type.
\r
2887 ASIOChannelInfo channelInfo;
\r
2888 channelInfo.channel = 0;
\r
2889 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2890 else channelInfo.isInput = true;
\r
2891 result = ASIOGetChannelInfo( &channelInfo );
\r
2892 if ( result != ASE_OK ) {
\r
2893 drivers.removeCurrentDriver();
\r
2894 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2895 errorText_ = errorStream_.str();
\r
2899 // Assuming WINDOWS host is always little-endian.
\r
2900 stream_.doByteSwap[mode] = false;
\r
2901 stream_.userFormat = format;
\r
2902 stream_.deviceFormat[mode] = 0;
\r
2903 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2904 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2905 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2907 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2908 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2909 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2911 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2912 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2913 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2915 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2916 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2917 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2919 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2920 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2921 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2924 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2925 drivers.removeCurrentDriver();
\r
2926 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2927 errorText_ = errorStream_.str();
\r
2931 // Set the buffer size. For a duplex stream, this will end up
\r
2932 // setting the buffer size based on the input constraints, which
\r
2934 long minSize, maxSize, preferSize, granularity;
\r
2935 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2936 if ( result != ASE_OK ) {
\r
2937 drivers.removeCurrentDriver();
\r
2938 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2939 errorText_ = errorStream_.str();
\r
2943 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2944 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2945 else if ( granularity == -1 ) {
\r
2946 // Make sure bufferSize is a power of two.
\r
2947 int log2_of_min_size = 0;
\r
2948 int log2_of_max_size = 0;
\r
2950 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2951 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2952 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2955 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2956 int min_delta_num = log2_of_min_size;
\r
2958 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2959 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2960 if (current_delta < min_delta) {
\r
2961 min_delta = current_delta;
\r
2962 min_delta_num = i;
\r
2966 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2967 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2968 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2970 else if ( granularity != 0 ) {
\r
2971 // Set to an even multiple of granularity, rounding up.
\r
2972 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2975 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2976 drivers.removeCurrentDriver();
\r
2977 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2981 stream_.bufferSize = *bufferSize;
\r
2982 stream_.nBuffers = 2;
\r
2984 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2985 else stream_.userInterleaved = true;
\r
2987 // ASIO always uses non-interleaved buffers.
\r
2988 stream_.deviceInterleaved[mode] = false;
\r
2990 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2991 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2992 if ( handle == 0 ) {
\r
2994 handle = new AsioHandle;
\r
2996 catch ( std::bad_alloc& ) {
\r
2997 //if ( handle == NULL ) {
\r
2998 drivers.removeCurrentDriver();
\r
2999 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3002 handle->bufferInfos = 0;
\r
3004 // Create a manual-reset event.
\r
3005 handle->condition = CreateEvent( NULL, // no security
\r
3006 TRUE, // manual-reset
\r
3007 FALSE, // non-signaled initially
\r
3008 NULL ); // unnamed
\r
3009 stream_.apiHandle = (void *) handle;
\r
3012 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3013 // and output separately, we'll have to dispose of previously
\r
3014 // created output buffers for a duplex stream.
\r
3015 long inputLatency, outputLatency;
\r
3016 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3017 ASIODisposeBuffers();
\r
3018 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3021 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3022 bool buffersAllocated = false;
\r
3023 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3024 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3025 if ( handle->bufferInfos == NULL ) {
\r
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3027 errorText_ = errorStream_.str();
\r
3031 ASIOBufferInfo *infos;
\r
3032 infos = handle->bufferInfos;
\r
3033 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3034 infos->isInput = ASIOFalse;
\r
3035 infos->channelNum = i + stream_.channelOffset[0];
\r
3036 infos->buffers[0] = infos->buffers[1] = 0;
\r
3038 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3039 infos->isInput = ASIOTrue;
\r
3040 infos->channelNum = i + stream_.channelOffset[1];
\r
3041 infos->buffers[0] = infos->buffers[1] = 0;
\r
3044 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3045 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3046 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3047 asioCallbacks.asioMessage = &asioMessages;
\r
3048 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3049 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3050 if ( result != ASE_OK ) {
\r
3051 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3052 errorText_ = errorStream_.str();
\r
3055 buffersAllocated = true;
\r
3057 // Set flags for buffer conversion.
\r
3058 stream_.doConvertBuffer[mode] = false;
\r
3059 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3060 stream_.doConvertBuffer[mode] = true;
\r
3061 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3062 stream_.nUserChannels[mode] > 1 )
\r
3063 stream_.doConvertBuffer[mode] = true;
\r
3065 // Allocate necessary internal buffers
\r
3066 unsigned long bufferBytes;
\r
3067 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3068 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3069 if ( stream_.userBuffer[mode] == NULL ) {
\r
3070 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3074 if ( stream_.doConvertBuffer[mode] ) {
\r
3076 bool makeBuffer = true;
\r
3077 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3078 if ( mode == INPUT ) {
\r
3079 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3080 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3081 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3085 if ( makeBuffer ) {
\r
3086 bufferBytes *= *bufferSize;
\r
3087 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3088 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3089 if ( stream_.deviceBuffer == NULL ) {
\r
3090 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3096 stream_.sampleRate = sampleRate;
\r
3097 stream_.device[mode] = device;
\r
3098 stream_.state = STREAM_STOPPED;
\r
3099 asioCallbackInfo = &stream_.callbackInfo;
\r
3100 stream_.callbackInfo.object = (void *) this;
\r
3101 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3102 // We had already set up an output stream.
\r
3103 stream_.mode = DUPLEX;
\r
3105 stream_.mode = mode;
\r
3107 // Determine device latencies
\r
3108 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3109 if ( result != ASE_OK ) {
\r
3110 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3111 errorText_ = errorStream_.str();
\r
3112 error( RtAudioError::WARNING); // warn but don't fail
\r
3115 stream_.latency[0] = outputLatency;
\r
3116 stream_.latency[1] = inputLatency;
\r
3119 // Setup the buffer conversion information structure. We don't use
\r
3120 // buffers to do channel offsets, so we override that parameter
\r
3122 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3127 if ( buffersAllocated )
\r
3128 ASIODisposeBuffers();
\r
3129 drivers.removeCurrentDriver();
\r
3132 CloseHandle( handle->condition );
\r
3133 if ( handle->bufferInfos )
\r
3134 free( handle->bufferInfos );
\r
3136 stream_.apiHandle = 0;
\r
3139 for ( int i=0; i<2; i++ ) {
\r
3140 if ( stream_.userBuffer[i] ) {
\r
3141 free( stream_.userBuffer[i] );
\r
3142 stream_.userBuffer[i] = 0;
\r
3146 if ( stream_.deviceBuffer ) {
\r
3147 free( stream_.deviceBuffer );
\r
3148 stream_.deviceBuffer = 0;
\r
3154 void RtApiAsio :: closeStream()
\r
3156 if ( stream_.state == STREAM_CLOSED ) {
\r
3157 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3158 error( RtAudioError::WARNING );
\r
3162 if ( stream_.state == STREAM_RUNNING ) {
\r
3163 stream_.state = STREAM_STOPPED;
\r
3166 ASIODisposeBuffers();
\r
3167 drivers.removeCurrentDriver();
\r
3169 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3171 CloseHandle( handle->condition );
\r
3172 if ( handle->bufferInfos )
\r
3173 free( handle->bufferInfos );
\r
3175 stream_.apiHandle = 0;
\r
3178 for ( int i=0; i<2; i++ ) {
\r
3179 if ( stream_.userBuffer[i] ) {
\r
3180 free( stream_.userBuffer[i] );
\r
3181 stream_.userBuffer[i] = 0;
\r
3185 if ( stream_.deviceBuffer ) {
\r
3186 free( stream_.deviceBuffer );
\r
3187 stream_.deviceBuffer = 0;
\r
3190 stream_.mode = UNINITIALIZED;
\r
3191 stream_.state = STREAM_CLOSED;
\r
// Flag used to coordinate stream stopping between the ASIO callback and
// the API threads; cleared in startStream().
bool stopThreadCalled = false;
\r
3196 void RtApiAsio :: startStream()
\r
3199 if ( stream_.state == STREAM_RUNNING ) {
\r
3200 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3201 error( RtAudioError::WARNING );
\r
3205 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3206 ASIOError result = ASIOStart();
\r
3207 if ( result != ASE_OK ) {
\r
3208 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3209 errorText_ = errorStream_.str();
\r
3213 handle->drainCounter = 0;
\r
3214 handle->internalDrain = false;
\r
3215 ResetEvent( handle->condition );
\r
3216 stream_.state = STREAM_RUNNING;
\r
3220 stopThreadCalled = false;
\r
3222 if ( result == ASE_OK ) return;
\r
3223 error( RtAudioError::SYSTEM_ERROR );
\r
3226 void RtApiAsio :: stopStream()
\r
3229 if ( stream_.state == STREAM_STOPPED ) {
\r
3230 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3231 error( RtAudioError::WARNING );
\r
3235 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3236 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3237 if ( handle->drainCounter == 0 ) {
\r
3238 handle->drainCounter = 2;
\r
3239 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3243 stream_.state = STREAM_STOPPED;
\r
3245 ASIOError result = ASIOStop();
\r
3246 if ( result != ASE_OK ) {
\r
3247 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3248 errorText_ = errorStream_.str();
\r
3251 if ( result == ASE_OK ) return;
\r
3252 error( RtAudioError::SYSTEM_ERROR );
\r
3255 void RtApiAsio :: abortStream()
\r
3258 if ( stream_.state == STREAM_STOPPED ) {
\r
3259 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3260 error( RtAudioError::WARNING );
\r
3264 // The following lines were commented-out because some behavior was
\r
3265 // noted where the device buffers need to be zeroed to avoid
\r
3266 // continuing sound, even when the device buffers are completely
\r
3267 // disposed. So now, calling abort is the same as calling stop.
\r
3268 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3269 // handle->drainCounter = 2;
\r
3273 // This function will be called by a spawned thread when the user
\r
3274 // callback function signals that the stream should be stopped or
\r
3275 // aborted. It is necessary to handle it this way because the
\r
3276 // callbackEvent() function must return before the ASIOStop()
\r
3277 // function will return.
\r
3278 static unsigned __stdcall asioStopStream( void *ptr )
\r
3280 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3281 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3283 object->stopStream();
\r
3284 _endthreadex( 0 );
\r
3288 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3290 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3291 if ( stream_.state == STREAM_CLOSED ) {
\r
3292 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3293 error( RtAudioError::WARNING );
\r
3297 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3298 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3300 // Check if we were draining the stream and signal if finished.
\r
3301 if ( handle->drainCounter > 3 ) {
\r
3303 stream_.state = STREAM_STOPPING;
\r
3304 if ( handle->internalDrain == false )
\r
3305 SetEvent( handle->condition );
\r
3306 else { // spawn a thread to stop the stream
\r
3307 unsigned threadId;
\r
3308 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3309 &stream_.callbackInfo, 0, &threadId );
\r
3314 // Invoke user callback to get fresh output data UNLESS we are
\r
3315 // draining stream.
\r
3316 if ( handle->drainCounter == 0 ) {
\r
3317 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3318 double streamTime = getStreamTime();
\r
3319 RtAudioStreamStatus status = 0;
\r
3320 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3321 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3324 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3325 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3328 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3329 stream_.bufferSize, streamTime, status, info->userData );
\r
3330 if ( cbReturnValue == 2 ) {
\r
3331 stream_.state = STREAM_STOPPING;
\r
3332 handle->drainCounter = 2;
\r
3333 unsigned threadId;
\r
3334 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3335 &stream_.callbackInfo, 0, &threadId );
\r
3338 else if ( cbReturnValue == 1 ) {
\r
3339 handle->drainCounter = 1;
\r
3340 handle->internalDrain = true;
\r
3344 unsigned int nChannels, bufferBytes, i, j;
\r
3345 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3348 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3350 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3352 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3353 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3354 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3358 else if ( stream_.doConvertBuffer[0] ) {
\r
3360 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3361 if ( stream_.doByteSwap[0] )
\r
3362 byteSwapBuffer( stream_.deviceBuffer,
\r
3363 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3364 stream_.deviceFormat[0] );
\r
3366 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3367 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3368 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3369 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3375 if ( stream_.doByteSwap[0] )
\r
3376 byteSwapBuffer( stream_.userBuffer[0],
\r
3377 stream_.bufferSize * stream_.nUserChannels[0],
\r
3378 stream_.userFormat );
\r
3380 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3381 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3382 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3383 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3388 if ( handle->drainCounter ) {
\r
3389 handle->drainCounter++;
\r
3394 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3396 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3398 if (stream_.doConvertBuffer[1]) {
\r
3400 // Always interleave ASIO input data.
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3403 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3404 handle->bufferInfos[i].buffers[bufferIndex],
\r
3408 if ( stream_.doByteSwap[1] )
\r
3409 byteSwapBuffer( stream_.deviceBuffer,
\r
3410 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3411 stream_.deviceFormat[1] );
\r
3412 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3416 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3417 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3418 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3419 handle->bufferInfos[i].buffers[bufferIndex],
\r
3424 if ( stream_.doByteSwap[1] )
\r
3425 byteSwapBuffer( stream_.userBuffer[1],
\r
3426 stream_.bufferSize * stream_.nUserChannels[1],
\r
3427 stream_.userFormat );
\r
3432 // The following call was suggested by Malte Clasen. While the API
\r
3433 // documentation indicates it should not be required, some device
\r
3434 // drivers apparently do not function correctly without it.
\r
3435 ASIOOutputReady();
\r
3437 RtApi::tickStreamTime();
\r
3441 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3443 // The ASIO documentation says that this usually only happens during
\r
3444 // external sync. Audio processing is not stopped by the driver,
\r
3445 // actual sample rate might not have even changed, maybe only the
\r
3446 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3449 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3451 object->stopStream();
\r
3453 catch ( RtAudioError &exception ) {
\r
3454 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3458 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3461 static long asioMessages( long selector, long value, void* message, double* opt )
\r
3465 switch( selector ) {
\r
3466 case kAsioSelectorSupported:
\r
3467 if ( value == kAsioResetRequest
\r
3468 || value == kAsioEngineVersion
\r
3469 || value == kAsioResyncRequest
\r
3470 || value == kAsioLatenciesChanged
\r
3471 // The following three were added for ASIO 2.0, you don't
\r
3472 // necessarily have to support them.
\r
3473 || value == kAsioSupportsTimeInfo
\r
3474 || value == kAsioSupportsTimeCode
\r
3475 || value == kAsioSupportsInputMonitor)
\r
3478 case kAsioResetRequest:
\r
3479 // Defer the task and perform the reset of the driver during the
\r
3480 // next "safe" situation. You cannot reset the driver right now,
\r
3481 // as this code is called from the driver. Reset the driver is
\r
3482 // done by completely destruct is. I.e. ASIOStop(),
\r
3483 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3485 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3488 case kAsioResyncRequest:
\r
3489 // This informs the application that the driver encountered some
\r
3490 // non-fatal data loss. It is used for synchronization purposes
\r
3491 // of different media. Added mainly to work around the Win16Mutex
\r
3492 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3493 // which could lose data because the Mutex was held too long by
\r
3494 // another thread. However a driver can issue it in other
\r
3495 // situations, too.
\r
3496 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3500 case kAsioLatenciesChanged:
\r
3501 // This will inform the host application that the drivers were
\r
3502 // latencies changed. Beware, it this does not mean that the
\r
3503 // buffer sizes have changed! You might need to update internal
\r
3505 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3508 case kAsioEngineVersion:
\r
3509 // Return the supported ASIO version of the host application. If
\r
3510 // a host application does not implement this selector, ASIO 1.0
\r
3511 // is assumed by the driver.
\r
3514 case kAsioSupportsTimeInfo:
\r
3515 // Informs the driver whether the
\r
3516 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3517 // For compatibility with ASIO 1.0 drivers the host application
\r
3518 // should always support the "old" bufferSwitch method, too.
\r
3521 case kAsioSupportsTimeCode:
\r
3522 // Informs the driver whether application is interested in time
\r
3523 // code info. If an application does not need to know about time
\r
3524 // code, the driver has less work to do.
\r
3531 static const char* getAsioErrorString( ASIOError result )
\r
3536 const char*message;
\r
3539 static const Messages m[] =
\r
3541 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3542 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3543 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3544 { ASE_InvalidMode, "Invalid mode." },
\r
3545 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3546 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3547 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3550 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3551 if ( m[i].value == result ) return m[i].message;
\r
3553 return "Unknown error.";
\r
3555 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3559 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3561 // Modified by Robin Davies, October 2005
\r
3562 // - Improvements to DirectX pointer chasing.
\r
3563 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3564 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3565 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3566 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3568 #include <dsound.h>
\r
3569 #include <assert.h>
\r
3570 #include <algorithm>
\r
3572 #if defined(__MINGW32__)
\r
3573 // missing from latest mingw winapi
\r
3574 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3575 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3576 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3577 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3580 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3582 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3583 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3586 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3588 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3589 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3590 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3591 return pointer >= earlierPointer && pointer < laterPointer;
\r
3594 // A structure to hold various information related to the DirectSound
\r
3595 // API implementation.
\r
3597 unsigned int drainCounter; // Tracks callback counts when draining
\r
3598 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3602 UINT bufferPointer[2];
\r
3603 DWORD dsBufferSize[2];
\r
3604 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3608 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3611 // Declarations for utility functions, callbacks, and structures
\r
3612 // specific to the DirectSound implementation.
\r
3613 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3614 LPCTSTR description,
\r
3616 LPVOID lpContext );
\r
3618 static const char* getErrorString( int code );
\r
3620 static unsigned __stdcall callbackHandler( void *ptr );
\r
3629 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to deviceQueryCallback during enumeration: which
// direction is being probed and where to record discovered devices.
struct DsProbeData {
  bool isInput;
  std::vector<struct DsDevice>* dsDevices;
};
\r
3637 RtApiDs :: RtApiDs()
\r
3639 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3640 // accept whatever the mainline chose for a threading model.
\r
3641 coInitialized_ = false;
\r
3642 HRESULT hr = CoInitialize( NULL );
\r
3643 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3646 RtApiDs :: ~RtApiDs()
\r
3648 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3649 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3652 // The DirectSound default output is always the first device.
\r
3653 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3658 // The DirectSound default input is always the first input device,
\r
3659 // which is the first capture device enumerated.
\r
3660 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3665 unsigned int RtApiDs :: getDeviceCount( void )
\r
3667 // Set query flag for previously found devices to false, so that we
\r
3668 // can check for any devices that have disappeared.
\r
3669 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3670 dsDevices[i].found = false;
\r
3672 // Query DirectSound devices.
\r
3673 struct DsProbeData probeInfo;
\r
3674 probeInfo.isInput = false;
\r
3675 probeInfo.dsDevices = &dsDevices;
\r
3676 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3677 if ( FAILED( result ) ) {
\r
3678 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3679 errorText_ = errorStream_.str();
\r
3680 error( RtAudioError::WARNING );
\r
3683 // Query DirectSoundCapture devices.
\r
3684 probeInfo.isInput = true;
\r
3685 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3686 if ( FAILED( result ) ) {
\r
3687 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3688 errorText_ = errorStream_.str();
\r
3689 error( RtAudioError::WARNING );
\r
3692 // Clean out any devices that may have disappeared.
\r
3693 std::vector< int > indices;
\r
3694 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3695 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3696 unsigned int nErased = 0;
\r
3697 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3698 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3700 return dsDevices.size();
\r
3703 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3705 RtAudio::DeviceInfo info;
\r
3706 info.probed = false;
\r
3708 if ( dsDevices.size() == 0 ) {
\r
3709 // Force a query of all devices
\r
3711 if ( dsDevices.size() == 0 ) {
\r
3712 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3713 error( RtAudioError::INVALID_USE );
\r
3718 if ( device >= dsDevices.size() ) {
\r
3719 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3720 error( RtAudioError::INVALID_USE );
\r
3725 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3727 LPDIRECTSOUND output;
\r
3729 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3730 if ( FAILED( result ) ) {
\r
3731 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3732 errorText_ = errorStream_.str();
\r
3733 error( RtAudioError::WARNING );
\r
3737 outCaps.dwSize = sizeof( outCaps );
\r
3738 result = output->GetCaps( &outCaps );
\r
3739 if ( FAILED( result ) ) {
\r
3740 output->Release();
\r
3741 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3742 errorText_ = errorStream_.str();
\r
3743 error( RtAudioError::WARNING );
\r
3747 // Get output channel information.
\r
3748 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3750 // Get sample rate information.
\r
3751 info.sampleRates.clear();
\r
3752 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3753 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3754 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3755 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3758 // Get format information.
\r
3759 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3760 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3762 output->Release();
\r
3764 if ( getDefaultOutputDevice() == device )
\r
3765 info.isDefaultOutput = true;
\r
3767 if ( dsDevices[ device ].validId[1] == false ) {
\r
3768 info.name = dsDevices[ device ].name;
\r
3769 info.probed = true;
\r
3775 LPDIRECTSOUNDCAPTURE input;
\r
3776 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3777 if ( FAILED( result ) ) {
\r
3778 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3779 errorText_ = errorStream_.str();
\r
3780 error( RtAudioError::WARNING );
\r
3785 inCaps.dwSize = sizeof( inCaps );
\r
3786 result = input->GetCaps( &inCaps );
\r
3787 if ( FAILED( result ) ) {
\r
3789 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3790 errorText_ = errorStream_.str();
\r
3791 error( RtAudioError::WARNING );
\r
3795 // Get input channel information.
\r
3796 info.inputChannels = inCaps.dwChannels;
\r
3798 // Get sample rate and format information.
\r
3799 std::vector<unsigned int> rates;
\r
3800 if ( inCaps.dwChannels >= 2 ) {
\r
3801 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3802 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3803 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3804 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3805 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3806 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3807 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3808 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3810 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3811 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3812 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3813 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3814 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3816 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3817 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3818 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3819 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3820 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3823 else if ( inCaps.dwChannels == 1 ) {
\r
3824 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3825 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3826 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3827 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3828 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3829 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3830 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3831 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3833 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3834 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3835 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3836 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3837 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3839 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3840 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3841 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3842 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3843 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3846 else info.inputChannels = 0; // technically, this would be an error
\r
3850 if ( info.inputChannels == 0 ) return info;
\r
3852 // Copy the supported rates to the info structure but avoid duplication.
\r
3854 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3856 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3857 if ( rates[i] == info.sampleRates[j] ) {
\r
3862 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3864 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3866 // If device opens for both playback and capture, we determine the channels.
\r
3867 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3868 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3870 if ( device == 0 ) info.isDefaultInput = true;
\r
3872 // Copy name and return.
\r
3873 info.name = dsDevices[ device ].name;
\r
3874 info.probed = true;
\r
3878 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3879 unsigned int firstChannel, unsigned int sampleRate,
\r
3880 RtAudioFormat format, unsigned int *bufferSize,
\r
3881 RtAudio::StreamOptions *options )
\r
3883 if ( channels + firstChannel > 2 ) {
\r
3884 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3888 unsigned int nDevices = dsDevices.size();
\r
3889 if ( nDevices == 0 ) {
\r
3890 // This should not happen because a check is made before this function is called.
\r
3891 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3895 if ( device >= nDevices ) {
\r
3896 // This should not happen because a check is made before this function is called.
\r
3897 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3901 if ( mode == OUTPUT ) {
\r
3902 if ( dsDevices[ device ].validId[0] == false ) {
\r
3903 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3904 errorText_ = errorStream_.str();
\r
3908 else { // mode == INPUT
\r
3909 if ( dsDevices[ device ].validId[1] == false ) {
\r
3910 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3911 errorText_ = errorStream_.str();
\r
3916 // According to a note in PortAudio, using GetDesktopWindow()
\r
3917 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3918 // that occur when the application's window is not the foreground
\r
3919 // window. Also, if the application window closes before the
\r
3920 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3921 // problems when using GetDesktopWindow() but it seems fine now
\r
3922 // (January 2010). I'll leave it commented here.
\r
3923 // HWND hWnd = GetForegroundWindow();
\r
3924 HWND hWnd = GetDesktopWindow();
\r
3926 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3927 // two. This is a judgement call and a value of two is probably too
\r
3928 // low for capture, but it should work for playback.
\r
3930 if ( options ) nBuffers = options->numberOfBuffers;
\r
3931 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3932 if ( nBuffers < 2 ) nBuffers = 3;
\r
3934 // Check the lower range of the user-specified buffer size and set
\r
3935 // (arbitrarily) to a lower bound of 32.
\r
3936 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3938 // Create the wave format structure. The data format setting will
\r
3939 // be determined later.
\r
3940 WAVEFORMATEX waveFormat;
\r
3941 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3942 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3943 waveFormat.nChannels = channels + firstChannel;
\r
3944 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3946 // Determine the device buffer size. By default, we'll use the value
\r
3947 // defined above (32K), but we will grow it to make allowances for
\r
3948 // very large software buffer sizes.
\r
3949 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
3950 DWORD dsPointerLeadTime = 0;
\r
3952 void *ohandle = 0, *bhandle = 0;
\r
3954 if ( mode == OUTPUT ) {
\r
3956 LPDIRECTSOUND output;
\r
3957 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3958 if ( FAILED( result ) ) {
\r
3959 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3960 errorText_ = errorStream_.str();
\r
3965 outCaps.dwSize = sizeof( outCaps );
\r
3966 result = output->GetCaps( &outCaps );
\r
3967 if ( FAILED( result ) ) {
\r
3968 output->Release();
\r
3969 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3970 errorText_ = errorStream_.str();
\r
3974 // Check channel information.
\r
3975 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3976 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3977 errorText_ = errorStream_.str();
\r
3981 // Check format information. Use 16-bit format unless not
\r
3982 // supported or user requests 8-bit.
\r
3983 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3984 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3985 waveFormat.wBitsPerSample = 16;
\r
3986 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3989 waveFormat.wBitsPerSample = 8;
\r
3990 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3992 stream_.userFormat = format;
\r
3994 // Update wave format structure and buffer information.
\r
3995 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3996 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3997 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3999 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4000 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4001 dsBufferSize *= 2;
\r
4003 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
4004 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
4005 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
4006 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
4007 if ( FAILED( result ) ) {
\r
4008 output->Release();
\r
4009 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
4010 errorText_ = errorStream_.str();
\r
4014 // Even though we will write to the secondary buffer, we need to
\r
4015 // access the primary buffer to set the correct output format
\r
4016 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
4017 // buffer description.
\r
4018 DSBUFFERDESC bufferDescription;
\r
4019 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4020 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4021 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
4023 // Obtain the primary buffer
\r
4024 LPDIRECTSOUNDBUFFER buffer;
\r
4025 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4026 if ( FAILED( result ) ) {
\r
4027 output->Release();
\r
4028 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
4029 errorText_ = errorStream_.str();
\r
4033 // Set the primary DS buffer sound format.
\r
4034 result = buffer->SetFormat( &waveFormat );
\r
4035 if ( FAILED( result ) ) {
\r
4036 output->Release();
\r
4037 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
4038 errorText_ = errorStream_.str();
\r
4042 // Setup the secondary DS buffer description.
\r
4043 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4044 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4045 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4046 DSBCAPS_GLOBALFOCUS |
\r
4047 DSBCAPS_GETCURRENTPOSITION2 |
\r
4048 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4049 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4050 bufferDescription.lpwfxFormat = &waveFormat;
\r
4052 // Try to create the secondary DS buffer. If that doesn't work,
\r
4053 // try to use software mixing. Otherwise, there's a problem.
\r
4054 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4055 if ( FAILED( result ) ) {
\r
4056 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4057 DSBCAPS_GLOBALFOCUS |
\r
4058 DSBCAPS_GETCURRENTPOSITION2 |
\r
4059 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4060 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4061 if ( FAILED( result ) ) {
\r
4062 output->Release();
\r
4063 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4064 errorText_ = errorStream_.str();
\r
4069 // Get the buffer size ... might be different from what we specified.
\r
4071 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4072 result = buffer->GetCaps( &dsbcaps );
\r
4073 if ( FAILED( result ) ) {
\r
4074 output->Release();
\r
4075 buffer->Release();
\r
4076 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4077 errorText_ = errorStream_.str();
\r
4081 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4083 // Lock the DS buffer
\r
4086 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4087 if ( FAILED( result ) ) {
\r
4088 output->Release();
\r
4089 buffer->Release();
\r
4090 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4091 errorText_ = errorStream_.str();
\r
4095 // Zero the DS buffer
\r
4096 ZeroMemory( audioPtr, dataLen );
\r
4098 // Unlock the DS buffer
\r
4099 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4100 if ( FAILED( result ) ) {
\r
4101 output->Release();
\r
4102 buffer->Release();
\r
4103 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4104 errorText_ = errorStream_.str();
\r
4108 ohandle = (void *) output;
\r
4109 bhandle = (void *) buffer;
\r
4112 if ( mode == INPUT ) {
\r
4114 LPDIRECTSOUNDCAPTURE input;
\r
4115 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4116 if ( FAILED( result ) ) {
\r
4117 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4118 errorText_ = errorStream_.str();
\r
4123 inCaps.dwSize = sizeof( inCaps );
\r
4124 result = input->GetCaps( &inCaps );
\r
4125 if ( FAILED( result ) ) {
\r
4127 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4128 errorText_ = errorStream_.str();
\r
4132 // Check channel information.
\r
4133 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4134 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4138 // Check format information. Use 16-bit format unless user
\r
4139 // requests 8-bit.
\r
4140 DWORD deviceFormats;
\r
4141 if ( channels + firstChannel == 2 ) {
\r
4142 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4143 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4144 waveFormat.wBitsPerSample = 8;
\r
4145 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4147 else { // assume 16-bit is supported
\r
4148 waveFormat.wBitsPerSample = 16;
\r
4149 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4152 else { // channel == 1
\r
4153 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4154 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4155 waveFormat.wBitsPerSample = 8;
\r
4156 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4158 else { // assume 16-bit is supported
\r
4159 waveFormat.wBitsPerSample = 16;
\r
4160 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4163 stream_.userFormat = format;
\r
4165 // Update wave format structure and buffer information.
\r
4166 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4167 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4168 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4170 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4171 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4172 dsBufferSize *= 2;
\r
4174 // Setup the secondary DS buffer description.
\r
4175 DSCBUFFERDESC bufferDescription;
\r
4176 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4177 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4178 bufferDescription.dwFlags = 0;
\r
4179 bufferDescription.dwReserved = 0;
\r
4180 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4181 bufferDescription.lpwfxFormat = &waveFormat;
\r
4183 // Create the capture buffer.
\r
4184 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4185 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4186 if ( FAILED( result ) ) {
\r
4188 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4189 errorText_ = errorStream_.str();
\r
4193 // Get the buffer size ... might be different from what we specified.
\r
4194 DSCBCAPS dscbcaps;
\r
4195 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4196 result = buffer->GetCaps( &dscbcaps );
\r
4197 if ( FAILED( result ) ) {
\r
4199 buffer->Release();
\r
4200 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4201 errorText_ = errorStream_.str();
\r
4205 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4207 // NOTE: We could have a problem here if this is a duplex stream
\r
4208 // and the play and capture hardware buffer sizes are different
\r
4209 // (I'm actually not sure if that is a problem or not).
\r
4210 // Currently, we are not verifying that.
\r
4212 // Lock the capture buffer
\r
4215 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4216 if ( FAILED( result ) ) {
\r
4218 buffer->Release();
\r
4219 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4220 errorText_ = errorStream_.str();
\r
4224 // Zero the buffer
\r
4225 ZeroMemory( audioPtr, dataLen );
\r
4227 // Unlock the buffer
\r
4228 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4229 if ( FAILED( result ) ) {
\r
4231 buffer->Release();
\r
4232 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4233 errorText_ = errorStream_.str();
\r
4237 ohandle = (void *) input;
\r
4238 bhandle = (void *) buffer;
\r
4241 // Set various stream parameters
\r
4242 DsHandle *handle = 0;
\r
4243 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4244 stream_.nUserChannels[mode] = channels;
\r
4245 stream_.bufferSize = *bufferSize;
\r
4246 stream_.channelOffset[mode] = firstChannel;
\r
4247 stream_.deviceInterleaved[mode] = true;
\r
4248 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4249 else stream_.userInterleaved = true;
\r
4251 // Set flag for buffer conversion
\r
4252 stream_.doConvertBuffer[mode] = false;
\r
4253 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4254 stream_.doConvertBuffer[mode] = true;
\r
4255 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4256 stream_.doConvertBuffer[mode] = true;
\r
4257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4258 stream_.nUserChannels[mode] > 1 )
\r
4259 stream_.doConvertBuffer[mode] = true;
\r
4261 // Allocate necessary internal buffers
\r
4262 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4263 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4264 if ( stream_.userBuffer[mode] == NULL ) {
\r
4265 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4269 if ( stream_.doConvertBuffer[mode] ) {
\r
4271 bool makeBuffer = true;
\r
4272 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4273 if ( mode == INPUT ) {
\r
4274 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4275 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4276 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4280 if ( makeBuffer ) {
\r
4281 bufferBytes *= *bufferSize;
\r
4282 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4283 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4284 if ( stream_.deviceBuffer == NULL ) {
\r
4285 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4291 // Allocate our DsHandle structures for the stream.
\r
4292 if ( stream_.apiHandle == 0 ) {
\r
4294 handle = new DsHandle;
\r
4296 catch ( std::bad_alloc& ) {
\r
4297 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4301 // Create a manual-reset event.
\r
4302 handle->condition = CreateEvent( NULL, // no security
\r
4303 TRUE, // manual-reset
\r
4304 FALSE, // non-signaled initially
\r
4305 NULL ); // unnamed
\r
4306 stream_.apiHandle = (void *) handle;
\r
4309 handle = (DsHandle *) stream_.apiHandle;
\r
4310 handle->id[mode] = ohandle;
\r
4311 handle->buffer[mode] = bhandle;
\r
4312 handle->dsBufferSize[mode] = dsBufferSize;
\r
4313 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4315 stream_.device[mode] = device;
\r
4316 stream_.state = STREAM_STOPPED;
\r
4317 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4318 // We had already set up an output stream.
\r
4319 stream_.mode = DUPLEX;
\r
4321 stream_.mode = mode;
\r
4322 stream_.nBuffers = nBuffers;
\r
4323 stream_.sampleRate = sampleRate;
\r
4325 // Setup the buffer conversion information structure.
\r
4326 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4328 // Setup the callback thread.
\r
4329 if ( stream_.callbackInfo.isRunning == false ) {
\r
4330 unsigned threadId;
\r
4331 stream_.callbackInfo.isRunning = true;
\r
4332 stream_.callbackInfo.object = (void *) this;
\r
4333 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4334 &stream_.callbackInfo, 0, &threadId );
\r
4335 if ( stream_.callbackInfo.thread == 0 ) {
\r
4336 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4340 // Boost DS thread priority
\r
4341 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4347 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4348 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4349 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4350 if ( buffer ) buffer->Release();
\r
4351 object->Release();
\r
4353 if ( handle->buffer[1] ) {
\r
4354 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4355 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4356 if ( buffer ) buffer->Release();
\r
4357 object->Release();
\r
4359 CloseHandle( handle->condition );
\r
4361 stream_.apiHandle = 0;
\r
4364 for ( int i=0; i<2; i++ ) {
\r
4365 if ( stream_.userBuffer[i] ) {
\r
4366 free( stream_.userBuffer[i] );
\r
4367 stream_.userBuffer[i] = 0;
\r
4371 if ( stream_.deviceBuffer ) {
\r
4372 free( stream_.deviceBuffer );
\r
4373 stream_.deviceBuffer = 0;
\r
4376 stream_.state = STREAM_CLOSED;
\r
4380 void RtApiDs :: closeStream()
\r
4382 if ( stream_.state == STREAM_CLOSED ) {
\r
4383 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4384 error( RtAudioError::WARNING );
\r
4388 // Stop the callback thread.
\r
4389 stream_.callbackInfo.isRunning = false;
\r
4390 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4391 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4393 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4395 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4396 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4397 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4400 buffer->Release();
\r
4402 object->Release();
\r
4404 if ( handle->buffer[1] ) {
\r
4405 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4406 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4409 buffer->Release();
\r
4411 object->Release();
\r
4413 CloseHandle( handle->condition );
\r
4415 stream_.apiHandle = 0;
\r
4418 for ( int i=0; i<2; i++ ) {
\r
4419 if ( stream_.userBuffer[i] ) {
\r
4420 free( stream_.userBuffer[i] );
\r
4421 stream_.userBuffer[i] = 0;
\r
4425 if ( stream_.deviceBuffer ) {
\r
4426 free( stream_.deviceBuffer );
\r
4427 stream_.deviceBuffer = 0;
\r
4430 stream_.mode = UNINITIALIZED;
\r
4431 stream_.state = STREAM_CLOSED;
\r
4434 void RtApiDs :: startStream()
\r
4437 if ( stream_.state == STREAM_RUNNING ) {
\r
4438 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4439 error( RtAudioError::WARNING );
\r
4443 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4445 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4446 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4447 // this is already in effect.
\r
4448 timeBeginPeriod( 1 );
\r
4450 buffersRolling = false;
\r
4451 duplexPrerollBytes = 0;
\r
4453 if ( stream_.mode == DUPLEX ) {
\r
4454 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4455 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4458 HRESULT result = 0;
\r
4459 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4461 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4462 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4463 if ( FAILED( result ) ) {
\r
4464 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4465 errorText_ = errorStream_.str();
\r
4470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4472 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4473 result = buffer->Start( DSCBSTART_LOOPING );
\r
4474 if ( FAILED( result ) ) {
\r
4475 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4476 errorText_ = errorStream_.str();
\r
4481 handle->drainCounter = 0;
\r
4482 handle->internalDrain = false;
\r
4483 ResetEvent( handle->condition );
\r
4484 stream_.state = STREAM_RUNNING;
\r
4487 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4490 void RtApiDs :: stopStream()
\r
4493 if ( stream_.state == STREAM_STOPPED ) {
\r
4494 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4495 error( RtAudioError::WARNING );
\r
4499 HRESULT result = 0;
\r
4502 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4503 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4504 if ( handle->drainCounter == 0 ) {
\r
4505 handle->drainCounter = 2;
\r
4506 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4509 stream_.state = STREAM_STOPPED;
\r
4511 // Stop the buffer and clear memory
\r
4512 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4513 result = buffer->Stop();
\r
4514 if ( FAILED( result ) ) {
\r
4515 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4516 errorText_ = errorStream_.str();
\r
4520 // Lock the buffer and clear it so that if we start to play again,
\r
4521 // we won't have old data playing.
\r
4522 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4523 if ( FAILED( result ) ) {
\r
4524 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4525 errorText_ = errorStream_.str();
\r
4529 // Zero the DS buffer
\r
4530 ZeroMemory( audioPtr, dataLen );
\r
4532 // Unlock the DS buffer
\r
4533 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4534 if ( FAILED( result ) ) {
\r
4535 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4536 errorText_ = errorStream_.str();
\r
4540 // If we start playing again, we must begin at beginning of buffer.
\r
4541 handle->bufferPointer[0] = 0;
\r
4544 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4545 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4549 stream_.state = STREAM_STOPPED;
\r
4551 result = buffer->Stop();
\r
4552 if ( FAILED( result ) ) {
\r
4553 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4554 errorText_ = errorStream_.str();
\r
4558 // Lock the buffer and clear it so that if we start to play again,
\r
4559 // we won't have old data playing.
\r
4560 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4561 if ( FAILED( result ) ) {
\r
4562 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4563 errorText_ = errorStream_.str();
\r
4567 // Zero the DS buffer
\r
4568 ZeroMemory( audioPtr, dataLen );
\r
4570 // Unlock the DS buffer
\r
4571 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4572 if ( FAILED( result ) ) {
\r
4573 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4574 errorText_ = errorStream_.str();
\r
4578 // If we start recording again, we must begin at beginning of buffer.
\r
4579 handle->bufferPointer[1] = 0;
\r
4583 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4584 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4587 void RtApiDs :: abortStream()
\r
4590 if ( stream_.state == STREAM_STOPPED ) {
\r
4591 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4592 error( RtAudioError::WARNING );
\r
4596 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4597 handle->drainCounter = 2;
\r
4602 void RtApiDs :: callbackEvent()
\r
4604 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4605 Sleep( 50 ); // sleep 50 milliseconds
\r
4609 if ( stream_.state == STREAM_CLOSED ) {
\r
4610 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4611 error( RtAudioError::WARNING );
\r
4615 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4616 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4618 // Check if we were draining the stream and signal is finished.
\r
4619 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4621 stream_.state = STREAM_STOPPING;
\r
4622 if ( handle->internalDrain == false )
\r
4623 SetEvent( handle->condition );
\r
4629 // Invoke user callback to get fresh output data UNLESS we are
\r
4630 // draining stream.
\r
4631 if ( handle->drainCounter == 0 ) {
\r
4632 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4633 double streamTime = getStreamTime();
\r
4634 RtAudioStreamStatus status = 0;
\r
4635 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4636 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4637 handle->xrun[0] = false;
\r
4639 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4640 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4641 handle->xrun[1] = false;
\r
4643 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4644 stream_.bufferSize, streamTime, status, info->userData );
\r
4645 if ( cbReturnValue == 2 ) {
\r
4646 stream_.state = STREAM_STOPPING;
\r
4647 handle->drainCounter = 2;
\r
4651 else if ( cbReturnValue == 1 ) {
\r
4652 handle->drainCounter = 1;
\r
4653 handle->internalDrain = true;
\r
4658 DWORD currentWritePointer, safeWritePointer;
\r
4659 DWORD currentReadPointer, safeReadPointer;
\r
4660 UINT nextWritePointer;
\r
4662 LPVOID buffer1 = NULL;
\r
4663 LPVOID buffer2 = NULL;
\r
4664 DWORD bufferSize1 = 0;
\r
4665 DWORD bufferSize2 = 0;
\r
4670 if ( buffersRolling == false ) {
\r
4671 if ( stream_.mode == DUPLEX ) {
\r
4672 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4674 // It takes a while for the devices to get rolling. As a result,
\r
4675 // there's no guarantee that the capture and write device pointers
\r
4676 // will move in lockstep. Wait here for both devices to start
\r
4677 // rolling, and then set our buffer pointers accordingly.
\r
4678 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4679 // bytes later than the write buffer.
\r
4681 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4682 // take place between the two GetCurrentPosition calls... but I'm
\r
4683 // really not sure how to solve the problem. Temporarily boost to
\r
4684 // Realtime priority, maybe; but I'm not sure what priority the
\r
4685 // DirectSound service threads run at. We *should* be roughly
\r
4686 // within a ms or so of correct.
\r
4688 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4689 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4691 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4693 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4694 if ( FAILED( result ) ) {
\r
4695 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4696 errorText_ = errorStream_.str();
\r
4697 error( RtAudioError::SYSTEM_ERROR );
\r
4700 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4701 if ( FAILED( result ) ) {
\r
4702 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4703 errorText_ = errorStream_.str();
\r
4704 error( RtAudioError::SYSTEM_ERROR );
\r
4708 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4709 if ( FAILED( result ) ) {
\r
4710 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4711 errorText_ = errorStream_.str();
\r
4712 error( RtAudioError::SYSTEM_ERROR );
\r
4715 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4716 if ( FAILED( result ) ) {
\r
4717 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4718 errorText_ = errorStream_.str();
\r
4719 error( RtAudioError::SYSTEM_ERROR );
\r
4722 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4726 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4728 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4729 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4730 handle->bufferPointer[1] = safeReadPointer;
\r
4732 else if ( stream_.mode == OUTPUT ) {
\r
4734 // Set the proper nextWritePosition after initial startup.
\r
4735 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4736 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4737 if ( FAILED( result ) ) {
\r
4738 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4739 errorText_ = errorStream_.str();
\r
4740 error( RtAudioError::SYSTEM_ERROR );
\r
4743 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4744 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4747 buffersRolling = true;
\r
4750 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4752 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4754 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4755 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4756 bufferBytes *= formatBytes( stream_.userFormat );
\r
4757 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4760 // Setup parameters and do buffer conversion if necessary.
\r
4761 if ( stream_.doConvertBuffer[0] ) {
\r
4762 buffer = stream_.deviceBuffer;
\r
4763 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4764 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4765 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4768 buffer = stream_.userBuffer[0];
\r
4769 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4770 bufferBytes *= formatBytes( stream_.userFormat );
\r
4773 // No byte swapping necessary in DirectSound implementation.
\r
4775 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4776 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4778 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4779 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4781 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4782 nextWritePointer = handle->bufferPointer[0];
\r
4784 DWORD endWrite, leadPointer;
\r
4786 // Find out where the read and "safe write" pointers are.
\r
4787 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4788 if ( FAILED( result ) ) {
\r
4789 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4790 errorText_ = errorStream_.str();
\r
4791 error( RtAudioError::SYSTEM_ERROR );
\r
4795 // We will copy our output buffer into the region between
\r
4796 // safeWritePointer and leadPointer. If leadPointer is not
\r
4797 // beyond the next endWrite position, wait until it is.
\r
4798 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4799 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4800 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4801 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4802 endWrite = nextWritePointer + bufferBytes;
\r
4804 // Check whether the entire write region is behind the play pointer.
\r
4805 if ( leadPointer >= endWrite ) break;
\r
4807 // If we are here, then we must wait until the leadPointer advances
\r
4808 // beyond the end of our next write region. We use the
\r
4809 // Sleep() function to suspend operation until that happens.
\r
4810 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4811 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4812 if ( millis < 1.0 ) millis = 1.0;
\r
4813 Sleep( (DWORD) millis );
\r
4816 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4817 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4818 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4819 handle->xrun[0] = true;
\r
4820 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4821 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4822 handle->bufferPointer[0] = nextWritePointer;
\r
4823 endWrite = nextWritePointer + bufferBytes;
\r
4826 // Lock free space in the buffer
\r
4827 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4828 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4829 if ( FAILED( result ) ) {
\r
4830 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4831 errorText_ = errorStream_.str();
\r
4832 error( RtAudioError::SYSTEM_ERROR );
\r
4836 // Copy our buffer into the DS buffer
\r
4837 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4838 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4840 // Update our buffer offset and unlock sound buffer
\r
4841 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4842 if ( FAILED( result ) ) {
\r
4843 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4844 errorText_ = errorStream_.str();
\r
4845 error( RtAudioError::SYSTEM_ERROR );
\r
4848 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4849 handle->bufferPointer[0] = nextWritePointer;
\r
4851 if ( handle->drainCounter ) {
\r
4852 handle->drainCounter++;
\r
4857 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4859 // Setup parameters.
\r
4860 if ( stream_.doConvertBuffer[1] ) {
\r
4861 buffer = stream_.deviceBuffer;
\r
4862 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4863 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4866 buffer = stream_.userBuffer[1];
\r
4867 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4868 bufferBytes *= formatBytes( stream_.userFormat );
\r
4871 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4872 long nextReadPointer = handle->bufferPointer[1];
\r
4873 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4875 // Find out where the write and "safe read" pointers are.
\r
4876 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4877 if ( FAILED( result ) ) {
\r
4878 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4879 errorText_ = errorStream_.str();
\r
4880 error( RtAudioError::SYSTEM_ERROR );
\r
4884 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4885 DWORD endRead = nextReadPointer + bufferBytes;
\r
4887 // Handling depends on whether we are INPUT or DUPLEX.
\r
4888 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4889 // then a wait here will drag the write pointers into the forbidden zone.
\r
4891 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4892 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4893 // practical way to sync up the read and write pointers reliably, given the
\r
4894 // the very complex relationship between phase and increment of the read and write
\r
4897 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4898 // provide a pre-roll period of 0.5 seconds in which we return
\r
4899 // zeros from the read buffer while the pointers sync up.
\r
4901 if ( stream_.mode == DUPLEX ) {
\r
4902 if ( safeReadPointer < endRead ) {
\r
4903 if ( duplexPrerollBytes <= 0 ) {
\r
4904 // Pre-roll time over. Be more agressive.
\r
4905 int adjustment = endRead-safeReadPointer;
\r
4907 handle->xrun[1] = true;
\r
4909 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4910 // and perform fine adjustments later.
\r
4911 // - small adjustments: back off by twice as much.
\r
4912 if ( adjustment >= 2*bufferBytes )
\r
4913 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4915 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4917 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4921 // In pre=roll time. Just do it.
\r
4922 nextReadPointer = safeReadPointer - bufferBytes;
\r
4923 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4925 endRead = nextReadPointer + bufferBytes;
\r
4928 else { // mode == INPUT
\r
4929 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4930 // See comments for playback.
\r
4931 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4932 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4933 if ( millis < 1.0 ) millis = 1.0;
\r
4934 Sleep( (DWORD) millis );
\r
4936 // Wake up and find out where we are now.
\r
4937 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4938 if ( FAILED( result ) ) {
\r
4939 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4940 errorText_ = errorStream_.str();
\r
4941 error( RtAudioError::SYSTEM_ERROR );
\r
4945 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4949 // Lock free space in the buffer
\r
4950 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4951 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4952 if ( FAILED( result ) ) {
\r
4953 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4954 errorText_ = errorStream_.str();
\r
4955 error( RtAudioError::SYSTEM_ERROR );
\r
4959 if ( duplexPrerollBytes <= 0 ) {
\r
4960 // Copy our buffer into the DS buffer
\r
4961 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4962 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4965 memset( buffer, 0, bufferSize1 );
\r
4966 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4967 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4970 // Update our buffer offset and unlock sound buffer
\r
4971 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4972 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4973 if ( FAILED( result ) ) {
\r
4974 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4975 errorText_ = errorStream_.str();
\r
4976 error( RtAudioError::SYSTEM_ERROR );
\r
4979 handle->bufferPointer[1] = nextReadPointer;
\r
4981 // No byte swapping necessary in DirectSound implementation.
\r
4983 // If necessary, convert 8-bit data from unsigned to signed.
\r
4984 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4985 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4987 // Do buffer conversion if necessary.
\r
4988 if ( stream_.doConvertBuffer[1] )
\r
4989 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4993 RtApi::tickStreamTime();
\r
4996 // Definitions for utility functions and callbacks
\r
4997 // specific to the DirectSound implementation.
\r
4999 static unsigned __stdcall callbackHandler( void *ptr )
\r
5001 CallbackInfo *info = (CallbackInfo *) ptr;
\r
5002 RtApiDs *object = (RtApiDs *) info->object;
\r
5003 bool* isRunning = &info->isRunning;
\r
5005 while ( *isRunning == true ) {
\r
5006 object->callbackEvent();
\r
5009 _endthreadex( 0 );
\r
5013 #include "tchar.h"
\r
5015 static std::string convertTChar( LPCTSTR name )
\r
5017 #if defined( UNICODE ) || defined( _UNICODE )
\r
5018 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5019 std::string s( length-1, '\0' );
\r
5020 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
5022 std::string s( name );
\r
5028 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5029 LPCTSTR description,
\r
5031 LPVOID lpContext )
\r
5033 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5034 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5037 bool validDevice = false;
\r
5038 if ( probeInfo.isInput == true ) {
\r
5040 LPDIRECTSOUNDCAPTURE object;
\r
5042 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5043 if ( hr != DS_OK ) return TRUE;
\r
5045 caps.dwSize = sizeof(caps);
\r
5046 hr = object->GetCaps( &caps );
\r
5047 if ( hr == DS_OK ) {
\r
5048 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5049 validDevice = true;
\r
5051 object->Release();
\r
5055 LPDIRECTSOUND object;
\r
5056 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5057 if ( hr != DS_OK ) return TRUE;
\r
5059 caps.dwSize = sizeof(caps);
\r
5060 hr = object->GetCaps( &caps );
\r
5061 if ( hr == DS_OK ) {
\r
5062 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5063 validDevice = true;
\r
5065 object->Release();
\r
5068 // If good device, then save its name and guid.
\r
5069 std::string name = convertTChar( description );
\r
5070 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5071 if ( lpguid == NULL )
\r
5072 name = "Default Device";
\r
5073 if ( validDevice ) {
\r
5074 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5075 if ( dsDevices[i].name == name ) {
\r
5076 dsDevices[i].found = true;
\r
5077 if ( probeInfo.isInput ) {
\r
5078 dsDevices[i].id[1] = lpguid;
\r
5079 dsDevices[i].validId[1] = true;
\r
5082 dsDevices[i].id[0] = lpguid;
\r
5083 dsDevices[i].validId[0] = true;
\r
5090 device.name = name;
\r
5091 device.found = true;
\r
5092 if ( probeInfo.isInput ) {
\r
5093 device.id[1] = lpguid;
\r
5094 device.validId[1] = true;
\r
5097 device.id[0] = lpguid;
\r
5098 device.validId[0] = true;
\r
5100 dsDevices.push_back( device );
\r
5106 static const char* getErrorString( int code )
\r
5110 case DSERR_ALLOCATED:
\r
5111 return "Already allocated";
\r
5113 case DSERR_CONTROLUNAVAIL:
\r
5114 return "Control unavailable";
\r
5116 case DSERR_INVALIDPARAM:
\r
5117 return "Invalid parameter";
\r
5119 case DSERR_INVALIDCALL:
\r
5120 return "Invalid call";
\r
5122 case DSERR_GENERIC:
\r
5123 return "Generic error";
\r
5125 case DSERR_PRIOLEVELNEEDED:
\r
5126 return "Priority level needed";
\r
5128 case DSERR_OUTOFMEMORY:
\r
5129 return "Out of memory";
\r
5131 case DSERR_BADFORMAT:
\r
5132 return "The sample rate or the channel format is not supported";
\r
5134 case DSERR_UNSUPPORTED:
\r
5135 return "Not supported";
\r
5137 case DSERR_NODRIVER:
\r
5138 return "No driver";
\r
5140 case DSERR_ALREADYINITIALIZED:
\r
5141 return "Already initialized";
\r
5143 case DSERR_NOAGGREGATION:
\r
5144 return "No aggregation";
\r
5146 case DSERR_BUFFERLOST:
\r
5147 return "Buffer lost";
\r
5149 case DSERR_OTHERAPPHASPRIO:
\r
5150 return "Another application already has priority";
\r
5152 case DSERR_UNINITIALIZED:
\r
5153 return "Uninitialized";
\r
5156 return "DirectSound unknown error";
\r
5159 //******************** End of __WINDOWS_DS__ *********************//
\r
5163 #if defined(__LINUX_ALSA__)
\r
5165 #include <alsa/asoundlib.h>
\r
5166 #include <unistd.h>
\r
5168 // A structure to hold various information related to the ALSA API
\r
5169 // implementation.
\r
5170 struct AlsaHandle {
\r
5171 snd_pcm_t *handles[2];
\r
5172 bool synchronized;
\r
5174 pthread_cond_t runnable_cv;
\r
5178 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5181 static void *alsaCallbackHandler( void * ptr );
\r
5183 RtApiAlsa :: RtApiAlsa()
\r
5185 // Nothing to do here.
\r
5188 RtApiAlsa :: ~RtApiAlsa()
\r
5190 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5193 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5195 unsigned nDevices = 0;
\r
5196 int result, subdevice, card;
\r
5198 snd_ctl_t *handle;
\r
5200 // Count cards and devices
\r
5202 snd_card_next( &card );
\r
5203 while ( card >= 0 ) {
\r
5204 sprintf( name, "hw:%d", card );
\r
5205 result = snd_ctl_open( &handle, name, 0 );
\r
5206 if ( result < 0 ) {
\r
5207 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5208 errorText_ = errorStream_.str();
\r
5209 error( RtAudioError::WARNING );
\r
5214 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5215 if ( result < 0 ) {
\r
5216 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5217 errorText_ = errorStream_.str();
\r
5218 error( RtAudioError::WARNING );
\r
5221 if ( subdevice < 0 )
\r
5226 snd_ctl_close( handle );
\r
5227 snd_card_next( &card );
\r
5230 result = snd_ctl_open( &handle, "default", 0 );
\r
5231 if (result == 0) {
\r
5233 snd_ctl_close( handle );
\r
5239 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5241 RtAudio::DeviceInfo info;
\r
5242 info.probed = false;
\r
5244 unsigned nDevices = 0;
\r
5245 int result, subdevice, card;
\r
5247 snd_ctl_t *chandle;
\r
5249 // Count cards and devices
\r
5251 snd_card_next( &card );
\r
5252 while ( card >= 0 ) {
\r
5253 sprintf( name, "hw:%d", card );
\r
5254 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5255 if ( result < 0 ) {
\r
5256 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5257 errorText_ = errorStream_.str();
\r
5258 error( RtAudioError::WARNING );
\r
5263 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5264 if ( result < 0 ) {
\r
5265 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5266 errorText_ = errorStream_.str();
\r
5267 error( RtAudioError::WARNING );
\r
5270 if ( subdevice < 0 ) break;
\r
5271 if ( nDevices == device ) {
\r
5272 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5278 snd_ctl_close( chandle );
\r
5279 snd_card_next( &card );
\r
5282 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5283 if ( result == 0 ) {
\r
5284 if ( nDevices == device ) {
\r
5285 strcpy( name, "default" );
\r
5291 if ( nDevices == 0 ) {
\r
5292 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5293 error( RtAudioError::INVALID_USE );
\r
5297 if ( device >= nDevices ) {
\r
5298 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5299 error( RtAudioError::INVALID_USE );
\r
5305 // If a stream is already open, we cannot probe the stream devices.
\r
5306 // Thus, use the saved results.
\r
5307 if ( stream_.state != STREAM_CLOSED &&
\r
5308 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5309 snd_ctl_close( chandle );
\r
5310 if ( device >= devices_.size() ) {
\r
5311 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5312 error( RtAudioError::WARNING );
\r
5315 return devices_[ device ];
\r
5318 int openMode = SND_PCM_ASYNC;
\r
5319 snd_pcm_stream_t stream;
\r
5320 snd_pcm_info_t *pcminfo;
\r
5321 snd_pcm_info_alloca( &pcminfo );
\r
5322 snd_pcm_t *phandle;
\r
5323 snd_pcm_hw_params_t *params;
\r
5324 snd_pcm_hw_params_alloca( ¶ms );
\r
5326 // First try for playback unless default device (which has subdev -1)
\r
5327 stream = SND_PCM_STREAM_PLAYBACK;
\r
5328 snd_pcm_info_set_stream( pcminfo, stream );
\r
5329 if ( subdevice != -1 ) {
\r
5330 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5331 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5333 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5334 if ( result < 0 ) {
\r
5335 // Device probably doesn't support playback.
\r
5336 goto captureProbe;
\r
5340 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5341 if ( result < 0 ) {
\r
5342 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5343 errorText_ = errorStream_.str();
\r
5344 error( RtAudioError::WARNING );
\r
5345 goto captureProbe;
\r
5348 // The device is open ... fill the parameter structure.
\r
5349 result = snd_pcm_hw_params_any( phandle, params );
\r
5350 if ( result < 0 ) {
\r
5351 snd_pcm_close( phandle );
\r
5352 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5353 errorText_ = errorStream_.str();
\r
5354 error( RtAudioError::WARNING );
\r
5355 goto captureProbe;
\r
5358 // Get output channel information.
\r
5359 unsigned int value;
\r
5360 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5361 if ( result < 0 ) {
\r
5362 snd_pcm_close( phandle );
\r
5363 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5364 errorText_ = errorStream_.str();
\r
5365 error( RtAudioError::WARNING );
\r
5366 goto captureProbe;
\r
5368 info.outputChannels = value;
\r
5369 snd_pcm_close( phandle );
\r
5372 stream = SND_PCM_STREAM_CAPTURE;
\r
5373 snd_pcm_info_set_stream( pcminfo, stream );
\r
5375 // Now try for capture unless default device (with subdev = -1)
\r
5376 if ( subdevice != -1 ) {
\r
5377 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5378 snd_ctl_close( chandle );
\r
5379 if ( result < 0 ) {
\r
5380 // Device probably doesn't support capture.
\r
5381 if ( info.outputChannels == 0 ) return info;
\r
5382 goto probeParameters;
\r
5386 snd_ctl_close( chandle );
\r
5388 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5389 if ( result < 0 ) {
\r
5390 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5391 errorText_ = errorStream_.str();
\r
5392 error( RtAudioError::WARNING );
\r
5393 if ( info.outputChannels == 0 ) return info;
\r
5394 goto probeParameters;
\r
5397 // The device is open ... fill the parameter structure.
\r
5398 result = snd_pcm_hw_params_any( phandle, params );
\r
5399 if ( result < 0 ) {
\r
5400 snd_pcm_close( phandle );
\r
5401 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5402 errorText_ = errorStream_.str();
\r
5403 error( RtAudioError::WARNING );
\r
5404 if ( info.outputChannels == 0 ) return info;
\r
5405 goto probeParameters;
\r
5408 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5409 if ( result < 0 ) {
\r
5410 snd_pcm_close( phandle );
\r
5411 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtAudioError::WARNING );
\r
5414 if ( info.outputChannels == 0 ) return info;
\r
5415 goto probeParameters;
\r
5417 info.inputChannels = value;
\r
5418 snd_pcm_close( phandle );
\r
5420 // If device opens for both playback and capture, we determine the channels.
\r
5421 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5422 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5424 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5425 if ( device == 0 && info.outputChannels > 0 )
\r
5426 info.isDefaultOutput = true;
\r
5427 if ( device == 0 && info.inputChannels > 0 )
\r
5428 info.isDefaultInput = true;
\r
5431 // At this point, we just need to figure out the supported data
\r
5432 // formats and sample rates. We'll proceed by opening the device in
\r
5433 // the direction with the maximum number of channels, or playback if
\r
5434 // they are equal. This might limit our sample rate options, but so
\r
5437 if ( info.outputChannels >= info.inputChannels )
\r
5438 stream = SND_PCM_STREAM_PLAYBACK;
\r
5440 stream = SND_PCM_STREAM_CAPTURE;
\r
5441 snd_pcm_info_set_stream( pcminfo, stream );
\r
5443 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5444 if ( result < 0 ) {
\r
5445 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5446 errorText_ = errorStream_.str();
\r
5447 error( RtAudioError::WARNING );
\r
5451 // The device is open ... fill the parameter structure.
\r
5452 result = snd_pcm_hw_params_any( phandle, params );
\r
5453 if ( result < 0 ) {
\r
5454 snd_pcm_close( phandle );
\r
5455 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5456 errorText_ = errorStream_.str();
\r
5457 error( RtAudioError::WARNING );
\r
5461 // Test our discrete set of sample rate values.
\r
5462 info.sampleRates.clear();
\r
5463 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5464 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5465 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5467 if ( info.sampleRates.size() == 0 ) {
\r
5468 snd_pcm_close( phandle );
\r
5469 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5470 errorText_ = errorStream_.str();
\r
5471 error( RtAudioError::WARNING );
\r
5475 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5476 snd_pcm_format_t format;
\r
5477 info.nativeFormats = 0;
\r
5478 format = SND_PCM_FORMAT_S8;
\r
5479 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5480 info.nativeFormats |= RTAUDIO_SINT8;
\r
5481 format = SND_PCM_FORMAT_S16;
\r
5482 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5483 info.nativeFormats |= RTAUDIO_SINT16;
\r
5484 format = SND_PCM_FORMAT_S24;
\r
5485 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5486 info.nativeFormats |= RTAUDIO_SINT24;
\r
5487 format = SND_PCM_FORMAT_S32;
\r
5488 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5489 info.nativeFormats |= RTAUDIO_SINT32;
\r
5490 format = SND_PCM_FORMAT_FLOAT;
\r
5491 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5492 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5493 format = SND_PCM_FORMAT_FLOAT64;
\r
5494 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5495 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5497 // Check that we have at least one supported format
\r
5498 if ( info.nativeFormats == 0 ) {
\r
5499 snd_pcm_close( phandle );
\r
5500 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5501 errorText_ = errorStream_.str();
\r
5502 error( RtAudioError::WARNING );
\r
5506 // Get the device name
\r
5508 result = snd_card_get_name( card, &cardname );
\r
5509 if ( result >= 0 ) {
\r
5510 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5515 // That's all ... close the device and return
\r
5516 snd_pcm_close( phandle );
\r
5517 info.probed = true;
\r
// Re-probe all audio devices and cache the results in devices_ so that
// getDeviceInfo() data stays available while a stream is open (an open
// ALSA device cannot be probed again).
// NOTE(review): this excerpt is missing lines (embedded numbering jumps
// 5521 -> 5525): the function braces and, presumably, a devices_.clear()
// call were dropped by extraction -- verify against the canonical source.
5521 void RtApiAlsa :: saveDeviceInfo( void )
\r
5525   unsigned int nDevices = getDeviceCount();
\r
5526   devices_.resize( nDevices );
\r
5527   for ( unsigned int i=0; i<nDevices; i++ )
\r
5528     devices_[i] = getDeviceInfo( i );
\r
// Open and fully configure one direction (OUTPUT or INPUT) of an ALSA PCM
// stream: resolve the device name, open the pcm handle, negotiate access
// mode / sample format / rate / channels / period size, install hw and sw
// params, allocate user and conversion buffers, and (on first call) spawn
// the callback thread. Returns SUCCESS/FAILURE (bool per signature).
// NOTE(review): this excerpt is lossy -- embedded original line numbers
// jump repeatedly (e.g. 5534->5537, 5570->5575, 6008->6017), so closing
// braces, `else` branches, `return FAILURE` statements and the trailing
// error-cleanup label are missing here. Comments below describe only what
// the visible lines show; confirm structure against the canonical source.
5531 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5532                                    unsigned int firstChannel, unsigned int sampleRate,
\r
5533                                    RtAudioFormat format, unsigned int *bufferSize,
\r
5534                                    RtAudio::StreamOptions *options )
\r
5537 #if defined(__RTAUDIO_DEBUG__)
\r
5538   snd_output_t *out;
\r
5539   snd_output_stdio_attach(&out, stderr, 0);
\r
5542   // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5544   unsigned nDevices = 0;
\r
5545   int result, subdevice, card;
\r
5547   snd_ctl_t *chandle;
\r
5549   if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5550     snprintf(name, sizeof(name), "%s", "default");
\r
5552   // Count cards and devices
\r
// Walk all sound cards/subdevices to translate the numeric `device` index
// into an ALSA "hw:card,subdevice" name string.
5554   snd_card_next( &card );
\r
5555   while ( card >= 0 ) {
\r
5556     sprintf( name, "hw:%d", card );
\r
5557     result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5558     if ( result < 0 ) {
\r
5559       errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5560       errorText_ = errorStream_.str();
\r
5565       result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5566       if ( result < 0 ) break;
\r
5567       if ( subdevice < 0 ) break;
\r
5568       if ( nDevices == device ) {
\r
5569         sprintf( name, "hw:%d,%d", card, subdevice );
\r
5570         snd_ctl_close( chandle );
\r
5575     snd_ctl_close( chandle );
\r
5576     snd_card_next( &card );
\r
// The "default" virtual device is counted last, after all hw devices.
5579   result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5580   if ( result == 0 ) {
\r
5581     if ( nDevices == device ) {
\r
5582       strcpy( name, "default" );
\r
5588   if ( nDevices == 0 ) {
\r
5589     // This should not happen because a check is made before this function is called.
\r
5590     errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5594   if ( device >= nDevices ) {
\r
5595     // This should not happen because a check is made before this function is called.
\r
5596     errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5603   // The getDeviceInfo() function will not work for a device that is
\r
5604   // already open.  Thus, we'll probe the system before opening a
\r
5605   // stream and save the results for use by getDeviceInfo().
\r
5606   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5607     this->saveDeviceInfo();
\r
5609   snd_pcm_stream_t stream;
\r
5610   if ( mode == OUTPUT )
\r
5611     stream = SND_PCM_STREAM_PLAYBACK;
\r
5613     stream = SND_PCM_STREAM_CAPTURE;
\r
5615   snd_pcm_t *phandle;
\r
5616   int openMode = SND_PCM_ASYNC;
\r
5617   result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5618   if ( result < 0 ) {
\r
5619     if ( mode == OUTPUT )
\r
5620       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5622       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5623     errorText_ = errorStream_.str();
\r
5627   // Fill the parameter structure.
\r
5628   snd_pcm_hw_params_t *hw_params;
\r
5629   snd_pcm_hw_params_alloca( &hw_params );
\r
5630   result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5631   if ( result < 0 ) {
\r
5632     snd_pcm_close( phandle );
\r
5633     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5634     errorText_ = errorStream_.str();
\r
5638 #if defined(__RTAUDIO_DEBUG__)
\r
5639   fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5640   snd_pcm_hw_params_dump( hw_params, out );
\r
5643   // Set access ... check user preference.
\r
// Try the user's preferred (non-)interleaved access first; fall back to
// the other mode and record what the device actually accepted in
// stream_.deviceInterleaved[mode].
5644   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5645     stream_.userInterleaved = false;
\r
5646     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5647     if ( result < 0 ) {
\r
5648       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5649       stream_.deviceInterleaved[mode] =  true;
\r
5652       stream_.deviceInterleaved[mode] = false;
\r
5655     stream_.userInterleaved = true;
\r
5656     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5657     if ( result < 0 ) {
\r
5658       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5659       stream_.deviceInterleaved[mode] =  false;
\r
5662       stream_.deviceInterleaved[mode] =  true;
\r
5665   if ( result < 0 ) {
\r
5666     snd_pcm_close( phandle );
\r
5667     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5668     errorText_ = errorStream_.str();
\r
5672   // Determine how to set the device format.
\r
5673   stream_.userFormat = format;
\r
5674   snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
// Map the RtAudio format constant to the matching ALSA PCM format.
5676   if ( format == RTAUDIO_SINT8 )
\r
5677     deviceFormat = SND_PCM_FORMAT_S8;
\r
5678   else if ( format == RTAUDIO_SINT16 )
\r
5679     deviceFormat = SND_PCM_FORMAT_S16;
\r
5680   else if ( format == RTAUDIO_SINT24 )
\r
5681     deviceFormat = SND_PCM_FORMAT_S24;
\r
5682   else if ( format == RTAUDIO_SINT32 )
\r
5683     deviceFormat = SND_PCM_FORMAT_S32;
\r
5684   else if ( format == RTAUDIO_FLOAT32 )
\r
5685     deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5686   else if ( format == RTAUDIO_FLOAT64 )
\r
5687     deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5689   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5690     stream_.deviceFormat[mode] = format;
\r
// Requested format unsupported: probe alternatives from widest (float64)
// down to narrowest (s8); format conversion is set up later if needed.
5694   // The user requested format is not natively supported by the device.
\r
5695   deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5696   if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5697     stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5701   deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5702   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5703     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5707   deviceFormat = SND_PCM_FORMAT_S32;
\r
5708   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5709     stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5713   deviceFormat = SND_PCM_FORMAT_S24;
\r
5714   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5715     stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5719   deviceFormat = SND_PCM_FORMAT_S16;
\r
5720   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5721     stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5725   deviceFormat = SND_PCM_FORMAT_S8;
\r
5726   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5727     stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5731   // If we get here, no supported format was found.
\r
5732   snd_pcm_close( phandle );
\r
5733   errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5734   errorText_ = errorStream_.str();
\r
5738   result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5739   if ( result < 0 ) {
\r
5740     snd_pcm_close( phandle );
\r
5741     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5742     errorText_ = errorStream_.str();
\r
5746   // Determine whether byte-swaping is necessary.
\r
5747   stream_.doByteSwap[mode] = false;
\r
5748   if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5749     result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5750     if ( result == 0 )
\r
5751       stream_.doByteSwap[mode] = true;
\r
5752     else if (result < 0) {
\r
5753       snd_pcm_close( phandle );
\r
5754       errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5755       errorText_ = errorStream_.str();
\r
5760   // Set the sample rate.
\r
5761   result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5762   if ( result < 0 ) {
\r
5763     snd_pcm_close( phandle );
\r
5764     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5765     errorText_ = errorStream_.str();
\r
5769   // Determine the number of channels for this device.  We support a possible
\r
5770   // minimum device channel number > than the value requested by the user.
\r
5771   stream_.nUserChannels[mode] = channels;
\r
5772   unsigned int value;
\r
5773   result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5774   unsigned int deviceChannels = value;
\r
5775   if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5776     snd_pcm_close( phandle );
\r
5777     errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5778     errorText_ = errorStream_.str();
\r
5782   result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5783   if ( result < 0 ) {
\r
5784     snd_pcm_close( phandle );
\r
5785     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5786     errorText_ = errorStream_.str();
\r
// Open at least enough device channels to cover channels + firstChannel;
// channel-count conversion is flagged later if user count differs.
5789   deviceChannels = value;
\r
5790   if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5791   stream_.nDeviceChannels[mode] = deviceChannels;
\r
5793   // Set the device channels.
\r
5794   result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5795   if ( result < 0 ) {
\r
5796     snd_pcm_close( phandle );
\r
5797     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5798     errorText_ = errorStream_.str();
\r
5802   // Set the buffer (or period) size.
\r
// NOTE(review): `dir` is used below but its declaration (original line
// 5803) is missing from this excerpt.
5804   snd_pcm_uframes_t periodSize = *bufferSize;
\r
5805   result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5806   if ( result < 0 ) {
\r
5807     snd_pcm_close( phandle );
\r
5808     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5809     errorText_ = errorStream_.str();
\r
5812   *bufferSize = periodSize;
\r
5814   // Set the buffer number, which in ALSA is referred to as the "period".
\r
5815   unsigned int periods = 0;
\r
5816   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5817   if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5818   if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5819   result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5820   if ( result < 0 ) {
\r
5821     snd_pcm_close( phandle );
\r
5822     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5823     errorText_ = errorStream_.str();
\r
5827   // If attempting to setup a duplex stream, the bufferSize parameter
\r
5828   // MUST be the same in both directions!
\r
5829   if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5830     snd_pcm_close( phandle );
\r
5831     errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5832     errorText_ = errorStream_.str();
\r
5836   stream_.bufferSize = *bufferSize;
\r
5838   // Install the hardware configuration
\r
5839   result = snd_pcm_hw_params( phandle, hw_params );
\r
5840   if ( result < 0 ) {
\r
5841     snd_pcm_close( phandle );
\r
5842     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5843     errorText_ = errorStream_.str();
\r
5847 #if defined(__RTAUDIO_DEBUG__)
\r
5848   fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5849   snd_pcm_hw_params_dump( hw_params, out );
\r
5852   // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5853   snd_pcm_sw_params_t *sw_params = NULL;
\r
5854   snd_pcm_sw_params_alloca( &sw_params );
\r
5855   snd_pcm_sw_params_current( phandle, sw_params );
\r
5856   snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5857   snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5858   snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5860   // The following two settings were suggested by Theo Veenker
\r
5861   //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5862   //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5864   // here are two options for a fix
\r
5865   //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
// Silence the whole ring buffer on xrun: the boundary value is the
// largest legal silence size for this configuration.
5866   snd_pcm_uframes_t val;
\r
5867   snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5868   snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5870   result = snd_pcm_sw_params( phandle, sw_params );
\r
5871   if ( result < 0 ) {
\r
5872     snd_pcm_close( phandle );
\r
5873     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5874     errorText_ = errorStream_.str();
\r
5878 #if defined(__RTAUDIO_DEBUG__)
\r
5879   fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5880   snd_pcm_sw_params_dump( sw_params, out );
\r
5883   // Set flags for buffer conversion
\r
// Conversion is needed when format, channel count, or interleaving of the
// user-side buffer differs from what the device accepted.
5884   stream_.doConvertBuffer[mode] = false;
\r
5885   if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5886     stream_.doConvertBuffer[mode] = true;
\r
5887   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5888     stream_.doConvertBuffer[mode] = true;
\r
5889   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5890        stream_.nUserChannels[mode] > 1 )
\r
5891     stream_.doConvertBuffer[mode] = true;
\r
5893   // Allocate the ApiHandle if necessary and then save.
\r
5894   AlsaHandle *apiInfo = 0;
\r
5895   if ( stream_.apiHandle == 0 ) {
\r
5897       apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5899     catch ( std::bad_alloc& ) {
\r
5900       errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5904     if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5905       errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5909     stream_.apiHandle = (void *) apiInfo;
\r
5910     apiInfo->handles[0] = 0;
\r
5911     apiInfo->handles[1] = 0;
\r
5914     apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5916   apiInfo->handles[mode] = phandle;
\r
5919   // Allocate necessary internal buffers.
\r
5920   unsigned long bufferBytes;
\r
5921   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5922   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5923   if ( stream_.userBuffer[mode] == NULL ) {
\r
5924     errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5928   if ( stream_.doConvertBuffer[mode] ) {
\r
// For duplex streams the existing output-side device buffer is reused if
// it is already large enough for the input side.
5930     bool makeBuffer = true;
\r
5931     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5932     if ( mode == INPUT ) {
\r
5933       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5934         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5935         if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5939     if ( makeBuffer ) {
\r
5940       bufferBytes *= *bufferSize;
\r
5941       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5942       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5943       if ( stream_.deviceBuffer == NULL ) {
\r
5944         errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5950   stream_.sampleRate = sampleRate;
\r
5951   stream_.nBuffers = periods;
\r
5952   stream_.device[mode] = device;
\r
5953   stream_.state = STREAM_STOPPED;
\r
5955   // Setup the buffer conversion information structure.
\r
5956   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5958   // Setup thread if necessary.
\r
5959   if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5960     // We had already set up an output stream.
\r
5961     stream_.mode = DUPLEX;
\r
5962     // Link the streams if possible.
\r
5963     apiInfo->synchronized = false;
\r
5964     if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5965       apiInfo->synchronized = true;
\r
5967       errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5968       error( RtAudioError::WARNING );
\r
5972     stream_.mode = mode;
\r
5974     // Setup callback thread.
\r
5975     stream_.callbackInfo.object = (void *) this;
\r
5977     // Set the thread attributes for joinable and realtime scheduling
\r
5978     // priority (optional).  The higher priority will only take affect
\r
5979     // if the program is run as root or suid. Note, under Linux
\r
5980     // processes with CAP_SYS_NICE privilege, a user can change
\r
5981     // scheduling policy and priority (thus need not be root). See
\r
5982     // POSIX "capabilities".
\r
5983     pthread_attr_t attr;
\r
5984     pthread_attr_init( &attr );
\r
5985     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5987 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5988     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5989       // We previously attempted to increase the audio callback priority
\r
5990       // to SCHED_RR here via the attributes.  However, while no errors
\r
5991       // were reported in doing so, it did not work.  So, now this is
\r
5992       // done in the alsaCallbackHandler function.
\r
// Clamp the user-requested priority into the valid SCHED_RR range; the
// actual scheduling change happens in alsaCallbackHandler.
5993       stream_.callbackInfo.doRealtime = true;
\r
5994       int priority = options->priority;
\r
5995       int min = sched_get_priority_min( SCHED_RR );
\r
5996       int max = sched_get_priority_max( SCHED_RR );
\r
5997       if ( priority < min ) priority = min;
\r
5998       else if ( priority > max ) priority = max;
\r
5999       stream_.callbackInfo.priority = priority;
\r
6003     stream_.callbackInfo.isRunning = true;
\r
6004     result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
6005     pthread_attr_destroy( &attr );
\r
6007       stream_.callbackInfo.isRunning = false;
\r
6008       errorText_ = "RtApiAlsa::error creating callback thread!";
\r
// Error-cleanup path: release the AlsaHandle, pcm handles, and any
// allocated buffers before reporting failure.
// NOTE(review): the label introducing this section (original ~line 6015)
// is missing from this excerpt.
6017     pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6018     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6019     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6021     stream_.apiHandle = 0;
\r
6024     if ( phandle) snd_pcm_close( phandle );
\r
6026   for ( int i=0; i<2; i++ ) {
\r
6027     if ( stream_.userBuffer[i] ) {
\r
6028       free( stream_.userBuffer[i] );
\r
6029       stream_.userBuffer[i] = 0;
\r
6033   if ( stream_.deviceBuffer ) {
\r
6034     free( stream_.deviceBuffer );
\r
6035     stream_.deviceBuffer = 0;
\r
6038   stream_.state = STREAM_CLOSED;
\r
// Shut down an open stream: wake and join the callback thread, drop any
// in-flight PCM data, close both pcm handles, free the AlsaHandle and all
// user/device buffers, and reset the stream to UNINITIALIZED/CLOSED.
// NOTE(review): closing braces and the AlsaHandle `delete` (embedded
// numbering jumps, e.g. 6065->6069, 6071->6073) are missing from this
// excerpt -- verify against the canonical source.
6042 void RtApiAlsa :: closeStream()
\r
6044   if ( stream_.state == STREAM_CLOSED ) {
\r
6045     errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6046     error( RtAudioError::WARNING );
\r
6050   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Stop the callback loop; if the thread is blocked waiting on the
// condition variable (stream stopped), signal it so it can exit.
6051   stream_.callbackInfo.isRunning = false;
\r
6052   MUTEX_LOCK( &stream_.mutex );
\r
6053   if ( stream_.state == STREAM_STOPPED ) {
\r
6054     apiInfo->runnable = true;
\r
6055     pthread_cond_signal( &apiInfo->runnable_cv );
\r
6057   MUTEX_UNLOCK( &stream_.mutex );
\r
6058   pthread_join( stream_.callbackInfo.thread, NULL );
\r
6060   if ( stream_.state == STREAM_RUNNING ) {
\r
6061     stream_.state = STREAM_STOPPED;
\r
6062     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6063       snd_pcm_drop( apiInfo->handles[0] );
\r
6064     if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6065       snd_pcm_drop( apiInfo->handles[1] );
\r
6069     pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6070     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6071     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6073     stream_.apiHandle = 0;
\r
6076   for ( int i=0; i<2; i++ ) {
\r
6077     if ( stream_.userBuffer[i] ) {
\r
6078       free( stream_.userBuffer[i] );
\r
6079       stream_.userBuffer[i] = 0;
\r
6083   if ( stream_.deviceBuffer ) {
\r
6084     free( stream_.deviceBuffer );
\r
6085     stream_.deviceBuffer = 0;
\r
6088   stream_.mode = UNINITIALIZED;
\r
6089   stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: prepare the output and/or input pcm handles as
// needed, mark the stream RUNNING, and wake the callback thread via the
// runnable condition variable.
6092 void RtApiAlsa :: startStream()
\r
6094   // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6097   if ( stream_.state == STREAM_RUNNING ) {
\r
6098     errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6099     error( RtAudioError::WARNING );
\r
6103   MUTEX_LOCK( &stream_.mutex );
\r
6106   snd_pcm_state_t state;
\r
6107   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6108   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6109   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6110     state = snd_pcm_state( handle[0] );
\r
6111     if ( state != SND_PCM_STATE_PREPARED ) {
\r
6112       result = snd_pcm_prepare( handle[0] );
\r
6113       if ( result < 0 ) {
\r
6114         errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6115         errorText_ = errorStream_.str();
\r
// The capture side is prepared separately only when it is not linked
// (synchronized) to the playback handle.
6121   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6122     result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
6123     state = snd_pcm_state( handle[1] );
\r
6124     if ( state != SND_PCM_STATE_PREPARED ) {
\r
6125       result = snd_pcm_prepare( handle[1] );
\r
6126       if ( result < 0 ) {
\r
6127         errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6128         errorText_ = errorStream_.str();
\r
6134   stream_.state = STREAM_RUNNING;
\r
// NOTE(review): setting runnable to *false* here while signalling the
// condition variable looks inverted for a start path -- the upstream
// RtAudio source sets runnable = true in startStream. Possibly an
// extraction artifact; verify against the canonical source.
6137   apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6138   pthread_cond_signal( &apiInfo->runnable_cv );
\r
6139   MUTEX_UNLOCK( &stream_.mutex );
\r
6141   if ( result >= 0 ) return;
\r
6142   error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully: drain (or drop, when the handles are
// linked) the output pcm, drop the unlinked input pcm, and park the
// callback thread by clearing the runnable flag.
6145 void RtApiAlsa :: stopStream()
\r
6148   if ( stream_.state == STREAM_STOPPED ) {
\r
6149     errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6150     error( RtAudioError::WARNING );
\r
6154   stream_.state = STREAM_STOPPED;
\r
6155   MUTEX_LOCK( &stream_.mutex );
\r
6158   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6159   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6160   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Linked (synchronized) handles must drop immediately; otherwise drain to
// let queued output play out.
6161     if ( apiInfo->synchronized )
\r
6162       result = snd_pcm_drop( handle[0] );
\r
6164       result = snd_pcm_drain( handle[0] );
\r
6165     if ( result < 0 ) {
\r
6166       errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6167       errorText_ = errorStream_.str();
\r
6172   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6173     result = snd_pcm_drop( handle[1] );
\r
6174     if ( result < 0 ) {
\r
6175       errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6176       errorText_ = errorStream_.str();
\r
6182   apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6183   MUTEX_UNLOCK( &stream_.mutex );
\r
6185   if ( result >= 0 ) return;
\r
6186   error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream immediately: unlike stopStream(), the output pcm
// is always dropped (no drain), discarding any queued audio.
6189 void RtApiAlsa :: abortStream()
\r
6192   if ( stream_.state == STREAM_STOPPED ) {
\r
6193     errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6194     error( RtAudioError::WARNING );
\r
6198   stream_.state = STREAM_STOPPED;
\r
6199   MUTEX_LOCK( &stream_.mutex );
\r
6202   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6203   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6204   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6205     result = snd_pcm_drop( handle[0] );
\r
6206     if ( result < 0 ) {
\r
6207       errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6208       errorText_ = errorStream_.str();
\r
6213   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6214     result = snd_pcm_drop( handle[1] );
\r
6215     if ( result < 0 ) {
\r
6216       errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6217       errorText_ = errorStream_.str();
\r
// NOTE(review): unlike stopStream(), no `apiInfo->runnable = false;`
// appears before unlock here -- lines are missing from this excerpt
// (numbering jumps 6217 -> 6223), so this may be an extraction gap.
6223   MUTEX_UNLOCK( &stream_.mutex );
\r
6225   if ( result >= 0 ) return;
\r
6226   error( RtAudioError::SYSTEM_ERROR );
\r
6229 void RtApiAlsa :: callbackEvent()
\r
6231 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6232 if ( stream_.state == STREAM_STOPPED ) {
\r
6233 MUTEX_LOCK( &stream_.mutex );
\r
6234 while ( !apiInfo->runnable )
\r
6235 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6237 if ( stream_.state != STREAM_RUNNING ) {
\r
6238 MUTEX_UNLOCK( &stream_.mutex );
\r
6241 MUTEX_UNLOCK( &stream_.mutex );
\r
6244 if ( stream_.state == STREAM_CLOSED ) {
\r
6245 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6246 error( RtAudioError::WARNING );
\r
6250 int doStopStream = 0;
\r
6251 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6252 double streamTime = getStreamTime();
\r
6253 RtAudioStreamStatus status = 0;
\r
6254 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6255 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6256 apiInfo->xrun[0] = false;
\r
6258 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6259 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6260 apiInfo->xrun[1] = false;
\r
6262 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6263 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6265 if ( doStopStream == 2 ) {
\r
6270 MUTEX_LOCK( &stream_.mutex );
\r
6272 // The state might change while waiting on a mutex.
\r
6273 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6278 snd_pcm_t **handle;
\r
6279 snd_pcm_sframes_t frames;
\r
6280 RtAudioFormat format;
\r
6281 handle = (snd_pcm_t **) apiInfo->handles;
\r
6283 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6285 // Setup parameters.
\r
6286 if ( stream_.doConvertBuffer[1] ) {
\r
6287 buffer = stream_.deviceBuffer;
\r
6288 channels = stream_.nDeviceChannels[1];
\r
6289 format = stream_.deviceFormat[1];
\r
6292 buffer = stream_.userBuffer[1];
\r
6293 channels = stream_.nUserChannels[1];
\r
6294 format = stream_.userFormat;
\r
6297 // Read samples from device in interleaved/non-interleaved format.
\r
6298 if ( stream_.deviceInterleaved[1] )
\r
6299 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6301 void *bufs[channels];
\r
6302 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6303 for ( int i=0; i<channels; i++ )
\r
6304 bufs[i] = (void *) (buffer + (i * offset));
\r
6305 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6308 if ( result < (int) stream_.bufferSize ) {
\r
6309 // Either an error or overrun occured.
\r
6310 if ( result == -EPIPE ) {
\r
6311 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6312 if ( state == SND_PCM_STATE_XRUN ) {
\r
6313 apiInfo->xrun[1] = true;
\r
6314 result = snd_pcm_prepare( handle[1] );
\r
6315 if ( result < 0 ) {
\r
6316 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6317 errorText_ = errorStream_.str();
\r
6321 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6322 errorText_ = errorStream_.str();
\r
6326 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6327 errorText_ = errorStream_.str();
\r
6329 error( RtAudioError::WARNING );
\r
6333 // Do byte swapping if necessary.
\r
6334 if ( stream_.doByteSwap[1] )
\r
6335 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6337 // Do buffer conversion if necessary.
\r
6338 if ( stream_.doConvertBuffer[1] )
\r
6339 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6341 // Check stream latency
\r
6342 result = snd_pcm_delay( handle[1], &frames );
\r
6343 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6348 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6350 // Setup parameters and do buffer conversion if necessary.
\r
6351 if ( stream_.doConvertBuffer[0] ) {
\r
6352 buffer = stream_.deviceBuffer;
\r
6353 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6354 channels = stream_.nDeviceChannels[0];
\r
6355 format = stream_.deviceFormat[0];
\r
6358 buffer = stream_.userBuffer[0];
\r
6359 channels = stream_.nUserChannels[0];
\r
6360 format = stream_.userFormat;
\r
6363 // Do byte swapping if necessary.
\r
6364 if ( stream_.doByteSwap[0] )
\r
6365 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6367 // Write samples to device in interleaved/non-interleaved format.
\r
6368 if ( stream_.deviceInterleaved[0] )
\r
6369 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6371 void *bufs[channels];
\r
6372 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6373 for ( int i=0; i<channels; i++ )
\r
6374 bufs[i] = (void *) (buffer + (i * offset));
\r
6375 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6378 if ( result < (int) stream_.bufferSize ) {
\r
6379 // Either an error or underrun occured.
\r
6380 if ( result == -EPIPE ) {
\r
6381 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6382 if ( state == SND_PCM_STATE_XRUN ) {
\r
6383 apiInfo->xrun[0] = true;
\r
6384 result = snd_pcm_prepare( handle[0] );
\r
6385 if ( result < 0 ) {
\r
6386 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6387 errorText_ = errorStream_.str();
\r
6391 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6392 errorText_ = errorStream_.str();
\r
6396 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6397 errorText_ = errorStream_.str();
\r
6399 error( RtAudioError::WARNING );
\r
6403 // Check stream latency
\r
6404 result = snd_pcm_delay( handle[0], &frames );
\r
6405 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6409 MUTEX_UNLOCK( &stream_.mutex );
\r
6411 RtApi::tickStreamTime();
\r
6412 if ( doStopStream == 1 ) this->stopStream();
\r
6415 static void *alsaCallbackHandler( void *ptr )
\r
6417 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6418 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6419 bool *isRunning = &info->isRunning;
\r
6421 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6422 if ( &info->doRealtime ) {
\r
6423 pthread_t tID = pthread_self(); // ID of this thread
\r
6424 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6425 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6429 while ( *isRunning == true ) {
\r
6430 pthread_testcancel();
\r
6431 object->callbackEvent();
\r
6434 pthread_exit( NULL );
\r
6437 //******************** End of __LINUX_ALSA__ *********************//
\r
6440 #if defined(__LINUX_PULSE__)
\r
6442 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6443 // and Tristan Matthews.
\r
6445 #include <pulse/error.h>
\r
6446 #include <pulse/simple.h>
\r
6449 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
6450 44100, 48000, 96000, 0};
\r
6452 struct rtaudio_pa_format_mapping_t {
\r
6453 RtAudioFormat rtaudio_format;
\r
6454 pa_sample_format_t pa_format;
\r
6457 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6458 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6459 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6460 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6461 {0, PA_SAMPLE_INVALID}};
\r
6463 struct PulseAudioHandle {
\r
6464 pa_simple *s_play;
\r
6467 pthread_cond_t runnable_cv;
\r
6469 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6472 RtApiPulse::~RtApiPulse()
\r
6474 if ( stream_.state != STREAM_CLOSED )
\r
6478 unsigned int RtApiPulse::getDeviceCount( void )
\r
6483 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6485 RtAudio::DeviceInfo info;
\r
6486 info.probed = true;
\r
6487 info.name = "PulseAudio";
\r
6488 info.outputChannels = 2;
\r
6489 info.inputChannels = 2;
\r
6490 info.duplexChannels = 2;
\r
6491 info.isDefaultOutput = true;
\r
6492 info.isDefaultInput = true;
\r
6494 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6495 info.sampleRates.push_back( *sr );
\r
6497 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6502 static void *pulseaudio_callback( void * user )
\r
6504 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6505 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6506 volatile bool *isRunning = &cbi->isRunning;
\r
6508 while ( *isRunning ) {
\r
6509 pthread_testcancel();
\r
6510 context->callbackEvent();
\r
6513 pthread_exit( NULL );
\r
6516 void RtApiPulse::closeStream( void )
\r
6518 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6520 stream_.callbackInfo.isRunning = false;
\r
6522 MUTEX_LOCK( &stream_.mutex );
\r
6523 if ( stream_.state == STREAM_STOPPED ) {
\r
6524 pah->runnable = true;
\r
6525 pthread_cond_signal( &pah->runnable_cv );
\r
6527 MUTEX_UNLOCK( &stream_.mutex );
\r
6529 pthread_join( pah->thread, 0 );
\r
6530 if ( pah->s_play ) {
\r
6531 pa_simple_flush( pah->s_play, NULL );
\r
6532 pa_simple_free( pah->s_play );
\r
6535 pa_simple_free( pah->s_rec );
\r
6537 pthread_cond_destroy( &pah->runnable_cv );
\r
6539 stream_.apiHandle = 0;
\r
6542 if ( stream_.userBuffer[0] ) {
\r
6543 free( stream_.userBuffer[0] );
\r
6544 stream_.userBuffer[0] = 0;
\r
6546 if ( stream_.userBuffer[1] ) {
\r
6547 free( stream_.userBuffer[1] );
\r
6548 stream_.userBuffer[1] = 0;
\r
6551 stream_.state = STREAM_CLOSED;
\r
6552 stream_.mode = UNINITIALIZED;
\r
6555 void RtApiPulse::callbackEvent( void )
\r
6557 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6559 if ( stream_.state == STREAM_STOPPED ) {
\r
6560 MUTEX_LOCK( &stream_.mutex );
\r
6561 while ( !pah->runnable )
\r
6562 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6564 if ( stream_.state != STREAM_RUNNING ) {
\r
6565 MUTEX_UNLOCK( &stream_.mutex );
\r
6568 MUTEX_UNLOCK( &stream_.mutex );
\r
6571 if ( stream_.state == STREAM_CLOSED ) {
\r
6572 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6573 "this shouldn't happen!";
\r
6574 error( RtAudioError::WARNING );
\r
6578 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6579 double streamTime = getStreamTime();
\r
6580 RtAudioStreamStatus status = 0;
\r
6581 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6582 stream_.bufferSize, streamTime, status,
\r
6583 stream_.callbackInfo.userData );
\r
6585 if ( doStopStream == 2 ) {
\r
6590 MUTEX_LOCK( &stream_.mutex );
\r
6591 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6592 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6594 if ( stream_.state != STREAM_RUNNING )
\r
6599 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6600 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6601 convertBuffer( stream_.deviceBuffer,
\r
6602 stream_.userBuffer[OUTPUT],
\r
6603 stream_.convertInfo[OUTPUT] );
\r
6604 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6605 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6607 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6608 formatBytes( stream_.userFormat );
\r
6610 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6611 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6612 pa_strerror( pa_error ) << ".";
\r
6613 errorText_ = errorStream_.str();
\r
6614 error( RtAudioError::WARNING );
\r
6618 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6619 if ( stream_.doConvertBuffer[INPUT] )
\r
6620 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6621 formatBytes( stream_.deviceFormat[INPUT] );
\r
6623 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6624 formatBytes( stream_.userFormat );
\r
6626 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6627 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6628 pa_strerror( pa_error ) << ".";
\r
6629 errorText_ = errorStream_.str();
\r
6630 error( RtAudioError::WARNING );
\r
6632 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6633 convertBuffer( stream_.userBuffer[INPUT],
\r
6634 stream_.deviceBuffer,
\r
6635 stream_.convertInfo[INPUT] );
\r
6640 MUTEX_UNLOCK( &stream_.mutex );
\r
6641 RtApi::tickStreamTime();
\r
6643 if ( doStopStream == 1 )
\r
6647 void RtApiPulse::startStream( void )
\r
6649 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6651 if ( stream_.state == STREAM_CLOSED ) {
\r
6652 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6653 error( RtAudioError::INVALID_USE );
\r
6656 if ( stream_.state == STREAM_RUNNING ) {
\r
6657 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6658 error( RtAudioError::WARNING );
\r
6662 MUTEX_LOCK( &stream_.mutex );
\r
6664 stream_.state = STREAM_RUNNING;
\r
6666 pah->runnable = true;
\r
6667 pthread_cond_signal( &pah->runnable_cv );
\r
6668 MUTEX_UNLOCK( &stream_.mutex );
\r
6671 void RtApiPulse::stopStream( void )
\r
6673 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6675 if ( stream_.state == STREAM_CLOSED ) {
\r
6676 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6677 error( RtAudioError::INVALID_USE );
\r
6680 if ( stream_.state == STREAM_STOPPED ) {
\r
6681 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6682 error( RtAudioError::WARNING );
\r
6686 stream_.state = STREAM_STOPPED;
\r
6687 MUTEX_LOCK( &stream_.mutex );
\r
6689 if ( pah && pah->s_play ) {
\r
6691 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6692 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6693 pa_strerror( pa_error ) << ".";
\r
6694 errorText_ = errorStream_.str();
\r
6695 MUTEX_UNLOCK( &stream_.mutex );
\r
6696 error( RtAudioError::SYSTEM_ERROR );
\r
6701 stream_.state = STREAM_STOPPED;
\r
6702 MUTEX_UNLOCK( &stream_.mutex );
\r
6705 void RtApiPulse::abortStream( void )
\r
6707 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6709 if ( stream_.state == STREAM_CLOSED ) {
\r
6710 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6711 error( RtAudioError::INVALID_USE );
\r
6714 if ( stream_.state == STREAM_STOPPED ) {
\r
6715 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6716 error( RtAudioError::WARNING );
\r
6720 stream_.state = STREAM_STOPPED;
\r
6721 MUTEX_LOCK( &stream_.mutex );
\r
6723 if ( pah && pah->s_play ) {
\r
6725 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6726 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6727 pa_strerror( pa_error ) << ".";
\r
6728 errorText_ = errorStream_.str();
\r
6729 MUTEX_UNLOCK( &stream_.mutex );
\r
6730 error( RtAudioError::SYSTEM_ERROR );
\r
6735 stream_.state = STREAM_STOPPED;
\r
6736 MUTEX_UNLOCK( &stream_.mutex );
\r
6739 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6740 unsigned int channels, unsigned int firstChannel,
\r
6741 unsigned int sampleRate, RtAudioFormat format,
\r
6742 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6744 PulseAudioHandle *pah = 0;
\r
6745 unsigned long bufferBytes = 0;
\r
6746 pa_sample_spec ss;
\r
6748 if ( device != 0 ) return false;
\r
6749 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6750 if ( channels != 1 && channels != 2 ) {
\r
6751 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6754 ss.channels = channels;
\r
6756 if ( firstChannel != 0 ) return false;
\r
6758 bool sr_found = false;
\r
6759 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6760 if ( sampleRate == *sr ) {
\r
6762 stream_.sampleRate = sampleRate;
\r
6763 ss.rate = sampleRate;
\r
6767 if ( !sr_found ) {
\r
6768 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6772 bool sf_found = 0;
\r
6773 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6774 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6775 if ( format == sf->rtaudio_format ) {
\r
6777 stream_.userFormat = sf->rtaudio_format;
\r
6778 ss.format = sf->pa_format;
\r
6782 if ( !sf_found ) {
\r
6783 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6787 // Set interleaving parameters.
\r
6788 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6789 else stream_.userInterleaved = true;
\r
6790 stream_.deviceInterleaved[mode] = true;
\r
6791 stream_.nBuffers = 1;
\r
6792 stream_.doByteSwap[mode] = false;
\r
6793 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6794 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6795 stream_.nUserChannels[mode] = channels;
\r
6796 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6797 stream_.channelOffset[mode] = 0;
\r
6799 // Allocate necessary internal buffers.
\r
6800 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6801 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6802 if ( stream_.userBuffer[mode] == NULL ) {
\r
6803 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6806 stream_.bufferSize = *bufferSize;
\r
6808 if ( stream_.doConvertBuffer[mode] ) {
\r
6810 bool makeBuffer = true;
\r
6811 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6812 if ( mode == INPUT ) {
\r
6813 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6814 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6815 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6819 if ( makeBuffer ) {
\r
6820 bufferBytes *= *bufferSize;
\r
6821 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6822 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6823 if ( stream_.deviceBuffer == NULL ) {
\r
6824 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6830 stream_.device[mode] = device;
\r
6832 // Setup the buffer conversion information structure.
\r
6833 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6835 if ( !stream_.apiHandle ) {
\r
6836 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6838 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6842 stream_.apiHandle = pah;
\r
6843 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6844 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6848 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6851 std::string streamName = "RtAudio";
\r
6852 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
6855 pa_buffer_attr buffer_attr;
\r
6856 buffer_attr.fragsize = bufferBytes;
\r
6857 buffer_attr.maxlength = -1;
\r
6859 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
6860 if ( !pah->s_rec ) {
\r
6861 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6866 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6867 if ( !pah->s_play ) {
\r
6868 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6876 if ( stream_.mode == UNINITIALIZED )
\r
6877 stream_.mode = mode;
\r
6878 else if ( stream_.mode == mode )
\r
6881 stream_.mode = DUPLEX;
\r
6883 if ( !stream_.callbackInfo.isRunning ) {
\r
6884 stream_.callbackInfo.object = this;
\r
6885 stream_.callbackInfo.isRunning = true;
\r
6886 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6887 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6892 stream_.state = STREAM_STOPPED;
\r
6896 if ( pah && stream_.callbackInfo.isRunning ) {
\r
6897 pthread_cond_destroy( &pah->runnable_cv );
\r
6899 stream_.apiHandle = 0;
\r
6902 for ( int i=0; i<2; i++ ) {
\r
6903 if ( stream_.userBuffer[i] ) {
\r
6904 free( stream_.userBuffer[i] );
\r
6905 stream_.userBuffer[i] = 0;
\r
6909 if ( stream_.deviceBuffer ) {
\r
6910 free( stream_.deviceBuffer );
\r
6911 stream_.deviceBuffer = 0;
\r
6917 //******************** End of __LINUX_PULSE__ *********************//
\r
6920 #if defined(__LINUX_OSS__)
\r
6922 #include <unistd.h>
\r
6923 #include <sys/ioctl.h>
\r
6924 #include <unistd.h>
\r
6925 #include <fcntl.h>
\r
6926 #include <sys/soundcard.h>
\r
6927 #include <errno.h>
\r
6930 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (0 = output, 1 = input)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // whether the device trigger has been fired
  pthread_cond_t runnable; // wakes the callback thread out of the stopped state

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6944 RtApiOss :: RtApiOss()
\r
6946 // Nothing to do here.
\r
6949 RtApiOss :: ~RtApiOss()
\r
6951 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6954 unsigned int RtApiOss :: getDeviceCount( void )
\r
6956 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6957 if ( mixerfd == -1 ) {
\r
6958 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6959 error( RtAudioError::WARNING );
\r
6963 oss_sysinfo sysinfo;
\r
6964 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6966 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6967 error( RtAudioError::WARNING );
\r
6972 return sysinfo.numaudios;
\r
6975 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6977 RtAudio::DeviceInfo info;
\r
6978 info.probed = false;
\r
6980 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6981 if ( mixerfd == -1 ) {
\r
6982 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6983 error( RtAudioError::WARNING );
\r
6987 oss_sysinfo sysinfo;
\r
6988 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6989 if ( result == -1 ) {
\r
6991 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6992 error( RtAudioError::WARNING );
\r
6996 unsigned nDevices = sysinfo.numaudios;
\r
6997 if ( nDevices == 0 ) {
\r
6999 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
7000 error( RtAudioError::INVALID_USE );
\r
7004 if ( device >= nDevices ) {
\r
7006 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
7007 error( RtAudioError::INVALID_USE );
\r
7011 oss_audioinfo ainfo;
\r
7012 ainfo.dev = device;
\r
7013 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7015 if ( result == -1 ) {
\r
7016 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7017 errorText_ = errorStream_.str();
\r
7018 error( RtAudioError::WARNING );
\r
7023 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
7024 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
7025 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
7026 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
7027 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7030 // Probe data formats ... do for input
\r
7031 unsigned long mask = ainfo.iformats;
\r
7032 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
7033 info.nativeFormats |= RTAUDIO_SINT16;
\r
7034 if ( mask & AFMT_S8 )
\r
7035 info.nativeFormats |= RTAUDIO_SINT8;
\r
7036 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
7037 info.nativeFormats |= RTAUDIO_SINT32;
\r
7038 if ( mask & AFMT_FLOAT )
\r
7039 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7040 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
7041 info.nativeFormats |= RTAUDIO_SINT24;
\r
7043 // Check that we have at least one supported format
\r
7044 if ( info.nativeFormats == 0 ) {
\r
7045 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7046 errorText_ = errorStream_.str();
\r
7047 error( RtAudioError::WARNING );
\r
7051 // Probe the supported sample rates.
\r
7052 info.sampleRates.clear();
\r
7053 if ( ainfo.nrates ) {
\r
7054 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
7055 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7056 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
7057 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7064 // Check min and max rate values;
\r
7065 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7066 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
7067 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7071 if ( info.sampleRates.size() == 0 ) {
\r
7072 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
7073 errorText_ = errorStream_.str();
\r
7074 error( RtAudioError::WARNING );
\r
7077 info.probed = true;
\r
7078 info.name = ainfo.name;
\r
7085 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7086 unsigned int firstChannel, unsigned int sampleRate,
\r
7087 RtAudioFormat format, unsigned int *bufferSize,
\r
7088 RtAudio::StreamOptions *options )
\r
7090 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7091 if ( mixerfd == -1 ) {
\r
7092 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7096 oss_sysinfo sysinfo;
\r
7097 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7098 if ( result == -1 ) {
\r
7100 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7104 unsigned nDevices = sysinfo.numaudios;
\r
7105 if ( nDevices == 0 ) {
\r
7106 // This should not happen because a check is made before this function is called.
\r
7108 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7112 if ( device >= nDevices ) {
\r
7113 // This should not happen because a check is made before this function is called.
\r
7115 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7119 oss_audioinfo ainfo;
\r
7120 ainfo.dev = device;
\r
7121 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7123 if ( result == -1 ) {
\r
7124 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7125 errorText_ = errorStream_.str();
\r
7129 // Check if device supports input or output
\r
7130 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7131 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7132 if ( mode == OUTPUT )
\r
7133 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7135 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7136 errorText_ = errorStream_.str();
\r
7141 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7142 if ( mode == OUTPUT )
\r
7143 flags |= O_WRONLY;
\r
7144 else { // mode == INPUT
\r
7145 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7146 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7147 close( handle->id[0] );
\r
7148 handle->id[0] = 0;
\r
7149 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7150 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7151 errorText_ = errorStream_.str();
\r
7154 // Check that the number previously set channels is the same.
\r
7155 if ( stream_.nUserChannels[0] != channels ) {
\r
7156 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7157 errorText_ = errorStream_.str();
\r
7163 flags |= O_RDONLY;
\r
7166 // Set exclusive access if specified.
\r
7167 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7169 // Try to open the device.
\r
7171 fd = open( ainfo.devnode, flags, 0 );
\r
7173 if ( errno == EBUSY )
\r
7174 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7176 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7177 errorText_ = errorStream_.str();
\r
7181 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7183 if ( flags | O_RDWR ) {
\r
7184 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7185 if ( result == -1) {
\r
7186 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7187 errorText_ = errorStream_.str();
\r
7193 // Check the device channel support.
\r
7194 stream_.nUserChannels[mode] = channels;
\r
7195 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7197 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7198 errorText_ = errorStream_.str();
\r
7202 // Set the number of channels.
\r
7203 int deviceChannels = channels + firstChannel;
\r
7204 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7205 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7207 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7208 errorText_ = errorStream_.str();
\r
7211 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7213 // Get the data format mask
\r
7215 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7216 if ( result == -1 ) {
\r
7218 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7219 errorText_ = errorStream_.str();
\r
7223 // Determine how to set the device format.
\r
7224 stream_.userFormat = format;
\r
7225 int deviceFormat = -1;
\r
7226 stream_.doByteSwap[mode] = false;
\r
7227 if ( format == RTAUDIO_SINT8 ) {
\r
7228 if ( mask & AFMT_S8 ) {
\r
7229 deviceFormat = AFMT_S8;
\r
7230 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7233 else if ( format == RTAUDIO_SINT16 ) {
\r
7234 if ( mask & AFMT_S16_NE ) {
\r
7235 deviceFormat = AFMT_S16_NE;
\r
7236 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7238 else if ( mask & AFMT_S16_OE ) {
\r
7239 deviceFormat = AFMT_S16_OE;
\r
7240 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7241 stream_.doByteSwap[mode] = true;
\r
7244 else if ( format == RTAUDIO_SINT24 ) {
\r
7245 if ( mask & AFMT_S24_NE ) {
\r
7246 deviceFormat = AFMT_S24_NE;
\r
7247 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7249 else if ( mask & AFMT_S24_OE ) {
\r
7250 deviceFormat = AFMT_S24_OE;
\r
7251 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7252 stream_.doByteSwap[mode] = true;
\r
7255 else if ( format == RTAUDIO_SINT32 ) {
\r
7256 if ( mask & AFMT_S32_NE ) {
\r
7257 deviceFormat = AFMT_S32_NE;
\r
7258 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7260 else if ( mask & AFMT_S32_OE ) {
\r
7261 deviceFormat = AFMT_S32_OE;
\r
7262 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7263 stream_.doByteSwap[mode] = true;
\r
7267 if ( deviceFormat == -1 ) {
\r
7268 // The user requested format is not natively supported by the device.
\r
7269 if ( mask & AFMT_S16_NE ) {
\r
7270 deviceFormat = AFMT_S16_NE;
\r
7271 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7273 else if ( mask & AFMT_S32_NE ) {
\r
7274 deviceFormat = AFMT_S32_NE;
\r
7275 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7277 else if ( mask & AFMT_S24_NE ) {
\r
7278 deviceFormat = AFMT_S24_NE;
\r
7279 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7281 else if ( mask & AFMT_S16_OE ) {
\r
7282 deviceFormat = AFMT_S16_OE;
\r
7283 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7284 stream_.doByteSwap[mode] = true;
\r
7286 else if ( mask & AFMT_S32_OE ) {
\r
7287 deviceFormat = AFMT_S32_OE;
\r
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7289 stream_.doByteSwap[mode] = true;
\r
7291 else if ( mask & AFMT_S24_OE ) {
\r
7292 deviceFormat = AFMT_S24_OE;
\r
7293 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7294 stream_.doByteSwap[mode] = true;
\r
7296 else if ( mask & AFMT_S8) {
\r
7297 deviceFormat = AFMT_S8;
\r
7298 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7302 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7303 // This really shouldn't happen ...
\r
7305 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7306 errorText_ = errorStream_.str();
\r
7310 // Set the data format.
\r
7311 int temp = deviceFormat;
\r
7312 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7313 if ( result == -1 || deviceFormat != temp ) {
\r
7315 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7316 errorText_ = errorStream_.str();
\r
7320 // Attempt to set the buffer size. According to OSS, the minimum
\r
7321 // number of buffers is two. The supposed minimum buffer size is 16
\r
7322 // bytes, so that will be our lower bound. The argument to this
\r
7323 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7324 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7325 // We'll check the actual value used near the end of the setup
\r
7327 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7328 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7330 if ( options ) buffers = options->numberOfBuffers;
\r
7331 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7332 if ( buffers < 2 ) buffers = 3;
\r
7333 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7334 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7335 if ( result == -1 ) {
\r
7337 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7338 errorText_ = errorStream_.str();
\r
7341 stream_.nBuffers = buffers;
\r
7343 // Save buffer size (in sample frames).
\r
7344 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7345 stream_.bufferSize = *bufferSize;
\r
7347 // Set the sample rate.
\r
7348 int srate = sampleRate;
\r
7349 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7350 if ( result == -1 ) {
\r
7352 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7353 errorText_ = errorStream_.str();
\r
7357 // Verify the sample rate setup worked.
\r
7358 if ( abs( srate - sampleRate ) > 100 ) {
\r
7360 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7361 errorText_ = errorStream_.str();
\r
7364 stream_.sampleRate = sampleRate;
\r
7366 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7367 // We're doing duplex setup here.
\r
7368 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7369 stream_.nDeviceChannels[0] = deviceChannels;
\r
7372 // Set interleaving parameters.
\r
7373 stream_.userInterleaved = true;
\r
7374 stream_.deviceInterleaved[mode] = true;
\r
7375 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7376 stream_.userInterleaved = false;
\r
7378 // Set flags for buffer conversion
\r
7379 stream_.doConvertBuffer[mode] = false;
\r
7380 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7381 stream_.doConvertBuffer[mode] = true;
\r
7382 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7383 stream_.doConvertBuffer[mode] = true;
\r
7384 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7385 stream_.nUserChannels[mode] > 1 )
\r
7386 stream_.doConvertBuffer[mode] = true;
\r
7388 // Allocate the stream handles if necessary and then save.
\r
7389 if ( stream_.apiHandle == 0 ) {
\r
7391 handle = new OssHandle;
\r
7393 catch ( std::bad_alloc& ) {
\r
7394 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7398 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7399 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7403 stream_.apiHandle = (void *) handle;
\r
7406 handle = (OssHandle *) stream_.apiHandle;
\r
7408 handle->id[mode] = fd;
\r
7410 // Allocate necessary internal buffers.
\r
7411 unsigned long bufferBytes;
\r
7412 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7413 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7414 if ( stream_.userBuffer[mode] == NULL ) {
\r
7415 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7419 if ( stream_.doConvertBuffer[mode] ) {
\r
7421 bool makeBuffer = true;
\r
7422 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7423 if ( mode == INPUT ) {
\r
7424 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7425 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7426 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7430 if ( makeBuffer ) {
\r
7431 bufferBytes *= *bufferSize;
\r
7432 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7433 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7434 if ( stream_.deviceBuffer == NULL ) {
\r
7435 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7441 stream_.device[mode] = device;
\r
7442 stream_.state = STREAM_STOPPED;
\r
7444 // Setup the buffer conversion information structure.
\r
7445 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7447 // Setup thread if necessary.
\r
7448 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7449 // We had already set up an output stream.
\r
7450 stream_.mode = DUPLEX;
\r
7451 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7454 stream_.mode = mode;
\r
7456 // Setup callback thread.
\r
7457 stream_.callbackInfo.object = (void *) this;
\r
7459 // Set the thread attributes for joinable and realtime scheduling
\r
7460 // priority. The higher priority will only take affect if the
\r
7461 // program is run as root or suid.
\r
7462 pthread_attr_t attr;
\r
7463 pthread_attr_init( &attr );
\r
7464 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7465 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7466 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7467 struct sched_param param;
\r
7468 int priority = options->priority;
\r
7469 int min = sched_get_priority_min( SCHED_RR );
\r
7470 int max = sched_get_priority_max( SCHED_RR );
\r
7471 if ( priority < min ) priority = min;
\r
7472 else if ( priority > max ) priority = max;
\r
7473 param.sched_priority = priority;
\r
7474 pthread_attr_setschedparam( &attr, ¶m );
\r
7475 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7478 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7480 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7483 stream_.callbackInfo.isRunning = true;
\r
7484 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7485 pthread_attr_destroy( &attr );
\r
7487 stream_.callbackInfo.isRunning = false;
\r
7488 errorText_ = "RtApiOss::error creating callback thread!";
\r
7497 pthread_cond_destroy( &handle->runnable );
\r
7498 if ( handle->id[0] ) close( handle->id[0] );
\r
7499 if ( handle->id[1] ) close( handle->id[1] );
\r
7501 stream_.apiHandle = 0;
\r
7504 for ( int i=0; i<2; i++ ) {
\r
7505 if ( stream_.userBuffer[i] ) {
\r
7506 free( stream_.userBuffer[i] );
\r
7507 stream_.userBuffer[i] = 0;
\r
7511 if ( stream_.deviceBuffer ) {
\r
7512 free( stream_.deviceBuffer );
\r
7513 stream_.deviceBuffer = 0;
\r
7519 void RtApiOss :: closeStream()
\r
7521 if ( stream_.state == STREAM_CLOSED ) {
\r
7522 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7523 error( RtAudioError::WARNING );
\r
7527 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7528 stream_.callbackInfo.isRunning = false;
\r
7529 MUTEX_LOCK( &stream_.mutex );
\r
7530 if ( stream_.state == STREAM_STOPPED )
\r
7531 pthread_cond_signal( &handle->runnable );
\r
7532 MUTEX_UNLOCK( &stream_.mutex );
\r
7533 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7535 if ( stream_.state == STREAM_RUNNING ) {
\r
7536 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7537 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7539 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7540 stream_.state = STREAM_STOPPED;
\r
7544 pthread_cond_destroy( &handle->runnable );
\r
7545 if ( handle->id[0] ) close( handle->id[0] );
\r
7546 if ( handle->id[1] ) close( handle->id[1] );
\r
7548 stream_.apiHandle = 0;
\r
7551 for ( int i=0; i<2; i++ ) {
\r
7552 if ( stream_.userBuffer[i] ) {
\r
7553 free( stream_.userBuffer[i] );
\r
7554 stream_.userBuffer[i] = 0;
\r
7558 if ( stream_.deviceBuffer ) {
\r
7559 free( stream_.deviceBuffer );
\r
7560 stream_.deviceBuffer = 0;
\r
7563 stream_.mode = UNINITIALIZED;
\r
7564 stream_.state = STREAM_CLOSED;
\r
7567 void RtApiOss :: startStream()
\r
7570 if ( stream_.state == STREAM_RUNNING ) {
\r
7571 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7572 error( RtAudioError::WARNING );
\r
7576 MUTEX_LOCK( &stream_.mutex );
\r
7578 stream_.state = STREAM_RUNNING;
\r
7580 // No need to do anything else here ... OSS automatically starts
\r
7581 // when fed samples.
\r
7583 MUTEX_UNLOCK( &stream_.mutex );
\r
7585 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7586 pthread_cond_signal( &handle->runnable );
\r
7589 void RtApiOss :: stopStream()
\r
7592 if ( stream_.state == STREAM_STOPPED ) {
\r
7593 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7594 error( RtAudioError::WARNING );
\r
7598 MUTEX_LOCK( &stream_.mutex );
\r
7600 // The state might change while waiting on a mutex.
\r
7601 if ( stream_.state == STREAM_STOPPED ) {
\r
7602 MUTEX_UNLOCK( &stream_.mutex );
\r
7607 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7608 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7610 // Flush the output with zeros a few times.
\r
7613 RtAudioFormat format;
\r
7615 if ( stream_.doConvertBuffer[0] ) {
\r
7616 buffer = stream_.deviceBuffer;
\r
7617 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7618 format = stream_.deviceFormat[0];
\r
7621 buffer = stream_.userBuffer[0];
\r
7622 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7623 format = stream_.userFormat;
\r
7626 memset( buffer, 0, samples * formatBytes(format) );
\r
7627 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7628 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7629 if ( result == -1 ) {
\r
7630 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7631 error( RtAudioError::WARNING );
\r
7635 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7636 if ( result == -1 ) {
\r
7637 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7638 errorText_ = errorStream_.str();
\r
7641 handle->triggered = false;
\r
7644 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7645 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7646 if ( result == -1 ) {
\r
7647 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7648 errorText_ = errorStream_.str();
\r
7654 stream_.state = STREAM_STOPPED;
\r
7655 MUTEX_UNLOCK( &stream_.mutex );
\r
7657 if ( result != -1 ) return;
\r
7658 error( RtAudioError::SYSTEM_ERROR );
\r
7661 void RtApiOss :: abortStream()
\r
7664 if ( stream_.state == STREAM_STOPPED ) {
\r
7665 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7666 error( RtAudioError::WARNING );
\r
7670 MUTEX_LOCK( &stream_.mutex );
\r
7672 // The state might change while waiting on a mutex.
\r
7673 if ( stream_.state == STREAM_STOPPED ) {
\r
7674 MUTEX_UNLOCK( &stream_.mutex );
\r
7679 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7680 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7681 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7682 if ( result == -1 ) {
\r
7683 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7684 errorText_ = errorStream_.str();
\r
7687 handle->triggered = false;
\r
7690 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7691 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7692 if ( result == -1 ) {
\r
7693 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7694 errorText_ = errorStream_.str();
\r
7700 stream_.state = STREAM_STOPPED;
\r
7701 MUTEX_UNLOCK( &stream_.mutex );
\r
7703 if ( result != -1 ) return;
\r
7704 error( RtAudioError::SYSTEM_ERROR );
\r
7707 void RtApiOss :: callbackEvent()
\r
7709 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7710 if ( stream_.state == STREAM_STOPPED ) {
\r
7711 MUTEX_LOCK( &stream_.mutex );
\r
7712 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7713 if ( stream_.state != STREAM_RUNNING ) {
\r
7714 MUTEX_UNLOCK( &stream_.mutex );
\r
7717 MUTEX_UNLOCK( &stream_.mutex );
\r
7720 if ( stream_.state == STREAM_CLOSED ) {
\r
7721 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7722 error( RtAudioError::WARNING );
\r
7726 // Invoke user callback to get fresh output data.
\r
7727 int doStopStream = 0;
\r
7728 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7729 double streamTime = getStreamTime();
\r
7730 RtAudioStreamStatus status = 0;
\r
7731 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7732 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7733 handle->xrun[0] = false;
\r
7735 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7736 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7737 handle->xrun[1] = false;
\r
7739 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7740 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7741 if ( doStopStream == 2 ) {
\r
7742 this->abortStream();
\r
7746 MUTEX_LOCK( &stream_.mutex );
\r
7748 // The state might change while waiting on a mutex.
\r
7749 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7754 RtAudioFormat format;
\r
7756 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7758 // Setup parameters and do buffer conversion if necessary.
\r
7759 if ( stream_.doConvertBuffer[0] ) {
\r
7760 buffer = stream_.deviceBuffer;
\r
7761 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7762 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7763 format = stream_.deviceFormat[0];
\r
7766 buffer = stream_.userBuffer[0];
\r
7767 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7768 format = stream_.userFormat;
\r
7771 // Do byte swapping if necessary.
\r
7772 if ( stream_.doByteSwap[0] )
\r
7773 byteSwapBuffer( buffer, samples, format );
\r
7775 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7777 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7778 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7779 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7780 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7781 handle->triggered = true;
\r
7784 // Write samples to device.
\r
7785 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7787 if ( result == -1 ) {
\r
7788 // We'll assume this is an underrun, though there isn't a
\r
7789 // specific means for determining that.
\r
7790 handle->xrun[0] = true;
\r
7791 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7792 error( RtAudioError::WARNING );
\r
7793 // Continue on to input section.
\r
7797 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7799 // Setup parameters.
\r
7800 if ( stream_.doConvertBuffer[1] ) {
\r
7801 buffer = stream_.deviceBuffer;
\r
7802 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7803 format = stream_.deviceFormat[1];
\r
7806 buffer = stream_.userBuffer[1];
\r
7807 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7808 format = stream_.userFormat;
\r
7811 // Read samples from device.
\r
7812 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7814 if ( result == -1 ) {
\r
7815 // We'll assume this is an overrun, though there isn't a
\r
7816 // specific means for determining that.
\r
7817 handle->xrun[1] = true;
\r
7818 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7819 error( RtAudioError::WARNING );
\r
7823 // Do byte swapping if necessary.
\r
7824 if ( stream_.doByteSwap[1] )
\r
7825 byteSwapBuffer( buffer, samples, format );
\r
7827 // Do buffer conversion if necessary.
\r
7828 if ( stream_.doConvertBuffer[1] )
\r
7829 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7833 MUTEX_UNLOCK( &stream_.mutex );
\r
7835 RtApi::tickStreamTime();
\r
7836 if ( doStopStream == 1 ) this->stopStream();
\r
7839 static void *ossCallbackHandler( void *ptr )
\r
7841 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7842 RtApiOss *object = (RtApiOss *) info->object;
\r
7843 bool *isRunning = &info->isRunning;
\r
7845 while ( *isRunning == true ) {
\r
7846 pthread_testcancel();
\r
7847 object->callbackEvent();
\r
7850 pthread_exit( NULL );
\r
7853 //******************** End of __LINUX_OSS__ *********************//
\r
7857 // *************************************************** //
\r
7859 // Protected common (OS-independent) RtAudio methods.
\r
7861 // *************************************************** //
\r
7863 // This method can be modified to control the behavior of error
\r
7864 // message printing.
\r
7865 void RtApi :: error( RtAudioError::Type type )
\r
7867 errorStream_.str(""); // clear the ostringstream
\r
7869 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7870 if ( errorCallback ) {
\r
7871 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7873 if ( firstErrorOccurred_ )
\r
7876 firstErrorOccurred_ = true;
\r
7877 const std::string errorMessage = errorText_;
\r
7879 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7880 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7884 errorCallback( type, errorMessage );
\r
7885 firstErrorOccurred_ = false;
\r
7889 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
7890 std::cerr << '\n' << errorText_ << "\n\n";
\r
7891 else if ( type != RtAudioError::WARNING )
\r
7892 throw( RtAudioError( errorText_, type ) );
\r
7895 void RtApi :: verifyStream()
\r
7897 if ( stream_.state == STREAM_CLOSED ) {
\r
7898 errorText_ = "RtApi:: a stream is not open!";
\r
7899 error( RtAudioError::INVALID_USE );
\r
7903 void RtApi :: clearStreamInfo()
\r
7905 stream_.mode = UNINITIALIZED;
\r
7906 stream_.state = STREAM_CLOSED;
\r
7907 stream_.sampleRate = 0;
\r
7908 stream_.bufferSize = 0;
\r
7909 stream_.nBuffers = 0;
\r
7910 stream_.userFormat = 0;
\r
7911 stream_.userInterleaved = true;
\r
7912 stream_.streamTime = 0.0;
\r
7913 stream_.apiHandle = 0;
\r
7914 stream_.deviceBuffer = 0;
\r
7915 stream_.callbackInfo.callback = 0;
\r
7916 stream_.callbackInfo.userData = 0;
\r
7917 stream_.callbackInfo.isRunning = false;
\r
7918 stream_.callbackInfo.errorCallback = 0;
\r
7919 for ( int i=0; i<2; i++ ) {
\r
7920 stream_.device[i] = 11111;
\r
7921 stream_.doConvertBuffer[i] = false;
\r
7922 stream_.deviceInterleaved[i] = true;
\r
7923 stream_.doByteSwap[i] = false;
\r
7924 stream_.nUserChannels[i] = 0;
\r
7925 stream_.nDeviceChannels[i] = 0;
\r
7926 stream_.channelOffset[i] = 0;
\r
7927 stream_.deviceFormat[i] = 0;
\r
7928 stream_.latency[i] = 0;
\r
7929 stream_.userBuffer[i] = 0;
\r
7930 stream_.convertInfo[i].channels = 0;
\r
7931 stream_.convertInfo[i].inJump = 0;
\r
7932 stream_.convertInfo[i].outJump = 0;
\r
7933 stream_.convertInfo[i].inFormat = 0;
\r
7934 stream_.convertInfo[i].outFormat = 0;
\r
7935 stream_.convertInfo[i].inOffset.clear();
\r
7936 stream_.convertInfo[i].outOffset.clear();
\r
7940 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7942 if ( format == RTAUDIO_SINT16 )
\r
7944 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7946 else if ( format == RTAUDIO_FLOAT64 )
\r
7948 else if ( format == RTAUDIO_SINT24 )
\r
7950 else if ( format == RTAUDIO_SINT8 )
\r
7953 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7954 error( RtAudioError::WARNING );
\r
7959 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7961 if ( mode == INPUT ) { // convert device to user buffer
\r
7962 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7963 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7964 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7965 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7967 else { // convert user to device buffer
\r
7968 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7969 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7970 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7971 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7974 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7975 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7977 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7979 // Set up the interleave/deinterleave offsets.
\r
7980 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7981 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7982 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7983 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7984 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7985 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7986 stream_.convertInfo[mode].inJump = 1;
\r
7990 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7991 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7992 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7993 stream_.convertInfo[mode].outJump = 1;
\r
7997 else { // no (de)interleaving
\r
7998 if ( stream_.userInterleaved ) {
\r
7999 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8000 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8001 stream_.convertInfo[mode].outOffset.push_back( k );
\r
8005 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8006 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
8007 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8008 stream_.convertInfo[mode].inJump = 1;
\r
8009 stream_.convertInfo[mode].outJump = 1;
\r
8014 // Add channel offset.
\r
8015 if ( firstChannel > 0 ) {
\r
8016 if ( stream_.deviceInterleaved[mode] ) {
\r
8017 if ( mode == OUTPUT ) {
\r
8018 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8019 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
8022 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8023 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
8027 if ( mode == OUTPUT ) {
\r
8028 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8029 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8032 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8033 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8039 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8041 // This function does format conversion, input/output channel compensation, and
\r
8042 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8043 // the lower three bytes of a 32-bit integer.
\r
8045 // Clear our device buffer when in/out duplex device channels are different
\r
8046 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8047 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8048 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8051 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8053 Float64 *out = (Float64 *)outBuffer;
\r
8055 if (info.inFormat == RTAUDIO_SINT8) {
\r
8056 signed char *in = (signed char *)inBuffer;
\r
8057 scale = 1.0 / 127.5;
\r
8058 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8059 for (j=0; j<info.channels; j++) {
\r
8060 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8061 out[info.outOffset[j]] += 0.5;
\r
8062 out[info.outOffset[j]] *= scale;
\r
8064 in += info.inJump;
\r
8065 out += info.outJump;
\r
8068 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8069 Int16 *in = (Int16 *)inBuffer;
\r
8070 scale = 1.0 / 32767.5;
\r
8071 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8072 for (j=0; j<info.channels; j++) {
\r
8073 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8074 out[info.outOffset[j]] += 0.5;
\r
8075 out[info.outOffset[j]] *= scale;
\r
8077 in += info.inJump;
\r
8078 out += info.outJump;
\r
8081 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8082 Int24 *in = (Int24 *)inBuffer;
\r
8083 scale = 1.0 / 8388607.5;
\r
8084 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8085 for (j=0; j<info.channels; j++) {
\r
8086 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8087 out[info.outOffset[j]] += 0.5;
\r
8088 out[info.outOffset[j]] *= scale;
\r
8090 in += info.inJump;
\r
8091 out += info.outJump;
\r
8094 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8095 Int32 *in = (Int32 *)inBuffer;
\r
8096 scale = 1.0 / 2147483647.5;
\r
8097 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8098 for (j=0; j<info.channels; j++) {
\r
8099 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8100 out[info.outOffset[j]] += 0.5;
\r
8101 out[info.outOffset[j]] *= scale;
\r
8103 in += info.inJump;
\r
8104 out += info.outJump;
\r
8107 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8108 Float32 *in = (Float32 *)inBuffer;
\r
8109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8110 for (j=0; j<info.channels; j++) {
\r
8111 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8113 in += info.inJump;
\r
8114 out += info.outJump;
\r
8117 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8118 // Channel compensation and/or (de)interleaving only.
\r
8119 Float64 *in = (Float64 *)inBuffer;
\r
8120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8121 for (j=0; j<info.channels; j++) {
\r
8122 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8124 in += info.inJump;
\r
8125 out += info.outJump;
\r
8129 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8131 Float32 *out = (Float32 *)outBuffer;
\r
8133 if (info.inFormat == RTAUDIO_SINT8) {
\r
8134 signed char *in = (signed char *)inBuffer;
\r
8135 scale = (Float32) ( 1.0 / 127.5 );
\r
8136 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8137 for (j=0; j<info.channels; j++) {
\r
8138 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8139 out[info.outOffset[j]] += 0.5;
\r
8140 out[info.outOffset[j]] *= scale;
\r
8142 in += info.inJump;
\r
8143 out += info.outJump;
\r
8146 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8147 Int16 *in = (Int16 *)inBuffer;
\r
8148 scale = (Float32) ( 1.0 / 32767.5 );
\r
8149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8150 for (j=0; j<info.channels; j++) {
\r
8151 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8152 out[info.outOffset[j]] += 0.5;
\r
8153 out[info.outOffset[j]] *= scale;
\r
8155 in += info.inJump;
\r
8156 out += info.outJump;
\r
8159 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8160 Int24 *in = (Int24 *)inBuffer;
\r
8161 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8162 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8163 for (j=0; j<info.channels; j++) {
\r
8164 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8165 out[info.outOffset[j]] += 0.5;
\r
8166 out[info.outOffset[j]] *= scale;
\r
8168 in += info.inJump;
\r
8169 out += info.outJump;
\r
8172 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8173 Int32 *in = (Int32 *)inBuffer;
\r
8174 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8175 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8176 for (j=0; j<info.channels; j++) {
\r
8177 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8178 out[info.outOffset[j]] += 0.5;
\r
8179 out[info.outOffset[j]] *= scale;
\r
8181 in += info.inJump;
\r
8182 out += info.outJump;
\r
8185 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8186 // Channel compensation and/or (de)interleaving only.
\r
8187 Float32 *in = (Float32 *)inBuffer;
\r
8188 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8189 for (j=0; j<info.channels; j++) {
\r
8190 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8192 in += info.inJump;
\r
8193 out += info.outJump;
\r
8196 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8197 Float64 *in = (Float64 *)inBuffer;
\r
8198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8199 for (j=0; j<info.channels; j++) {
\r
8200 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8202 in += info.inJump;
\r
8203 out += info.outJump;
\r
8207 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8208 Int32 *out = (Int32 *)outBuffer;
\r
8209 if (info.inFormat == RTAUDIO_SINT8) {
\r
8210 signed char *in = (signed char *)inBuffer;
\r
8211 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8212 for (j=0; j<info.channels; j++) {
\r
8213 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8214 out[info.outOffset[j]] <<= 24;
\r
8216 in += info.inJump;
\r
8217 out += info.outJump;
\r
8220 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8221 Int16 *in = (Int16 *)inBuffer;
\r
8222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8223 for (j=0; j<info.channels; j++) {
\r
8224 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8225 out[info.outOffset[j]] <<= 16;
\r
8227 in += info.inJump;
\r
8228 out += info.outJump;
\r
8231 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8232 Int24 *in = (Int24 *)inBuffer;
\r
8233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8234 for (j=0; j<info.channels; j++) {
\r
8235 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8236 out[info.outOffset[j]] <<= 8;
\r
8238 in += info.inJump;
\r
8239 out += info.outJump;
\r
8242 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8243 // Channel compensation and/or (de)interleaving only.
\r
8244 Int32 *in = (Int32 *)inBuffer;
\r
8245 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8246 for (j=0; j<info.channels; j++) {
\r
8247 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8249 in += info.inJump;
\r
8250 out += info.outJump;
\r
8253 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8254 Float32 *in = (Float32 *)inBuffer;
\r
8255 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8256 for (j=0; j<info.channels; j++) {
\r
8257 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8259 in += info.inJump;
\r
8260 out += info.outJump;
\r
8263 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8264 Float64 *in = (Float64 *)inBuffer;
\r
8265 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8266 for (j=0; j<info.channels; j++) {
\r
8267 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8269 in += info.inJump;
\r
8270 out += info.outJump;
\r
8274 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8275 Int24 *out = (Int24 *)outBuffer;
\r
8276 if (info.inFormat == RTAUDIO_SINT8) {
\r
8277 signed char *in = (signed char *)inBuffer;
\r
8278 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8279 for (j=0; j<info.channels; j++) {
\r
8280 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8281 //out[info.outOffset[j]] <<= 16;
\r
8283 in += info.inJump;
\r
8284 out += info.outJump;
\r
8287 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8288 Int16 *in = (Int16 *)inBuffer;
\r
8289 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8290 for (j=0; j<info.channels; j++) {
\r
8291 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8292 //out[info.outOffset[j]] <<= 8;
\r
8294 in += info.inJump;
\r
8295 out += info.outJump;
\r
8298 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8299 // Channel compensation and/or (de)interleaving only.
\r
8300 Int24 *in = (Int24 *)inBuffer;
\r
8301 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8302 for (j=0; j<info.channels; j++) {
\r
8303 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8305 in += info.inJump;
\r
8306 out += info.outJump;
\r
8309 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8310 Int32 *in = (Int32 *)inBuffer;
\r
8311 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8312 for (j=0; j<info.channels; j++) {
\r
8313 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8314 //out[info.outOffset[j]] >>= 8;
\r
8316 in += info.inJump;
\r
8317 out += info.outJump;
\r
8320 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8321 Float32 *in = (Float32 *)inBuffer;
\r
8322 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8323 for (j=0; j<info.channels; j++) {
\r
8324 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8326 in += info.inJump;
\r
8327 out += info.outJump;
\r
8330 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8331 Float64 *in = (Float64 *)inBuffer;
\r
8332 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8333 for (j=0; j<info.channels; j++) {
\r
8334 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8336 in += info.inJump;
\r
8337 out += info.outJump;
\r
8341 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8342 Int16 *out = (Int16 *)outBuffer;
\r
8343 if (info.inFormat == RTAUDIO_SINT8) {
\r
8344 signed char *in = (signed char *)inBuffer;
\r
8345 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8346 for (j=0; j<info.channels; j++) {
\r
8347 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8348 out[info.outOffset[j]] <<= 8;
\r
8350 in += info.inJump;
\r
8351 out += info.outJump;
\r
8354 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8355 // Channel compensation and/or (de)interleaving only.
\r
8356 Int16 *in = (Int16 *)inBuffer;
\r
8357 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8358 for (j=0; j<info.channels; j++) {
\r
8359 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8361 in += info.inJump;
\r
8362 out += info.outJump;
\r
8365 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8366 Int24 *in = (Int24 *)inBuffer;
\r
8367 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8368 for (j=0; j<info.channels; j++) {
\r
8369 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8371 in += info.inJump;
\r
8372 out += info.outJump;
\r
8375 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8376 Int32 *in = (Int32 *)inBuffer;
\r
8377 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8378 for (j=0; j<info.channels; j++) {
\r
8379 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8381 in += info.inJump;
\r
8382 out += info.outJump;
\r
8385 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8386 Float32 *in = (Float32 *)inBuffer;
\r
8387 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8388 for (j=0; j<info.channels; j++) {
\r
8389 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8391 in += info.inJump;
\r
8392 out += info.outJump;
\r
8395 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8396 Float64 *in = (Float64 *)inBuffer;
\r
8397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8398 for (j=0; j<info.channels; j++) {
\r
8399 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8401 in += info.inJump;
\r
8402 out += info.outJump;
\r
8406 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8407 signed char *out = (signed char *)outBuffer;
\r
8408 if (info.inFormat == RTAUDIO_SINT8) {
\r
8409 // Channel compensation and/or (de)interleaving only.
\r
8410 signed char *in = (signed char *)inBuffer;
\r
8411 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8412 for (j=0; j<info.channels; j++) {
\r
8413 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8415 in += info.inJump;
\r
8416 out += info.outJump;
\r
8419 if (info.inFormat == RTAUDIO_SINT16) {
\r
8420 Int16 *in = (Int16 *)inBuffer;
\r
8421 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8422 for (j=0; j<info.channels; j++) {
\r
8423 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8425 in += info.inJump;
\r
8426 out += info.outJump;
\r
8429 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8430 Int24 *in = (Int24 *)inBuffer;
\r
8431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8432 for (j=0; j<info.channels; j++) {
\r
8433 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8435 in += info.inJump;
\r
8436 out += info.outJump;
\r
8439 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8440 Int32 *in = (Int32 *)inBuffer;
\r
8441 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8442 for (j=0; j<info.channels; j++) {
\r
8443 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8445 in += info.inJump;
\r
8446 out += info.outJump;
\r
8449 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8450 Float32 *in = (Float32 *)inBuffer;
\r
8451 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8452 for (j=0; j<info.channels; j++) {
\r
8453 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8455 in += info.inJump;
\r
8456 out += info.outJump;
\r
8459 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8460 Float64 *in = (Float64 *)inBuffer;
\r
8461 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8462 for (j=0; j<info.channels; j++) {
\r
8463 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8465 in += info.inJump;
\r
8466 out += info.outJump;
\r
8472 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8473 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8474 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8476 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8478 register char val;
\r
8479 register char *ptr;
\r
8482 if ( format == RTAUDIO_SINT16 ) {
\r
8483 for ( unsigned int i=0; i<samples; i++ ) {
\r
8484 // Swap 1st and 2nd bytes.
\r
8486 *(ptr) = *(ptr+1);
\r
8489 // Increment 2 bytes.
\r
8493 else if ( format == RTAUDIO_SINT32 ||
\r
8494 format == RTAUDIO_FLOAT32 ) {
\r
8495 for ( unsigned int i=0; i<samples; i++ ) {
\r
8496 // Swap 1st and 4th bytes.
\r
8498 *(ptr) = *(ptr+3);
\r
8501 // Swap 2nd and 3rd bytes.
\r
8504 *(ptr) = *(ptr+1);
\r
8507 // Increment 3 more bytes.
\r
8511 else if ( format == RTAUDIO_SINT24 ) {
\r
8512 for ( unsigned int i=0; i<samples; i++ ) {
\r
8513 // Swap 1st and 3rd bytes.
\r
8515 *(ptr) = *(ptr+2);
\r
8518 // Increment 2 more bytes.
\r
8522 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8523 for ( unsigned int i=0; i<samples; i++ ) {
\r
8524 // Swap 1st and 8th bytes
\r
8526 *(ptr) = *(ptr+7);
\r
8529 // Swap 2nd and 7th bytes
\r
8532 *(ptr) = *(ptr+5);
\r
8535 // Swap 3rd and 6th bytes
\r
8538 *(ptr) = *(ptr+3);
\r
8541 // Swap 4th and 5th bytes
\r
8544 *(ptr) = *(ptr+1);
\r
8547 // Increment 5 more bytes.
\r
8553 // Indentation settings for Vim and Emacs
\r
8555 // Local Variables:
\r
8556 // c-basic-offset: 2
\r
8557 // indent-tabs-mode: nil
\r
8560 // vim: et sts=2 sw=2
\r