1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2011 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.10
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: critical sections on Windows,
// pthread mutexes on Linux/Jack/OSS/CoreAudio builds, and no-op
// dummies otherwise (the dummy API performs no locking).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
113 #if defined(__UNIX_JACK__)
\r
114 if ( api == UNIX_JACK )
\r
115 rtapi_ = new RtApiJack();
\r
117 #if defined(__LINUX_ALSA__)
\r
118 if ( api == LINUX_ALSA )
\r
119 rtapi_ = new RtApiAlsa();
\r
121 #if defined(__LINUX_OSS__)
\r
122 if ( api == LINUX_OSS )
\r
123 rtapi_ = new RtApiOss();
\r
125 #if defined(__WINDOWS_ASIO__)
\r
126 if ( api == WINDOWS_ASIO )
\r
127 rtapi_ = new RtApiAsio();
\r
129 #if defined(__WINDOWS_DS__)
\r
130 if ( api == WINDOWS_DS )
\r
131 rtapi_ = new RtApiDs();
\r
133 #if defined(__MACOSX_CORE__)
\r
134 if ( api == MACOSX_CORE )
\r
135 rtapi_ = new RtApiCore();
\r
137 #if defined(__RTAUDIO_DUMMY__)
\r
138 if ( api == RTAUDIO_DUMMY )
\r
139 rtapi_ = new RtApiDummy();
\r
143 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
147 if ( api != UNSPECIFIED ) {
\r
148 // Attempt to open the specified API.
\r
150 if ( rtapi_ ) return;
\r
152 // No compiled support for specified API value. Issue a debug
\r
153 // warning and continue as if no API was specified.
\r
154 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
157 // Iterate through the compiled APIs and return as soon as we find
\r
158 // one with at least one device or we reach the end of the list.
\r
159 std::vector< RtAudio::Api > apis;
\r
160 getCompiledApi( apis );
\r
161 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
162 openRtApi( apis[i] );
\r
163 if ( rtapi_->getDeviceCount() ) break;
\r
166 if ( rtapi_ ) return;
\r
168 // It should not be possible to get here because the preprocessor
\r
169 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
170 // API-specific definitions are passed to the compiler. But just in
\r
171 // case something weird happens, we'll print out an error message.
\r
172 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
175 RtAudio :: ~RtAudio() throw()
\r
180 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
181 RtAudio::StreamParameters *inputParameters,
\r
182 RtAudioFormat format, unsigned int sampleRate,
\r
183 unsigned int *bufferFrames,
\r
184 RtAudioCallback callback, void *userData,
\r
185 RtAudio::StreamOptions *options )
\r
187 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
188 sampleRate, bufferFrames, callback,
\r
189 userData, options );
\r
192 // *************************************************** //
\r
194 // Public RtApi definitions (see end of file for
\r
195 // private or protected utility functions).
\r
197 // *************************************************** //
\r
201 stream_.state = STREAM_CLOSED;
\r
202 stream_.mode = UNINITIALIZED;
\r
203 stream_.apiHandle = 0;
\r
204 stream_.userBuffer[0] = 0;
\r
205 stream_.userBuffer[1] = 0;
\r
206 MUTEX_INITIALIZE( &stream_.mutex );
\r
207 showWarnings_ = true;
\r
212 MUTEX_DESTROY( &stream_.mutex );
\r
215 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
216 RtAudio::StreamParameters *iParams,
\r
217 RtAudioFormat format, unsigned int sampleRate,
\r
218 unsigned int *bufferFrames,
\r
219 RtAudioCallback callback, void *userData,
\r
220 RtAudio::StreamOptions *options )
\r
222 if ( stream_.state != STREAM_CLOSED ) {
\r
223 errorText_ = "RtApi::openStream: a stream is already open!";
\r
224 error( RtError::INVALID_USE );
\r
227 if ( oParams && oParams->nChannels < 1 ) {
\r
228 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
229 error( RtError::INVALID_USE );
\r
232 if ( iParams && iParams->nChannels < 1 ) {
\r
233 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
234 error( RtError::INVALID_USE );
\r
237 if ( oParams == NULL && iParams == NULL ) {
\r
238 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
239 error( RtError::INVALID_USE );
\r
242 if ( formatBytes(format) == 0 ) {
\r
243 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
244 error( RtError::INVALID_USE );
\r
247 unsigned int nDevices = getDeviceCount();
\r
248 unsigned int oChannels = 0;
\r
250 oChannels = oParams->nChannels;
\r
251 if ( oParams->deviceId >= nDevices ) {
\r
252 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
253 error( RtError::INVALID_USE );
\r
257 unsigned int iChannels = 0;
\r
259 iChannels = iParams->nChannels;
\r
260 if ( iParams->deviceId >= nDevices ) {
\r
261 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
262 error( RtError::INVALID_USE );
\r
269 if ( oChannels > 0 ) {
\r
271 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
272 sampleRate, format, bufferFrames, options );
\r
273 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
276 if ( iChannels > 0 ) {
\r
278 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
279 sampleRate, format, bufferFrames, options );
\r
280 if ( result == false ) {
\r
281 if ( oChannels > 0 ) closeStream();
\r
282 error( RtError::SYSTEM_ERROR );
\r
286 stream_.callbackInfo.callback = (void *) callback;
\r
287 stream_.callbackInfo.userData = userData;
\r
289 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
290 stream_.state = STREAM_STOPPED;
\r
293 unsigned int RtApi :: getDefaultInputDevice( void )
\r
295 // Should be implemented in subclasses if possible.
\r
299 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
301 // Should be implemented in subclasses if possible.
\r
305 void RtApi :: closeStream( void )
\r
307 // MUST be implemented in subclasses!
\r
311 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
312 unsigned int firstChannel, unsigned int sampleRate,
\r
313 RtAudioFormat format, unsigned int *bufferSize,
\r
314 RtAudio::StreamOptions *options )
\r
316 // MUST be implemented in subclasses!
\r
320 void RtApi :: tickStreamTime( void )
\r
322 // Subclasses that do not provide their own implementation of
\r
323 // getStreamTime should call this function once per buffer I/O to
\r
324 // provide basic stream time support.
\r
326 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
328 #if defined( HAVE_GETTIMEOFDAY )
\r
329 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
333 long RtApi :: getStreamLatency( void )
\r
337 long totalLatency = 0;
\r
338 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
339 totalLatency = stream_.latency[0];
\r
340 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
341 totalLatency += stream_.latency[1];
\r
343 return totalLatency;
\r
346 double RtApi :: getStreamTime( void )
\r
350 #if defined( HAVE_GETTIMEOFDAY )
\r
351 // Return a very accurate estimate of the stream time by
\r
352 // adding in the elapsed time since the last tick.
\r
353 struct timeval then;
\r
354 struct timeval now;
\r
356 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
357 return stream_.streamTime;
\r
359 gettimeofday( &now, NULL );
\r
360 then = stream_.lastTickTimestamp;
\r
361 return stream_.streamTime +
\r
362 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
363 (then.tv_sec + 0.000001 * then.tv_usec));
\r
365 return stream_.streamTime;
\r
369 unsigned int RtApi :: getStreamSampleRate( void )
\r
373 return stream_.sampleRate;
\r
377 // *************************************************** //
\r
379 // OS/API-specific methods.
\r
381 // *************************************************** //
\r
383 #if defined(__MACOSX_CORE__)
\r
385 // The OS X CoreAudio API is designed to use a separate callback
\r
386 // procedure for each of its audio devices. A single RtAudio duplex
\r
387 // stream using two different devices is supported here, though it
\r
388 // cannot be guaranteed to always behave correctly because we cannot
\r
389 // synchronize these two callbacks.
\r
391 // A property listener is installed for over/underrun information.
\r
392 // However, no functionality is currently provided to allow property
\r
393 // listeners to trigger user handlers because it is unclear what could
\r
394 // be done if a critical stream parameter (buffer size, sample rate,
\r
395 // device disconnect) notification arrived. The listeners entail
\r
396 // quite a bit of extra code and most likely, a user program wouldn't
\r
397 // be prepared for the result anyway. However, we do provide a flag
\r
398 // to the client callback function to inform of an over/underrun.
\r
400 // A structure to hold various information related to the CoreAudio API
\r
402 struct CoreHandle {
\r
403 AudioDeviceID id[2]; // device ids
\r
404 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
405 AudioDeviceIOProcID procId[2];
\r
407 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
408 UInt32 nStreams[2]; // number of streams to use
\r
410 char *deviceBuffer;
\r
411 pthread_cond_t condition;
\r
412 int drainCounter; // Tracks callback counts when draining
\r
413 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
416 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
419 RtApiCore:: RtApiCore()
\r
421 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
422 // This is a largely undocumented but absolutely necessary
\r
423 // requirement starting with OS-X 10.6. If not called, queries and
\r
424 // updates to various audio device properties are not handled
\r
426 CFRunLoopRef theRunLoop = NULL;
\r
427 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
428 kAudioObjectPropertyScopeGlobal,
\r
429 kAudioObjectPropertyElementMaster };
\r
430 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
431 if ( result != noErr ) {
\r
432 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
433 error( RtError::WARNING );
\r
438 RtApiCore :: ~RtApiCore()
\r
440 // The subclass destructor gets called before the base class
\r
441 // destructor, so close an existing stream before deallocating
\r
442 // apiDeviceId memory.
\r
443 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
446 unsigned int RtApiCore :: getDeviceCount( void )
\r
448 // Find out how many audio devices there are, if any.
\r
450 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
451 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
452 if ( result != noErr ) {
\r
453 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
454 error( RtError::WARNING );
\r
458 return dataSize / sizeof( AudioDeviceID );
\r
461 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
463 unsigned int nDevices = getDeviceCount();
\r
464 if ( nDevices <= 1 ) return 0;
\r
467 UInt32 dataSize = sizeof( AudioDeviceID );
\r
468 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
469 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
470 if ( result != noErr ) {
\r
471 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
472 error( RtError::WARNING );
\r
476 dataSize *= nDevices;
\r
477 AudioDeviceID deviceList[ nDevices ];
\r
478 property.mSelector = kAudioHardwarePropertyDevices;
\r
479 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
480 if ( result != noErr ) {
\r
481 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
482 error( RtError::WARNING );
\r
486 for ( unsigned int i=0; i<nDevices; i++ )
\r
487 if ( id == deviceList[i] ) return i;
\r
489 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
490 error( RtError::WARNING );
\r
494 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
496 unsigned int nDevices = getDeviceCount();
\r
497 if ( nDevices <= 1 ) return 0;
\r
500 UInt32 dataSize = sizeof( AudioDeviceID );
\r
501 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
502 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
503 if ( result != noErr ) {
\r
504 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
505 error( RtError::WARNING );
\r
509 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
510 AudioDeviceID deviceList[ nDevices ];
\r
511 property.mSelector = kAudioHardwarePropertyDevices;
\r
512 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
513 if ( result != noErr ) {
\r
514 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
515 error( RtError::WARNING );
\r
519 for ( unsigned int i=0; i<nDevices; i++ )
\r
520 if ( id == deviceList[i] ) return i;
\r
522 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
523 error( RtError::WARNING );
\r
527 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
529 RtAudio::DeviceInfo info;
\r
530 info.probed = false;
\r
533 unsigned int nDevices = getDeviceCount();
\r
534 if ( nDevices == 0 ) {
\r
535 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
536 error( RtError::INVALID_USE );
\r
539 if ( device >= nDevices ) {
\r
540 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
541 error( RtError::INVALID_USE );
\r
544 AudioDeviceID deviceList[ nDevices ];
\r
545 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
546 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
547 kAudioObjectPropertyScopeGlobal,
\r
548 kAudioObjectPropertyElementMaster };
\r
549 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
550 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
553 error( RtError::WARNING );
\r
557 AudioDeviceID id = deviceList[ device ];
\r
559 // Get the device name.
\r
561 CFStringRef cfname;
\r
562 dataSize = sizeof( CFStringRef );
\r
563 property.mSelector = kAudioObjectPropertyManufacturer;
\r
564 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
565 if ( result != noErr ) {
\r
566 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
567 errorText_ = errorStream_.str();
\r
568 error( RtError::WARNING );
\r
572 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
573 int length = CFStringGetLength(cfname);
\r
574 char *mname = (char *)malloc(length * 3 + 1);
\r
575 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
576 info.name.append( (const char *)mname, strlen(mname) );
\r
577 info.name.append( ": " );
\r
578 CFRelease( cfname );
\r
581 property.mSelector = kAudioObjectPropertyName;
\r
582 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
583 if ( result != noErr ) {
\r
584 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
585 errorText_ = errorStream_.str();
\r
586 error( RtError::WARNING );
\r
590 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
591 length = CFStringGetLength(cfname);
\r
592 char *name = (char *)malloc(length * 3 + 1);
\r
593 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
594 info.name.append( (const char *)name, strlen(name) );
\r
595 CFRelease( cfname );
\r
598 // Get the output stream "configuration".
\r
599 AudioBufferList *bufferList = nil;
\r
600 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
601 property.mScope = kAudioDevicePropertyScopeOutput;
\r
602 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
604 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
605 if ( result != noErr || dataSize == 0 ) {
\r
606 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtError::WARNING );
\r
612 // Allocate the AudioBufferList.
\r
613 bufferList = (AudioBufferList *) malloc( dataSize );
\r
614 if ( bufferList == NULL ) {
\r
615 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
616 error( RtError::WARNING );
\r
620 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
621 if ( result != noErr || dataSize == 0 ) {
\r
622 free( bufferList );
\r
623 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
624 errorText_ = errorStream_.str();
\r
625 error( RtError::WARNING );
\r
629 // Get output channel information.
\r
630 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
631 for ( i=0; i<nStreams; i++ )
\r
632 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
633 free( bufferList );
\r
635 // Get the input stream "configuration".
\r
636 property.mScope = kAudioDevicePropertyScopeInput;
\r
637 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
638 if ( result != noErr || dataSize == 0 ) {
\r
639 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
640 errorText_ = errorStream_.str();
\r
641 error( RtError::WARNING );
\r
645 // Allocate the AudioBufferList.
\r
646 bufferList = (AudioBufferList *) malloc( dataSize );
\r
647 if ( bufferList == NULL ) {
\r
648 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
649 error( RtError::WARNING );
\r
653 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
654 if (result != noErr || dataSize == 0) {
\r
655 free( bufferList );
\r
656 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
657 errorText_ = errorStream_.str();
\r
658 error( RtError::WARNING );
\r
662 // Get input channel information.
\r
663 nStreams = bufferList->mNumberBuffers;
\r
664 for ( i=0; i<nStreams; i++ )
\r
665 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
666 free( bufferList );
\r
668 // If device opens for both playback and capture, we determine the channels.
\r
669 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
670 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
672 // Probe the device sample rates.
\r
673 bool isInput = false;
\r
674 if ( info.outputChannels == 0 ) isInput = true;
\r
676 // Determine the supported sample rates.
\r
677 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
678 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
679 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
680 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
681 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
682 errorText_ = errorStream_.str();
\r
683 error( RtError::WARNING );
\r
687 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
688 AudioValueRange rangeList[ nRanges ];
\r
689 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
690 if ( result != kAudioHardwareNoError ) {
\r
691 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
692 errorText_ = errorStream_.str();
\r
693 error( RtError::WARNING );
\r
697 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
698 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
699 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
700 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
703 info.sampleRates.clear();
\r
704 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
705 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
706 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
709 if ( info.sampleRates.size() == 0 ) {
\r
710 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
711 errorText_ = errorStream_.str();
\r
712 error( RtError::WARNING );
\r
716 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
717 // Thus, any other "physical" formats supported by the device are of
\r
718 // no interest to the client.
\r
719 info.nativeFormats = RTAUDIO_FLOAT32;
\r
721 if ( info.outputChannels > 0 )
\r
722 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
723 if ( info.inputChannels > 0 )
\r
724 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
726 info.probed = true;
\r
730 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
731 const AudioTimeStamp* inNow,
\r
732 const AudioBufferList* inInputData,
\r
733 const AudioTimeStamp* inInputTime,
\r
734 AudioBufferList* outOutputData,
\r
735 const AudioTimeStamp* inOutputTime,
\r
736 void* infoPointer )
\r
738 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
740 RtApiCore *object = (RtApiCore *) info->object;
\r
741 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
742 return kAudioHardwareUnspecifiedError;
\r
744 return kAudioHardwareNoError;
\r
747 OSStatus xrunListener( AudioObjectID inDevice,
\r
749 const AudioObjectPropertyAddress properties[],
\r
750 void* handlePointer )
\r
752 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
753 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
754 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
755 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
756 handle->xrun[1] = true;
\r
758 handle->xrun[0] = true;
\r
762 return kAudioHardwareNoError;
\r
765 OSStatus rateListener( AudioObjectID inDevice,
\r
767 const AudioObjectPropertyAddress properties[],
\r
768 void* ratePointer )
\r
771 Float64 *rate = (Float64 *) ratePointer;
\r
772 UInt32 dataSize = sizeof( Float64 );
\r
773 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
774 kAudioObjectPropertyScopeGlobal,
\r
775 kAudioObjectPropertyElementMaster };
\r
776 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
777 return kAudioHardwareNoError;
\r
780 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
781 unsigned int firstChannel, unsigned int sampleRate,
\r
782 RtAudioFormat format, unsigned int *bufferSize,
\r
783 RtAudio::StreamOptions *options )
\r
786 unsigned int nDevices = getDeviceCount();
\r
787 if ( nDevices == 0 ) {
\r
788 // This should not happen because a check is made before this function is called.
\r
789 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
793 if ( device >= nDevices ) {
\r
794 // This should not happen because a check is made before this function is called.
\r
795 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
799 AudioDeviceID deviceList[ nDevices ];
\r
800 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
801 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
802 kAudioObjectPropertyScopeGlobal,
\r
803 kAudioObjectPropertyElementMaster };
\r
804 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
805 0, NULL, &dataSize, (void *) &deviceList );
\r
806 if ( result != noErr ) {
\r
807 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
811 AudioDeviceID id = deviceList[ device ];
\r
813 // Setup for stream mode.
\r
814 bool isInput = false;
\r
815 if ( mode == INPUT ) {
\r
817 property.mScope = kAudioDevicePropertyScopeInput;
\r
820 property.mScope = kAudioDevicePropertyScopeOutput;
\r
822 // Get the stream "configuration".
\r
823 AudioBufferList *bufferList = nil;
\r
825 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
826 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
827 if ( result != noErr || dataSize == 0 ) {
\r
828 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
829 errorText_ = errorStream_.str();
\r
833 // Allocate the AudioBufferList.
\r
834 bufferList = (AudioBufferList *) malloc( dataSize );
\r
835 if ( bufferList == NULL ) {
\r
836 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
840 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
841 if (result != noErr || dataSize == 0) {
\r
842 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
843 errorText_ = errorStream_.str();
\r
847 // Search for one or more streams that contain the desired number of
\r
848 // channels. CoreAudio devices can have an arbitrary number of
\r
849 // streams and each stream can have an arbitrary number of channels.
\r
850 // For each stream, a single buffer of interleaved samples is
\r
851 // provided. RtAudio prefers the use of one stream of interleaved
\r
852 // data or multiple consecutive single-channel streams. However, we
\r
853 // now support multiple consecutive multi-channel streams of
\r
854 // interleaved data as well.
\r
855 UInt32 iStream, offsetCounter = firstChannel;
\r
856 UInt32 nStreams = bufferList->mNumberBuffers;
\r
857 bool monoMode = false;
\r
858 bool foundStream = false;
\r
860 // First check that the device supports the requested number of
\r
862 UInt32 deviceChannels = 0;
\r
863 for ( iStream=0; iStream<nStreams; iStream++ )
\r
864 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
866 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
867 free( bufferList );
\r
868 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
869 errorText_ = errorStream_.str();
\r
873 // Look for a single stream meeting our needs.
\r
874 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
875 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
876 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
877 if ( streamChannels >= channels + offsetCounter ) {
\r
878 firstStream = iStream;
\r
879 channelOffset = offsetCounter;
\r
880 foundStream = true;
\r
883 if ( streamChannels > offsetCounter ) break;
\r
884 offsetCounter -= streamChannels;
\r
887 // If we didn't find a single stream above, then we should be able
\r
888 // to meet the channel specification with multiple streams.
\r
889 if ( foundStream == false ) {
\r
891 offsetCounter = firstChannel;
\r
892 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
893 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
894 if ( streamChannels > offsetCounter ) break;
\r
895 offsetCounter -= streamChannels;
\r
898 firstStream = iStream;
\r
899 channelOffset = offsetCounter;
\r
900 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
902 if ( streamChannels > 1 ) monoMode = false;
\r
903 while ( channelCounter > 0 ) {
\r
904 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
905 if ( streamChannels > 1 ) monoMode = false;
\r
906 channelCounter -= streamChannels;
\r
911 free( bufferList );
\r
913 // Determine the buffer size.
\r
914 AudioValueRange bufferRange;
\r
915 dataSize = sizeof( AudioValueRange );
\r
916 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
917 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
919 if ( result != noErr ) {
\r
920 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
921 errorText_ = errorStream_.str();
\r
925 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
926 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
927 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
929 // Set the buffer size. For multiple streams, I'm assuming we only
\r
930 // need to make this setting for the master channel.
\r
931 UInt32 theSize = (UInt32) *bufferSize;
\r
932 dataSize = sizeof( UInt32 );
\r
933 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
934 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
936 if ( result != noErr ) {
\r
937 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
938 errorText_ = errorStream_.str();
\r
942 // If attempting to setup a duplex stream, the bufferSize parameter
\r
943 // MUST be the same in both directions!
\r
944 *bufferSize = theSize;
\r
945 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
946 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
947 errorText_ = errorStream_.str();
\r
951 stream_.bufferSize = *bufferSize;
\r
952 stream_.nBuffers = 1;
\r
954 // Try to set "hog" mode ... it's not clear to me this is working.
\r
955 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
957 dataSize = sizeof( hog_pid );
\r
958 property.mSelector = kAudioDevicePropertyHogMode;
\r
959 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
960 if ( result != noErr ) {
\r
961 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
962 errorText_ = errorStream_.str();
\r
966 if ( hog_pid != getpid() ) {
\r
967 hog_pid = getpid();
\r
968 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
969 if ( result != noErr ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
971 errorText_ = errorStream_.str();
\r
977 // Check and if necessary, change the sample rate for the device.
\r
978 Float64 nominalRate;
\r
979 dataSize = sizeof( Float64 );
\r
980 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
981 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
983 if ( result != noErr ) {
\r
984 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
985 errorText_ = errorStream_.str();
\r
989 // Only change the sample rate if off by more than 1 Hz.
\r
990 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
992 // Set a property listener for the sample rate change
\r
993 Float64 reportedRate = 0.0;
\r
994 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
995 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
996 if ( result != noErr ) {
\r
997 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
998 errorText_ = errorStream_.str();
\r
1002 nominalRate = (Float64) sampleRate;
\r
1003 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1005 if ( result != noErr ) {
\r
1006 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1007 errorText_ = errorStream_.str();
\r
1011 // Now wait until the reported nominal rate is what we just set.
\r
1012 UInt32 microCounter = 0;
\r
1013 while ( reportedRate != nominalRate ) {
\r
1014 microCounter += 5000;
\r
1015 if ( microCounter > 5000000 ) break;
\r
1019 // Remove the property listener.
\r
1020 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1022 if ( microCounter > 5000000 ) {
\r
1023 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1024 errorText_ = errorStream_.str();
\r
1029 // Now set the stream format for all streams. Also, check the
\r
1030 // physical format of the device and change that if necessary.
\r
1031 AudioStreamBasicDescription description;
\r
1032 dataSize = sizeof( AudioStreamBasicDescription );
\r
1033 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1034 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1035 if ( result != noErr ) {
\r
1036 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1037 errorText_ = errorStream_.str();
\r
1041 // Set the sample rate and data format id. However, only make the
\r
1042 // change if the sample rate is not within 1.0 of the desired
\r
1043 // rate and the format is not linear pcm.
\r
1044 bool updateFormat = false;
\r
1045 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1046 description.mSampleRate = (Float64) sampleRate;
\r
1047 updateFormat = true;
\r
1050 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1051 description.mFormatID = kAudioFormatLinearPCM;
\r
1052 updateFormat = true;
\r
1055 if ( updateFormat ) {
\r
1056 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1057 if ( result != noErr ) {
\r
1058 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1059 errorText_ = errorStream_.str();
\r
1064 // Now check the physical format.
\r
1065 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1066 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1067 if ( result != noErr ) {
\r
1068 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1069 errorText_ = errorStream_.str();
\r
1073 //std::cout << "Current physical stream format:" << std::endl;
\r
1074 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1075 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1076 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1077 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1079 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1080 description.mFormatID = kAudioFormatLinearPCM;
\r
1081 //description.mSampleRate = (Float64) sampleRate;
\r
1082 AudioStreamBasicDescription testDescription = description;
\r
1083 UInt32 formatFlags;
\r
1085 // We'll try higher bit rates first and then work our way down.
\r
1086 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1087 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1088 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1089 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1090 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1091 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1092 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1093 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1094 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1095 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1096 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1097 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1098 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1100 bool setPhysicalFormat = false;
\r
1101 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1102 testDescription = description;
\r
1103 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1104 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1105 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1106 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1108 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1109 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1110 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1111 if ( result == noErr ) {
\r
1112 setPhysicalFormat = true;
\r
1113 //std::cout << "Updated physical stream format:" << std::endl;
\r
1114 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1115 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1116 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1117 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1122 if ( !setPhysicalFormat ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1127 } // done setting virtual/physical formats.
\r
1129 // Get the stream / device latency.
\r
1131 dataSize = sizeof( UInt32 );
\r
1132 property.mSelector = kAudioDevicePropertyLatency;
\r
1133 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1135 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1137 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1138 errorText_ = errorStream_.str();
\r
1139 error( RtError::WARNING );
\r
1143 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1144 // always be presented in native-endian format, so we should never
\r
1145 // need to byte swap.
\r
1146 stream_.doByteSwap[mode] = false;
\r
1148 // From the CoreAudio documentation, PCM data must be supplied as
\r
1150 stream_.userFormat = format;
\r
1151 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1153 if ( streamCount == 1 )
\r
1154 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1155 else // multiple streams
\r
1156 stream_.nDeviceChannels[mode] = channels;
\r
1157 stream_.nUserChannels[mode] = channels;
\r
1158 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1160 else stream_.userInterleaved = true;
\r
1161 stream_.deviceInterleaved[mode] = true;
\r
1162 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1164 // Set flags for buffer conversion.
\r
1165 stream_.doConvertBuffer[mode] = false;
\r
1166 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1167 stream_.doConvertBuffer[mode] = true;
\r
1168 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1169 stream_.doConvertBuffer[mode] = true;
\r
1170 if ( streamCount == 1 ) {
\r
1171 if ( stream_.nUserChannels[mode] > 1 &&
\r
1172 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1173 stream_.doConvertBuffer[mode] = true;
\r
1175 else if ( monoMode && stream_.userInterleaved )
\r
1176 stream_.doConvertBuffer[mode] = true;
\r
1178 // Allocate our CoreHandle structure for the stream.
\r
1179 CoreHandle *handle = 0;
\r
1180 if ( stream_.apiHandle == 0 ) {
\r
1182 handle = new CoreHandle;
\r
1184 catch ( std::bad_alloc& ) {
\r
1185 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1189 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1190 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1193 stream_.apiHandle = (void *) handle;
\r
1196 handle = (CoreHandle *) stream_.apiHandle;
\r
1197 handle->iStream[mode] = firstStream;
\r
1198 handle->nStreams[mode] = streamCount;
\r
1199 handle->id[mode] = id;
\r
1201 // Allocate necessary internal buffers.
\r
1202 unsigned long bufferBytes;
\r
1203 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1204 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1205 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1206 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1207 if ( stream_.userBuffer[mode] == NULL ) {
\r
1208 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1212 // If possible, we will make use of the CoreAudio stream buffers as
\r
1213 // "device buffers". However, we can't do this if using multiple
\r
1215 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1217 bool makeBuffer = true;
\r
1218 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1219 if ( mode == INPUT ) {
\r
1220 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1221 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1222 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1226 if ( makeBuffer ) {
\r
1227 bufferBytes *= *bufferSize;
\r
1228 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1229 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1230 if ( stream_.deviceBuffer == NULL ) {
\r
1231 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1237 stream_.sampleRate = sampleRate;
\r
1238 stream_.device[mode] = device;
\r
1239 stream_.state = STREAM_STOPPED;
\r
1240 stream_.callbackInfo.object = (void *) this;
\r
1242 // Setup the buffer conversion information structure.
\r
1243 if ( stream_.doConvertBuffer[mode] ) {
\r
1244 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1245 else setConvertInfo( mode, channelOffset );
\r
1248 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1249 // Only one callback procedure per device.
\r
1250 stream_.mode = DUPLEX;
\r
1252 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1253 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1255 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1256 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1258 if ( result != noErr ) {
\r
1259 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1260 errorText_ = errorStream_.str();
\r
1263 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1264 stream_.mode = DUPLEX;
\r
1266 stream_.mode = mode;
\r
1269 // Setup the device property listener for over/underload.
\r
1270 property.mSelector = kAudioDeviceProcessorOverload;
\r
1271 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1277 pthread_cond_destroy( &handle->condition );
\r
1279 stream_.apiHandle = 0;
\r
1282 for ( int i=0; i<2; i++ ) {
\r
1283 if ( stream_.userBuffer[i] ) {
\r
1284 free( stream_.userBuffer[i] );
\r
1285 stream_.userBuffer[i] = 0;
\r
1289 if ( stream_.deviceBuffer ) {
\r
1290 free( stream_.deviceBuffer );
\r
1291 stream_.deviceBuffer = 0;
\r
1297 void RtApiCore :: closeStream( void )
\r
1299 if ( stream_.state == STREAM_CLOSED ) {
\r
1300 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1301 error( RtError::WARNING );
\r
1305 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1306 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1307 if ( stream_.state == STREAM_RUNNING )
\r
1308 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1309 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1310 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1312 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1313 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1317 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1318 if ( stream_.state == STREAM_RUNNING )
\r
1319 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1320 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1321 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1323 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1324 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1328 for ( int i=0; i<2; i++ ) {
\r
1329 if ( stream_.userBuffer[i] ) {
\r
1330 free( stream_.userBuffer[i] );
\r
1331 stream_.userBuffer[i] = 0;
\r
1335 if ( stream_.deviceBuffer ) {
\r
1336 free( stream_.deviceBuffer );
\r
1337 stream_.deviceBuffer = 0;
\r
1340 // Destroy pthread condition variable.
\r
1341 pthread_cond_destroy( &handle->condition );
\r
1343 stream_.apiHandle = 0;
\r
1345 stream_.mode = UNINITIALIZED;
\r
1346 stream_.state = STREAM_CLOSED;
\r
1349 void RtApiCore :: startStream( void )
\r
1352 if ( stream_.state == STREAM_RUNNING ) {
\r
1353 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1354 error( RtError::WARNING );
\r
1358 MUTEX_LOCK( &stream_.mutex );
\r
1360 OSStatus result = noErr;
\r
1361 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1362 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1364 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1365 if ( result != noErr ) {
\r
1366 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1367 errorText_ = errorStream_.str();
\r
1372 if ( stream_.mode == INPUT ||
\r
1373 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1375 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1376 if ( result != noErr ) {
\r
1377 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1378 errorText_ = errorStream_.str();
\r
1383 handle->drainCounter = 0;
\r
1384 handle->internalDrain = false;
\r
1385 stream_.state = STREAM_RUNNING;
\r
1388 MUTEX_UNLOCK( &stream_.mutex );
\r
1390 if ( result == noErr ) return;
\r
1391 error( RtError::SYSTEM_ERROR );
\r
1394 void RtApiCore :: stopStream( void )
\r
1397 if ( stream_.state == STREAM_STOPPED ) {
\r
1398 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1399 error( RtError::WARNING );
\r
1403 MUTEX_LOCK( &stream_.mutex );
\r
1405 if ( stream_.state == STREAM_STOPPED ) {
\r
1406 MUTEX_UNLOCK( &stream_.mutex );
\r
1410 OSStatus result = noErr;
\r
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1414 if ( handle->drainCounter == 0 ) {
\r
1415 handle->drainCounter = 2;
\r
1416 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1419 MUTEX_UNLOCK( &stream_.mutex );
\r
1420 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1421 MUTEX_LOCK( &stream_.mutex );
\r
1422 if ( result != noErr ) {
\r
1423 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1424 errorText_ = errorStream_.str();
\r
1429 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1431 MUTEX_UNLOCK( &stream_.mutex );
\r
1432 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1433 MUTEX_LOCK( &stream_.mutex );
\r
1434 if ( result != noErr ) {
\r
1435 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1436 errorText_ = errorStream_.str();
\r
1441 stream_.state = STREAM_STOPPED;
\r
1444 MUTEX_UNLOCK( &stream_.mutex );
\r
1446 if ( result == noErr ) return;
\r
1447 error( RtError::SYSTEM_ERROR );
\r
1450 void RtApiCore :: abortStream( void )
\r
1453 if ( stream_.state == STREAM_STOPPED ) {
\r
1454 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1455 error( RtError::WARNING );
\r
1459 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1460 handle->drainCounter = 2;
\r
1465 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1466 const AudioBufferList *inBufferList,
\r
1467 const AudioBufferList *outBufferList )
\r
1469 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1470 if ( stream_.state == STREAM_CLOSED ) {
\r
1471 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1472 error( RtError::WARNING );
\r
1476 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1477 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1479 // Check if we were draining the stream and signal is finished.
\r
1480 if ( handle->drainCounter > 3 ) {
\r
1481 if ( handle->internalDrain == true )
\r
1483 else // external call to stopStream()
\r
1484 pthread_cond_signal( &handle->condition );
\r
1488 MUTEX_LOCK( &stream_.mutex );
\r
1490 // The state might change while waiting on a mutex.
\r
1491 if ( stream_.state == STREAM_STOPPED ) {
\r
1492 MUTEX_UNLOCK( &stream_.mutex );
\r
1496 AudioDeviceID outputDevice = handle->id[0];
\r
1498 // Invoke user callback to get fresh output data UNLESS we are
\r
1499 // draining stream or duplex mode AND the input/output devices are
\r
1500 // different AND this function is called for the input device.
\r
1501 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1502 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1503 double streamTime = getStreamTime();
\r
1504 RtAudioStreamStatus status = 0;
\r
1505 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1506 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1507 handle->xrun[0] = false;
\r
1509 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1510 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1511 handle->xrun[1] = false;
\r
1514 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1515 stream_.bufferSize, streamTime, status, info->userData );
\r
1516 if ( cbReturnValue == 2 ) {
\r
1517 MUTEX_UNLOCK( &stream_.mutex );
\r
1518 handle->drainCounter = 2;
\r
1522 else if ( cbReturnValue == 1 )
\r
1523 handle->drainCounter = 1;
\r
1524 handle->internalDrain = true;
\r
1527 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1529 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1531 if ( handle->nStreams[0] == 1 ) {
\r
1532 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1534 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1536 else { // fill multiple streams with zeros
\r
1537 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1538 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1540 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1544 else if ( handle->nStreams[0] == 1 ) {
\r
1545 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1546 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1547 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1549 else { // copy from user buffer
\r
1550 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1551 stream_.userBuffer[0],
\r
1552 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1555 else { // fill multiple streams
\r
1556 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1557 if ( stream_.doConvertBuffer[0] ) {
\r
1558 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1559 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1562 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1563 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1564 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1565 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1566 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1569 else { // fill multiple multi-channel streams with interleaved data
\r
1570 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1571 Float32 *out, *in;
\r
1573 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1574 UInt32 inChannels = stream_.nUserChannels[0];
\r
1575 if ( stream_.doConvertBuffer[0] ) {
\r
1576 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1577 inChannels = stream_.nDeviceChannels[0];
\r
1580 if ( inInterleaved ) inOffset = 1;
\r
1581 else inOffset = stream_.bufferSize;
\r
1583 channelsLeft = inChannels;
\r
1584 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1586 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1587 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1590 // Account for possible channel offset in first stream
\r
1591 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1592 streamChannels -= stream_.channelOffset[0];
\r
1593 outJump = stream_.channelOffset[0];
\r
1597 // Account for possible unfilled channels at end of the last stream
\r
1598 if ( streamChannels > channelsLeft ) {
\r
1599 outJump = streamChannels - channelsLeft;
\r
1600 streamChannels = channelsLeft;
\r
1603 // Determine input buffer offsets and skips
\r
1604 if ( inInterleaved ) {
\r
1605 inJump = inChannels;
\r
1606 in += inChannels - channelsLeft;
\r
1610 in += (inChannels - channelsLeft) * inOffset;
\r
1613 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1614 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1615 *out++ = in[j*inOffset];
\r
1620 channelsLeft -= streamChannels;
\r
1625 if ( handle->drainCounter ) {
\r
1626 handle->drainCounter++;
\r
1631 AudioDeviceID inputDevice;
\r
1632 inputDevice = handle->id[1];
\r
1633 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1635 if ( handle->nStreams[1] == 1 ) {
\r
1636 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1637 convertBuffer( stream_.userBuffer[1],
\r
1638 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1639 stream_.convertInfo[1] );
\r
1641 else { // copy to user buffer
\r
1642 memcpy( stream_.userBuffer[1],
\r
1643 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1644 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1647 else { // read from multiple streams
\r
1648 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1649 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1651 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1652 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1653 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1654 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1655 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1658 else { // read from multiple multi-channel streams
\r
1659 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1660 Float32 *out, *in;
\r
1662 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1663 UInt32 outChannels = stream_.nUserChannels[1];
\r
1664 if ( stream_.doConvertBuffer[1] ) {
\r
1665 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1666 outChannels = stream_.nDeviceChannels[1];
\r
1669 if ( outInterleaved ) outOffset = 1;
\r
1670 else outOffset = stream_.bufferSize;
\r
1672 channelsLeft = outChannels;
\r
1673 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1675 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1676 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1679 // Account for possible channel offset in first stream
\r
1680 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1681 streamChannels -= stream_.channelOffset[1];
\r
1682 inJump = stream_.channelOffset[1];
\r
1686 // Account for possible unread channels at end of the last stream
\r
1687 if ( streamChannels > channelsLeft ) {
\r
1688 inJump = streamChannels - channelsLeft;
\r
1689 streamChannels = channelsLeft;
\r
1692 // Determine output buffer offsets and skips
\r
1693 if ( outInterleaved ) {
\r
1694 outJump = outChannels;
\r
1695 out += outChannels - channelsLeft;
\r
1699 out += (outChannels - channelsLeft) * outOffset;
\r
1702 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1703 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1704 out[j*outOffset] = *in++;
\r
1709 channelsLeft -= streamChannels;
\r
1713 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1714 convertBuffer( stream_.userBuffer[1],
\r
1715 stream_.deviceBuffer,
\r
1716 stream_.convertInfo[1] );
\r
1722 MUTEX_UNLOCK( &stream_.mutex );
\r
1724 RtApi::tickStreamTime();
\r
// Map a CoreAudio OSStatus error code to the name of its symbolic
// constant, for human-readable error reporting. Unrecognized codes
// fall through to a generic message.
1728 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1732 case kAudioHardwareNotRunningError:
\r
1733 return "kAudioHardwareNotRunningError";
\r
1735 case kAudioHardwareUnspecifiedError:
\r
1736 return "kAudioHardwareUnspecifiedError";
\r
1738 case kAudioHardwareUnknownPropertyError:
\r
1739 return "kAudioHardwareUnknownPropertyError";
\r
1741 case kAudioHardwareBadPropertySizeError:
\r
1742 return "kAudioHardwareBadPropertySizeError";
\r
1744 case kAudioHardwareIllegalOperationError:
\r
1745 return "kAudioHardwareIllegalOperationError";
\r
1747 case kAudioHardwareBadObjectError:
\r
1748 return "kAudioHardwareBadObjectError";
\r
1750 case kAudioHardwareBadDeviceError:
\r
1751 return "kAudioHardwareBadDeviceError";
\r
1753 case kAudioHardwareBadStreamError:
\r
1754 return "kAudioHardwareBadStreamError";
\r
1756 case kAudioHardwareUnsupportedOperationError:
\r
1757 return "kAudioHardwareUnsupportedOperationError";
\r
1759 case kAudioDeviceUnsupportedFormatError:
\r
1760 return "kAudioDeviceUnsupportedFormatError";
\r
1762 case kAudioDevicePermissionsError:
\r
1763 return "kAudioDevicePermissionsError";
\r
// Default: the code is not one we know by name.
1766 return "CoreAudio unknown error";
\r
1770 //******************** End of __MACOSX_CORE__ *********************//
\r
1773 #if defined(__UNIX_JACK__)
\r
1775 // JACK is a low-latency audio server, originally written for the
\r
1776 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1777 // connect a number of different applications to an audio device, as
\r
1778 // well as allowing them to share audio between themselves.
\r
1780 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1781 // have ports connected to the server. The JACK server is typically
\r
1782 // started in a terminal as follows:
\r
1784 // .jackd -d alsa -d hw:0
\r
1786 // or through an interface program such as qjackctl. Many of the
\r
1787 // parameters normally set for a stream are fixed by the JACK server
\r
1788 // and can be specified when the JACK server is started. In
\r
1791 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1793 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1794 // frames, and number of buffers = 4. Once the server is running, it
\r
1795 // is not possible to override these values. If the values are not
\r
1796 // specified in the command-line, the JACK server uses default values.
\r
1798 // The JACK server does not have to be running when an instance of
\r
1799 // RtApiJack is created, though the function getDeviceCount() will
\r
1800 // report 0 devices found until JACK has been started. When no
\r
1801 // devices are available (i.e., the JACK server is not running), a
\r
1802 // stream cannot be opened.
\r
1804 #include <jack/jack.h>
\r
1805 #include <unistd.h>
\r
1808 // A structure to hold various information related to the Jack API
\r
1809 // implementation.
\r
1810 struct JackHandle {
\r
// Our connection to the JACK server.
1811 jack_client_t *client;
\r
// Registered port arrays, indexed 0 = playback, 1 = capture.
1812 jack_port_t **ports[2];
\r
// Client-name prefix selected for each direction.
1813 std::string deviceName[2];
\r
// Used by stopStream() to block until the callback finishes draining.
1815 pthread_cond_t condition;
\r
1816 int drainCounter; // Tracks callback counts when draining
\r
1817 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Constructor zero-initializes members; note it also clears xrun[0..1],
// whose declaration (bool xrun[2]) is not visible in this excerpt —
// presumably declared just above; verify against the full source.
1820 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// File-scope thread id used by jackShutdown() to spawn the
// close-stream helper thread.
1823 ThreadHandle threadId;
\r
// Error sink installed via jack_set_error_function() to silence
// JACK's internal error reporting in non-debug builds.
1824 void jackSilentError( const char * ) {};
\r
// Constructor: no state to allocate; in non-debug builds, silence
// JACK's own stderr error reporting by installing a no-op handler.
1826 RtApiJack :: RtApiJack()
\r
1828 // Nothing to do here.
\r
1829 #if !defined(__RTAUDIO_DEBUG__)
\r
1830 // Turn off Jack's internal error reporting.
\r
1831 jack_set_error_function( &jackSilentError );
\r
// Destructor: ensure any open stream is torn down before the object dies.
1835 RtApiJack :: ~RtApiJack()
\r
1837 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count JACK "devices": open a throwaway client (without starting a
// server), enumerate all registered ports, and count the distinct
// client-name prefixes (the text before the first ':'). Returns 0 if
// no JACK server is reachable.
1840 unsigned int RtApiJack :: getDeviceCount( void )
\r
1842 // See if we can become a jack client.
\r
1843 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1844 jack_status_t *status = NULL;
\r
1845 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1846 if ( client == 0 ) return 0;
\r
1848 const char **ports;
\r
1849 std::string port, previousPort;
\r
1850 unsigned int nChannels = 0, nDevices = 0;
\r
1851 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1853 // Parse the port names up to the first colon (:).
\r
1854 size_t iColon = 0;
\r
1856 port = (char *) ports[ nChannels ];
\r
1857 iColon = port.find(":");
\r
1858 if ( iColon != std::string::npos ) {
\r
// NOTE(review): the prefix here keeps the trailing ':' (iColon + 1),
// while getDeviceInfo() strips it — harmless for counting, but
// confirm the asymmetry is intentional.
1859 port = port.substr( 0, iColon + 1 );
\r
1860 if ( port != previousPort ) {
\r
1862 previousPort = port;
\r
1865 } while ( ports[++nChannels] );
\r
1869 jack_client_close( client );
\r
// Probe one JACK "device" (client-name prefix): resolve its name from
// the port list, then report the server sample rate, the number of
// playback/capture channels, the native format (always float32), and
// default-device flags. info.probed is left false on any failure.
1873 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1875 RtAudio::DeviceInfo info;
\r
1876 info.probed = false;
\r
1878 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1879 jack_status_t *status = NULL;
\r
1880 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1881 if ( client == 0 ) {
\r
1882 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1883 error( RtError::WARNING );
\r
1887 const char **ports;
\r
1888 std::string port, previousPort;
\r
1889 unsigned int nPorts = 0, nDevices = 0;
\r
// NOTE(review): the arrays returned by jack_get_ports() here and
// below are never released (jack_free) — confirm against the full
// source; this looks like a per-call leak.
1890 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1892 // Parse the port names up to the first colon (:).
\r
1893 size_t iColon = 0;
\r
1895 port = (char *) ports[ nPorts ];
\r
1896 iColon = port.find(":");
\r
1897 if ( iColon != std::string::npos ) {
\r
1898 port = port.substr( 0, iColon );
\r
1899 if ( port != previousPort ) {
\r
// The device index selects the Nth distinct client-name prefix.
1900 if ( nDevices == device ) info.name = port;
\r
1902 previousPort = port;
\r
1905 } while ( ports[++nPorts] );
\r
1909 if ( device >= nDevices ) {
\r
1910 jack_client_close( client );
\r
1911 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1912 error( RtError::INVALID_USE );
\r
1915 // Get the current jack server sample rate.
\r
// JACK fixes the rate server-wide, so only one rate is reported.
1916 info.sampleRates.clear();
\r
1917 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1919 // Count the available ports containing the client name as device
\r
1920 // channels. Jack "input ports" equal RtAudio output channels.
\r
1921 unsigned int nChannels = 0;
\r
1922 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1924 while ( ports[ nChannels ] ) nChannels++;
\r
1926 info.outputChannels = nChannels;
\r
1929 // Jack "output ports" equal RtAudio input channels.
\r
1931 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1933 while ( ports[ nChannels ] ) nChannels++;
\r
1935 info.inputChannels = nChannels;
\r
1938 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1939 jack_client_close(client);
\r
1940 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1941 error( RtError::WARNING );
\r
1945 // If device opens for both playback and capture, we determine the channels.
\r
1946 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1947 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1949 // Jack always uses 32-bit floats.
\r
1950 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1952 // Jack doesn't provide default devices so we'll use the first available one.
\r
1953 if ( device == 0 && info.outputChannels > 0 )
\r
1954 info.isDefaultOutput = true;
\r
1955 if ( device == 0 && info.inputChannels > 0 )
\r
1956 info.isDefaultInput = true;
\r
1958 jack_client_close(client);
\r
1959 info.probed = true;
\r
// JACK process callback: forward each buffer-ready notification to
// the RtApiJack object carried in the CallbackInfo. A false return
// from callbackEvent() maps to a non-zero return here, which tells
// JACK to stop calling this client.
1963 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1965 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1967 RtApiJack *object = (RtApiJack *) info->object;
\r
1968 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1973 // This function will be called by a spawned thread when the Jack
\r
1974 // server signals that it is shutting down. It is necessary to handle
\r
1975 // it this way because the jackShutdown() function must return before
\r
1976 // the jack_deactivate() function (in closeStream()) will return.
\r
// Thread entry point: closes the stream on behalf of jackShutdown().
1977 extern "C" void *jackCloseStream( void *ptr )
\r
1979 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1980 RtApiJack *object = (RtApiJack *) info->object;
\r
1982 object->closeStream();
\r
1984 pthread_exit( NULL );
\r
// JACK shutdown callback. If the stream is still running (i.e. this
// was not triggered by our own stopStream/deactivate), spawn a helper
// thread (jackCloseStream) to close the stream, since closeStream()
// cannot be called directly from this callback (see comment above
// jackCloseStream). Uses the file-scope ThreadHandle threadId.
1986 void jackShutdown( void *infoPointer )
\r
1988 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1989 RtApiJack *object = (RtApiJack *) info->object;
\r
1991 // Check current stream state. If stopped, then we'll assume this
\r
1992 // was called as a result of a call to RtApiJack::stopStream (the
\r
1993 // deactivation of a client handle causes this function to be called).
\r
1994 // If not, we'll assume the Jack server is shutting down or some
\r
1995 // other problem occurred and we should close the stream.
\r
1996 if ( object->isStreamRunning() == false ) return;
\r
1998 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
1999 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// JACK xrun callback: record an over/underflow flag for whichever
// stream directions have registered ports, so the next callbackEvent()
// can report RTAUDIO_OUTPUT_UNDERFLOW / RTAUDIO_INPUT_OVERFLOW.
2002 int jackXrun( void *infoPointer )
\r
2004 JackHandle *handle = (JackHandle *) infoPointer;
\r
2006 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2007 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect a
// client (or reuse the one from the OUTPUT pass in duplex mode),
// resolve the device's client-name prefix, validate channel counts
// and the server-fixed sample rate, set up format/conversion state,
// allocate the JackHandle plus user/device buffers, install the JACK
// callbacks, and register our ports. Returns FAILURE via the
// error-cleanup path at the end on any problem.
//
// Bug fix: jack_set_xrun_callback() was passed "(void *) &handle" —
// the address of this function's local JackHandle* (a JackHandle**,
// dangling once this function returns) — while jackXrun() casts its
// argument directly to JackHandle*. Pass the handle itself instead.
2012 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2013 unsigned int firstChannel, unsigned int sampleRate,
\r
2014 RtAudioFormat format, unsigned int *bufferSize,
\r
2015 RtAudio::StreamOptions *options )
\r
2017 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2019 // Look for jack server and try to become a client (only do once per stream).
\r
2020 jack_client_t *client = 0;
\r
2021 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2022 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2023 jack_status_t *status = NULL;
\r
2024 if ( options && !options->streamName.empty() )
\r
2025 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2027 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2028 if ( client == 0 ) {
\r
2029 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2030 error( RtError::WARNING );
\r
2035 // The handle must have been created on an earlier pass.
\r
2036 client = handle->client;
\r
2039 const char **ports;
\r
2040 std::string port, previousPort, deviceName;
\r
2041 unsigned int nPorts = 0, nDevices = 0;
\r
2042 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2044 // Parse the port names up to the first colon (:).
\r
2045 size_t iColon = 0;
\r
2047 port = (char *) ports[ nPorts ];
\r
2048 iColon = port.find(":");
\r
2049 if ( iColon != std::string::npos ) {
\r
2050 port = port.substr( 0, iColon );
\r
2051 if ( port != previousPort ) {
\r
// Select the Nth distinct client-name prefix as the device name.
2052 if ( nDevices == device ) deviceName = port;
\r
2054 previousPort = port;
\r
2057 } while ( ports[++nPorts] );
\r
2061 if ( device >= nDevices ) {
\r
2062 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2066 // Count the available ports containing the client name as device
\r
2067 // channels. Jack "input ports" equal RtAudio output channels.
\r
2068 unsigned int nChannels = 0;
\r
2069 unsigned long flag = JackPortIsInput;
\r
2070 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2071 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2073 while ( ports[ nChannels ] ) nChannels++;
\r
2077 // Compare the jack ports for specified client to the requested number of channels.
\r
2078 if ( nChannels < (channels + firstChannel) ) {
\r
2079 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2080 errorText_ = errorStream_.str();
\r
2084 // Check the jack server sample rate.
\r
2085 unsigned int jackRate = jack_get_sample_rate( client );
\r
2086 if ( sampleRate != jackRate ) {
\r
2087 jack_client_close( client );
\r
2088 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2089 errorText_ = errorStream_.str();
\r
2092 stream_.sampleRate = jackRate;
\r
2094 // Get the latency of the JACK port.
\r
2095 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2096 if ( ports[ firstChannel ] )
\r
2097 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2100 // The jack server always uses 32-bit floating-point data.
\r
2101 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2102 stream_.userFormat = format;
\r
2104 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2105 else stream_.userInterleaved = true;
\r
2107 // Jack always uses non-interleaved buffers.
\r
2108 stream_.deviceInterleaved[mode] = false;
\r
2110 // Jack always provides host byte-ordered data.
\r
2111 stream_.doByteSwap[mode] = false;
\r
2113 // Get the buffer size. The buffer size and number of buffers
\r
2114 // (periods) is set when the jack server is started.
\r
2115 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2116 *bufferSize = stream_.bufferSize;
\r
2118 stream_.nDeviceChannels[mode] = channels;
\r
2119 stream_.nUserChannels[mode] = channels;
\r
2121 // Set flags for buffer conversion.
\r
2122 stream_.doConvertBuffer[mode] = false;
\r
2123 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2124 stream_.doConvertBuffer[mode] = true;
\r
2125 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2126 stream_.nUserChannels[mode] > 1 )
\r
2127 stream_.doConvertBuffer[mode] = true;
\r
2129 // Allocate our JackHandle structure for the stream.
\r
2130 if ( handle == 0 ) {
\r
2132 handle = new JackHandle;
\r
2134 catch ( std::bad_alloc& ) {
\r
2135 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2139 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2140 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2143 stream_.apiHandle = (void *) handle;
\r
2144 handle->client = client;
\r
2146 handle->deviceName[mode] = deviceName;
\r
2148 // Allocate necessary internal buffers.
\r
2149 unsigned long bufferBytes;
\r
2150 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2151 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2152 if ( stream_.userBuffer[mode] == NULL ) {
\r
2153 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2157 if ( stream_.doConvertBuffer[mode] ) {
\r
2159 bool makeBuffer = true;
\r
2160 if ( mode == OUTPUT )
\r
2161 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2162 else { // mode == INPUT
\r
2163 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
// In duplex mode, reuse the existing device buffer if it is
// already large enough for both directions.
2164 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2165 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2166 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2170 if ( makeBuffer ) {
\r
2171 bufferBytes *= *bufferSize;
\r
2172 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2173 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2174 if ( stream_.deviceBuffer == NULL ) {
\r
2175 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2181 // Allocate memory for the Jack ports (channels) identifiers.
\r
2182 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2183 if ( handle->ports[mode] == NULL ) {
\r
2184 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2188 stream_.device[mode] = device;
\r
2189 stream_.channelOffset[mode] = firstChannel;
\r
2190 stream_.state = STREAM_STOPPED;
\r
2191 stream_.callbackInfo.object = (void *) this;
\r
2193 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2194 // We had already set up the stream for output.
\r
2195 stream_.mode = DUPLEX;
\r
2197 stream_.mode = mode;
\r
2198 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
// Pass the JackHandle itself (not the address of this local pointer):
// jackXrun() casts the argument directly to JackHandle*.
2199 jack_set_xrun_callback( handle->client, jackXrun, (void *) handle );
\r
2200 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2203 // Register our ports.
\r
2205 if ( mode == OUTPUT ) {
\r
2206 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2207 snprintf( label, 64, "outport %d", i );
\r
2208 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2209 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2213 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2214 snprintf( label, 64, "inport %d", i );
\r
2215 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2216 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2220 // Setup the buffer conversion information structure. We don't use
\r
2221 // buffers to do channel offsets, so we override that parameter
\r
2223 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// Error-cleanup path: release everything allocated above.
2229 pthread_cond_destroy( &handle->condition );
\r
2230 jack_client_close( handle->client );
\r
2232 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2233 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2236 stream_.apiHandle = 0;
\r
2239 for ( int i=0; i<2; i++ ) {
\r
2240 if ( stream_.userBuffer[i] ) {
\r
2241 free( stream_.userBuffer[i] );
\r
2242 stream_.userBuffer[i] = 0;
\r
2246 if ( stream_.deviceBuffer ) {
\r
2247 free( stream_.deviceBuffer );
\r
2248 stream_.deviceBuffer = 0;
\r
// Close the stream: deactivate the JACK client if running, close the
// client connection, free the handle, port arrays, condition variable,
// and all user/device buffers, then mark the stream CLOSED.
2254 void RtApiJack :: closeStream( void )
\r
2256 if ( stream_.state == STREAM_CLOSED ) {
\r
2257 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2258 error( RtError::WARNING );
\r
2262 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2265 if ( stream_.state == STREAM_RUNNING )
\r
2266 jack_deactivate( handle->client );
\r
2268 jack_client_close( handle->client );
\r
2272 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2273 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2274 pthread_cond_destroy( &handle->condition );
\r
2276 stream_.apiHandle = 0;
\r
2279 for ( int i=0; i<2; i++ ) {
\r
2280 if ( stream_.userBuffer[i] ) {
\r
2281 free( stream_.userBuffer[i] );
\r
2282 stream_.userBuffer[i] = 0;
\r
2286 if ( stream_.deviceBuffer ) {
\r
2287 free( stream_.deviceBuffer );
\r
2288 stream_.deviceBuffer = 0;
\r
2291 stream_.mode = UNINITIALIZED;
\r
2292 stream_.state = STREAM_CLOSED;
\r
// Start the stream: activate the JACK client, then connect our
// registered ports to the device's ports, honoring the channel offset
// chosen at open time. On any failure, falls through to the
// SYSTEM_ERROR path after unlocking.
2295 void RtApiJack :: startStream( void )
\r
2298 if ( stream_.state == STREAM_RUNNING ) {
\r
2299 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2300 error( RtError::WARNING );
\r
2304 MUTEX_LOCK(&stream_.mutex);
\r
2306 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2307 int result = jack_activate( handle->client );
\r
2309 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2313 const char **ports;
\r
2315 // Get the list of available ports.
\r
2316 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Our playback ports connect to the device's JACK *input* ports.
2318 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2319 if ( ports == NULL) {
\r
2320 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2324 // Now make the port connections. Since RtAudio wasn't designed to
\r
2325 // allow the user to select particular channels of a device, we'll
\r
2326 // just open the first "nChannels" ports with offset.
\r
2327 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2329 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2330 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2333 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2340 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
// Our capture ports connect from the device's JACK *output* ports.
2342 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2343 if ( ports == NULL) {
\r
2344 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2348 // Now make the port connections. See note above.
\r
2349 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2351 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2352 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2355 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
// Reset drain bookkeeping so the callback starts in normal mode.
2362 handle->drainCounter = 0;
\r
2363 handle->internalDrain = false;
\r
2364 stream_.state = STREAM_RUNNING;
\r
2367 MUTEX_UNLOCK(&stream_.mutex);
\r
2369 if ( result == 0 ) return;
\r
2370 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream. For output streams, setting drainCounter = 2 makes
// the process callback write zeros and then signal the condition
// variable, so pthread_cond_wait() here blocks until the output has
// drained before the client is deactivated.
2373 void RtApiJack :: stopStream( void )
\r
2376 if ( stream_.state == STREAM_STOPPED ) {
\r
2377 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2378 error( RtError::WARNING );
\r
2382 MUTEX_LOCK( &stream_.mutex );
\r
2384 if ( stream_.state == STREAM_STOPPED ) {
\r
// State changed while we waited on the mutex; nothing to do.
2385 MUTEX_UNLOCK( &stream_.mutex );
\r
2389 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2390 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2392 if ( handle->drainCounter == 0 ) {
\r
2393 handle->drainCounter = 2;
\r
2394 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2398 jack_deactivate( handle->client );
\r
2399 stream_.state = STREAM_STOPPED;
\r
2401 MUTEX_UNLOCK( &stream_.mutex );
\r
// Abort the stream: set drainCounter = 2 so the callback zero-fills
// and signals immediately (skipping the user-callback drain), then
// stop. The stopStream() call presumably follows in lines not visible
// in this excerpt.
2404 void RtApiJack :: abortStream( void )
\r
2407 if ( stream_.state == STREAM_STOPPED ) {
\r
2408 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2409 error( RtError::WARNING );
\r
2413 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2414 handle->drainCounter = 2;
\r
2419 // This function will be called by a spawned thread when the user
\r
2420 // callback function signals that the stream should be stopped or
\r
2421 // aborted. It is necessary to handle it this way because the
\r
2422 // callbackEvent() function must return before the jack_deactivate()
\r
2423 // function will return.
\r
// Thread entry point: stops the stream on behalf of callbackEvent().
2424 extern "C" void *jackStopStream( void *ptr )
\r
2426 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2427 RtApiJack *object = (RtApiJack *) info->object;
\r
2429 object->stopStream();
\r
2431 pthread_exit( NULL );
\r
// Per-buffer processing for the JACK stream: invoke the user callback
// (unless draining), then copy/convert data between the user/device
// buffers and the per-channel JACK port buffers. Returns SUCCESS to
// keep the process callback alive.
//
// Bug fix: the two error strings below said "RtApiCore::..." — a
// copy-paste from the CoreAudio section — and are corrected to
// "RtApiJack::...". No other behavior is changed.
2434 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2436 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2437 if ( stream_.state == STREAM_CLOSED ) {
\r
2438 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2439 error( RtError::WARNING );
\r
2442 if ( stream_.bufferSize != nframes ) {
\r
2443 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2444 error( RtError::WARNING );
\r
2448 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2451 // Check if we were draining the stream and signal is finished.
\r
2452 if ( handle->drainCounter > 3 ) {
\r
2453 if ( handle->internalDrain == true )
\r
// Stop was requested from inside the callback: a helper thread
// must call stopStream() because jack_deactivate() cannot
// complete until this callback returns.
2454 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2456 pthread_cond_signal( &handle->condition );
\r
2460 MUTEX_LOCK( &stream_.mutex );
\r
2462 // The state might change while waiting on a mutex.
\r
2463 if ( stream_.state == STREAM_STOPPED ) {
\r
2464 MUTEX_UNLOCK( &stream_.mutex );
\r
2468 // Invoke user callback first, to get fresh output data.
\r
2469 if ( handle->drainCounter == 0 ) {
\r
2470 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2471 double streamTime = getStreamTime();
\r
2472 RtAudioStreamStatus status = 0;
\r
// Report (and clear) any xrun flags recorded by jackXrun().
2473 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2474 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2475 handle->xrun[0] = false;
\r
2477 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2478 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2479 handle->xrun[1] = false;
\r
2481 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2482 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort immediately; 1 = stop after draining.
2483 if ( cbReturnValue == 2 ) {
\r
2484 MUTEX_UNLOCK( &stream_.mutex );
\r
2486 handle->drainCounter = 2;
\r
2487 pthread_create( &id, NULL, jackStopStream, info );
\r
2490 else if ( cbReturnValue == 1 )
\r
2491 handle->drainCounter = 1;
\r
2492 handle->internalDrain = true;
\r
2495 jack_default_audio_sample_t *jackbuffer;
\r
2496 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2497 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2499 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2501 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2502 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2503 memset( jackbuffer, 0, bufferBytes );
\r
2507 else if ( stream_.doConvertBuffer[0] ) {
\r
2509 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2511 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2512 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
// deviceBuffer is non-interleaved, so channel i starts at
// offset i * bufferBytes.
2513 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2516 else { // no buffer conversion
\r
2517 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2518 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2519 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2523 if ( handle->drainCounter ) {
\r
2524 handle->drainCounter++;
\r
2529 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2531 if ( stream_.doConvertBuffer[1] ) {
\r
2532 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2533 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2534 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2536 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2538 else { // no buffer conversion
\r
2539 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2540 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2541 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2547 MUTEX_UNLOCK(&stream_.mutex);
\r
2549 RtApi::tickStreamTime();
\r
2552 //******************** End of __UNIX_JACK__ *********************//
\r
2555 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2557 // The ASIO API is designed around a callback scheme, so this
\r
2558 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2559 // Jack. The primary constraint with ASIO is that it only allows
\r
2560 // access to a single driver at a time. Thus, it is not possible to
\r
2561 // have more than one simultaneous RtAudio stream.
\r
2563 // This implementation also requires a number of external ASIO files
\r
2564 // and a few global variables. The ASIO callback scheme does not
\r
2565 // allow for the passing of user data, so we must create a global
\r
2566 // pointer to our callbackInfo structure.
\r
2568 // On unix systems, we make use of a pthread condition variable.
\r
2569 // Since there is no equivalent in Windows, I hacked something based
\r
2570 // on information found in
\r
2571 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2573 #include "asiosys.h"
\r
2575 #include "iasiothiscallresolver.h"
\r
2576 #include "asiodrivers.h"
\r
// File-scope ASIO state: the ASIO SDK's callback scheme carries no
// user data, so the driver list, callback table, driver info, and a
// pointer to our CallbackInfo must be globals. This also means only
// one ASIO stream can exist at a time.
2579 AsioDrivers drivers;
\r
2580 ASIOCallbacks asioCallbacks;
\r
2581 ASIODriverInfo driverInfo;
\r
2582 CallbackInfo *asioCallbackInfo;
\r
2585 struct AsioHandle {
\r
2586 int drainCounter; // Tracks callback counts when draining
\r
2587 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Per-channel buffer descriptors handed to ASIOCreateBuffers();
// allocated elsewhere (not visible in this excerpt).
2588 ASIOBufferInfo *bufferInfos;
\r
2592 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2595 // Function declarations (definitions at end of section)
\r
2596 static const char* getAsioErrorString( ASIOError result );
\r
2597 void sampleRateChanged( ASIOSampleRate sRate );
\r
2598 long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM in apartment-threaded mode (required by
// ASIO), reset the current driver, and record the ASIO version and a
// window handle for driver dialogs.
2600 RtApiAsio :: RtApiAsio()
\r
2602 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2603 // CoInitialize beforehand, but it must be for appartment threading
\r
2604 // (in which case, CoInitilialize will return S_FALSE here).
\r
2605 coInitialized_ = false;
\r
2606 HRESULT hr = CoInitialize( NULL );
\r
2607 if ( FAILED(hr) ) {
\r
2608 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2609 error( RtError::WARNING );
\r
// Remember we own the CoInitialize so the destructor can balance it.
2611 coInitialized_ = true;
\r
2613 drivers.removeCurrentDriver();
\r
2614 driverInfo.asioVersion = 2;
\r
2616 // See note in DirectSound implementation about GetDesktopWindow().
\r
2617 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: close any open stream and balance CoInitialize().
2620 RtApiAsio :: ~RtApiAsio()
\r
2622 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2623 if ( coInitialized_ ) CoUninitialize();
\r
// Number of ASIO drivers registered with the system.
2626 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2628 return (unsigned int) drivers.asioGetNumDev();
\r
// Probe one ASIO driver: load and initialize it, then query channel
// counts, supported sample rates, and the native sample format
// (checked on the first channel and assumed uniform). Because ASIO
// allows only one loaded driver at a time, a stream being open forces
// a fallback to the info cached by saveDeviceInfo(). info.probed is
// left false on any failure.
2631 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2633 RtAudio::DeviceInfo info;
\r
2634 info.probed = false;
\r
2637 unsigned int nDevices = getDeviceCount();
\r
2638 if ( nDevices == 0 ) {
\r
2639 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2640 error( RtError::INVALID_USE );
\r
2643 if ( device >= nDevices ) {
\r
2644 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2645 error( RtError::INVALID_USE );
\r
2648 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2649 if ( stream_.state != STREAM_CLOSED ) {
\r
2650 if ( device >= devices_.size() ) {
\r
2651 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2652 error( RtError::WARNING );
\r
2655 return devices_[ device ];
\r
2658 char driverName[32];
\r
2659 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2660 if ( result != ASE_OK ) {
\r
2661 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2662 errorText_ = errorStream_.str();
\r
2663 error( RtError::WARNING );
\r
2667 info.name = driverName;
\r
2669 if ( !drivers.loadDriver( driverName ) ) {
\r
2670 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2671 errorText_ = errorStream_.str();
\r
2672 error( RtError::WARNING );
\r
2676 result = ASIOInit( &driverInfo );
\r
2677 if ( result != ASE_OK ) {
\r
2678 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2679 errorText_ = errorStream_.str();
\r
2680 error( RtError::WARNING );
\r
2684 // Determine the device channel information.
\r
2685 long inputChannels, outputChannels;
\r
2686 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2687 if ( result != ASE_OK ) {
\r
2688 drivers.removeCurrentDriver();
\r
2689 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2690 errorText_ = errorStream_.str();
\r
2691 error( RtError::WARNING );
\r
2695 info.outputChannels = outputChannels;
\r
2696 info.inputChannels = inputChannels;
\r
2697 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2698 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2700 // Determine the supported sample rates.
\r
2701 info.sampleRates.clear();
\r
2702 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2703 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2704 if ( result == ASE_OK )
\r
2705 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2708 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2709 ASIOChannelInfo channelInfo;
\r
2710 channelInfo.channel = 0;
\r
2711 channelInfo.isInput = true;
\r
2712 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2713 result = ASIOGetChannelInfo( &channelInfo );
\r
2714 if ( result != ASE_OK ) {
\r
2715 drivers.removeCurrentDriver();
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtError::WARNING );
\r
// Map the ASIO sample type to an RtAudio format flag; both byte
// orders of each width map to the same RtAudio format.
2722 info.nativeFormats = 0;
\r
2723 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2724 info.nativeFormats |= RTAUDIO_SINT16;
\r
2725 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2726 info.nativeFormats |= RTAUDIO_SINT32;
\r
2727 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2728 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2729 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2730 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2732 if ( info.outputChannels > 0 )
\r
2733 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2734 if ( info.inputChannels > 0 )
\r
2735 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2737 info.probed = true;
\r
2738 drivers.removeCurrentDriver();
\r
2742 void bufferSwitch( long index, ASIOBool processNow )
\r
2744 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2745 object->callbackEvent( index );
\r
2748 void RtApiAsio :: saveDeviceInfo( void )
\r
2752 unsigned int nDevices = getDeviceCount();
\r
2753 devices_.resize( nDevices );
\r
2754 for ( unsigned int i=0; i<nDevices; i++ )
\r
2755 devices_[i] = getDeviceInfo( i );
\r
2758 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2759 unsigned int firstChannel, unsigned int sampleRate,
\r
2760 RtAudioFormat format, unsigned int *bufferSize,
\r
2761 RtAudio::StreamOptions *options )
\r
2763 // For ASIO, a duplex stream MUST use the same driver.
\r
2764 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2765 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2769 char driverName[32];
\r
2770 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2771 if ( result != ASE_OK ) {
\r
2772 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2773 errorText_ = errorStream_.str();
\r
2777 // Only load the driver once for duplex stream.
\r
2778 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2779 // The getDeviceInfo() function will not work when a stream is open
\r
2780 // because ASIO does not allow multiple devices to run at the same
\r
2781 // time. Thus, we'll probe the system before opening a stream and
\r
2782 // save the results for use by getDeviceInfo().
\r
2783 this->saveDeviceInfo();
\r
2785 if ( !drivers.loadDriver( driverName ) ) {
\r
2786 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2787 errorText_ = errorStream_.str();
\r
2791 result = ASIOInit( &driverInfo );
\r
2792 if ( result != ASE_OK ) {
\r
2793 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2794 errorText_ = errorStream_.str();
\r
2799 // Check the device channel count.
\r
2800 long inputChannels, outputChannels;
\r
2801 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2802 if ( result != ASE_OK ) {
\r
2803 drivers.removeCurrentDriver();
\r
2804 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2809 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2810 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2811 drivers.removeCurrentDriver();
\r
2812 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2813 errorText_ = errorStream_.str();
\r
2816 stream_.nDeviceChannels[mode] = channels;
\r
2817 stream_.nUserChannels[mode] = channels;
\r
2818 stream_.channelOffset[mode] = firstChannel;
\r
2820 // Verify the sample rate is supported.
\r
2821 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2822 if ( result != ASE_OK ) {
\r
2823 drivers.removeCurrentDriver();
\r
2824 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2825 errorText_ = errorStream_.str();
\r
2829 // Get the current sample rate
\r
2830 ASIOSampleRate currentRate;
\r
2831 result = ASIOGetSampleRate( ¤tRate );
\r
2832 if ( result != ASE_OK ) {
\r
2833 drivers.removeCurrentDriver();
\r
2834 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2835 errorText_ = errorStream_.str();
\r
2839 // Set the sample rate only if necessary
\r
2840 if ( currentRate != sampleRate ) {
\r
2841 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2842 if ( result != ASE_OK ) {
\r
2843 drivers.removeCurrentDriver();
\r
2844 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2845 errorText_ = errorStream_.str();
\r
2850 // Determine the driver data type.
\r
2851 ASIOChannelInfo channelInfo;
\r
2852 channelInfo.channel = 0;
\r
2853 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2854 else channelInfo.isInput = true;
\r
2855 result = ASIOGetChannelInfo( &channelInfo );
\r
2856 if ( result != ASE_OK ) {
\r
2857 drivers.removeCurrentDriver();
\r
2858 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2859 errorText_ = errorStream_.str();
\r
2863 // Assuming WINDOWS host is always little-endian.
\r
2864 stream_.doByteSwap[mode] = false;
\r
2865 stream_.userFormat = format;
\r
2866 stream_.deviceFormat[mode] = 0;
\r
2867 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2868 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2869 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2871 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2872 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2873 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2875 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2876 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2877 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2879 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2880 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2881 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2884 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2885 drivers.removeCurrentDriver();
\r
2886 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2887 errorText_ = errorStream_.str();
\r
2891 // Set the buffer size. For a duplex stream, this will end up
\r
2892 // setting the buffer size based on the input constraints, which
\r
2894 long minSize, maxSize, preferSize, granularity;
\r
2895 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2896 if ( result != ASE_OK ) {
\r
2897 drivers.removeCurrentDriver();
\r
2898 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2899 errorText_ = errorStream_.str();
\r
2903 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2904 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2905 else if ( granularity == -1 ) {
\r
2906 // Make sure bufferSize is a power of two.
\r
2907 int log2_of_min_size = 0;
\r
2908 int log2_of_max_size = 0;
\r
2910 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2911 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2912 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2915 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2916 int min_delta_num = log2_of_min_size;
\r
2918 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2919 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2920 if (current_delta < min_delta) {
\r
2921 min_delta = current_delta;
\r
2922 min_delta_num = i;
\r
2926 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2927 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2928 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2930 else if ( granularity != 0 ) {
\r
2931 // Set to an even multiple of granularity, rounding up.
\r
2932 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2935 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2936 drivers.removeCurrentDriver();
\r
2937 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2941 stream_.bufferSize = *bufferSize;
\r
2942 stream_.nBuffers = 2;
\r
2944 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2945 else stream_.userInterleaved = true;
\r
2947 // ASIO always uses non-interleaved buffers.
\r
2948 stream_.deviceInterleaved[mode] = false;
\r
2950 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2951 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2952 if ( handle == 0 ) {
\r
2954 handle = new AsioHandle;
\r
2956 catch ( std::bad_alloc& ) {
\r
2957 //if ( handle == NULL ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2962 handle->bufferInfos = 0;
\r
2964 // Create a manual-reset event.
\r
2965 handle->condition = CreateEvent( NULL, // no security
\r
2966 TRUE, // manual-reset
\r
2967 FALSE, // non-signaled initially
\r
2968 NULL ); // unnamed
\r
2969 stream_.apiHandle = (void *) handle;
\r
2972 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2973 // and output separately, we'll have to dispose of previously
\r
2974 // created output buffers for a duplex stream.
\r
2975 long inputLatency, outputLatency;
\r
2976 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2977 ASIODisposeBuffers();
\r
2978 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2981 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2982 bool buffersAllocated = false;
\r
2983 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2984 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2985 if ( handle->bufferInfos == NULL ) {
\r
2986 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2987 errorText_ = errorStream_.str();
\r
2991 ASIOBufferInfo *infos;
\r
2992 infos = handle->bufferInfos;
\r
2993 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2994 infos->isInput = ASIOFalse;
\r
2995 infos->channelNum = i + stream_.channelOffset[0];
\r
2996 infos->buffers[0] = infos->buffers[1] = 0;
\r
2998 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2999 infos->isInput = ASIOTrue;
\r
3000 infos->channelNum = i + stream_.channelOffset[1];
\r
3001 infos->buffers[0] = infos->buffers[1] = 0;
\r
3004 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3005 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3006 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3007 asioCallbacks.asioMessage = &asioMessages;
\r
3008 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3009 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3010 if ( result != ASE_OK ) {
\r
3011 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3012 errorText_ = errorStream_.str();
\r
3015 buffersAllocated = true;
\r
3017 // Set flags for buffer conversion.
\r
3018 stream_.doConvertBuffer[mode] = false;
\r
3019 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3020 stream_.doConvertBuffer[mode] = true;
\r
3021 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3022 stream_.nUserChannels[mode] > 1 )
\r
3023 stream_.doConvertBuffer[mode] = true;
\r
3025 // Allocate necessary internal buffers
\r
3026 unsigned long bufferBytes;
\r
3027 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3028 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3029 if ( stream_.userBuffer[mode] == NULL ) {
\r
3030 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3034 if ( stream_.doConvertBuffer[mode] ) {
\r
3036 bool makeBuffer = true;
\r
3037 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3038 if ( mode == INPUT ) {
\r
3039 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3040 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3041 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3045 if ( makeBuffer ) {
\r
3046 bufferBytes *= *bufferSize;
\r
3047 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3048 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3049 if ( stream_.deviceBuffer == NULL ) {
\r
3050 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3056 stream_.sampleRate = sampleRate;
\r
3057 stream_.device[mode] = device;
\r
3058 stream_.state = STREAM_STOPPED;
\r
3059 asioCallbackInfo = &stream_.callbackInfo;
\r
3060 stream_.callbackInfo.object = (void *) this;
\r
3061 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3062 // We had already set up an output stream.
\r
3063 stream_.mode = DUPLEX;
\r
3065 stream_.mode = mode;
\r
3067 // Determine device latencies
\r
3068 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3069 if ( result != ASE_OK ) {
\r
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3071 errorText_ = errorStream_.str();
\r
3072 error( RtError::WARNING); // warn but don't fail
\r
3075 stream_.latency[0] = outputLatency;
\r
3076 stream_.latency[1] = inputLatency;
\r
3079 // Setup the buffer conversion information structure. We don't use
\r
3080 // buffers to do channel offsets, so we override that parameter
\r
3082 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3087 if ( buffersAllocated )
\r
3088 ASIODisposeBuffers();
\r
3089 drivers.removeCurrentDriver();
\r
3092 CloseHandle( handle->condition );
\r
3093 if ( handle->bufferInfos )
\r
3094 free( handle->bufferInfos );
\r
3096 stream_.apiHandle = 0;
\r
3099 for ( int i=0; i<2; i++ ) {
\r
3100 if ( stream_.userBuffer[i] ) {
\r
3101 free( stream_.userBuffer[i] );
\r
3102 stream_.userBuffer[i] = 0;
\r
3106 if ( stream_.deviceBuffer ) {
\r
3107 free( stream_.deviceBuffer );
\r
3108 stream_.deviceBuffer = 0;
\r
3114 void RtApiAsio :: closeStream()
\r
3116 if ( stream_.state == STREAM_CLOSED ) {
\r
3117 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3118 error( RtError::WARNING );
\r
3122 if ( stream_.state == STREAM_RUNNING ) {
\r
3123 stream_.state = STREAM_STOPPED;
\r
3126 ASIODisposeBuffers();
\r
3127 drivers.removeCurrentDriver();
\r
3129 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3131 CloseHandle( handle->condition );
\r
3132 if ( handle->bufferInfos )
\r
3133 free( handle->bufferInfos );
\r
3135 stream_.apiHandle = 0;
\r
3138 for ( int i=0; i<2; i++ ) {
\r
3139 if ( stream_.userBuffer[i] ) {
\r
3140 free( stream_.userBuffer[i] );
\r
3141 stream_.userBuffer[i] = 0;
\r
3145 if ( stream_.deviceBuffer ) {
\r
3146 free( stream_.deviceBuffer );
\r
3147 stream_.deviceBuffer = 0;
\r
3150 stream_.mode = UNINITIALIZED;
\r
3151 stream_.state = STREAM_CLOSED;
\r
3154 bool stopThreadCalled = false;
\r
3156 void RtApiAsio :: startStream()
\r
3159 if ( stream_.state == STREAM_RUNNING ) {
\r
3160 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3161 error( RtError::WARNING );
\r
3165 //MUTEX_LOCK( &stream_.mutex );
\r
3167 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3168 ASIOError result = ASIOStart();
\r
3169 if ( result != ASE_OK ) {
\r
3170 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3171 errorText_ = errorStream_.str();
\r
3175 handle->drainCounter = 0;
\r
3176 handle->internalDrain = false;
\r
3177 ResetEvent( handle->condition );
\r
3178 stream_.state = STREAM_RUNNING;
\r
3182 //MUTEX_UNLOCK( &stream_.mutex );
\r
3184 stopThreadCalled = false;
\r
3186 if ( result == ASE_OK ) return;
\r
3187 error( RtError::SYSTEM_ERROR );
\r
3190 void RtApiAsio :: stopStream()
\r
3193 if ( stream_.state == STREAM_STOPPED ) {
\r
3194 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3195 error( RtError::WARNING );
\r
3200 MUTEX_LOCK( &stream_.mutex );
\r
3202 if ( stream_.state == STREAM_STOPPED ) {
\r
3203 MUTEX_UNLOCK( &stream_.mutex );
\r
3208 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3209 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3210 if ( handle->drainCounter == 0 ) {
\r
3211 handle->drainCounter = 2;
\r
3212 // MUTEX_UNLOCK( &stream_.mutex );
\r
3213 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3214 //ResetEvent( handle->condition );
\r
3215 // MUTEX_LOCK( &stream_.mutex );
\r
3219 stream_.state = STREAM_STOPPED;
\r
3221 ASIOError result = ASIOStop();
\r
3222 if ( result != ASE_OK ) {
\r
3223 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3224 errorText_ = errorStream_.str();
\r
3227 // MUTEX_UNLOCK( &stream_.mutex );
\r
3229 if ( result == ASE_OK ) return;
\r
3230 error( RtError::SYSTEM_ERROR );
\r
3233 void RtApiAsio :: abortStream()
\r
3236 if ( stream_.state == STREAM_STOPPED ) {
\r
3237 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3238 error( RtError::WARNING );
\r
3242 // The following lines were commented-out because some behavior was
\r
3243 // noted where the device buffers need to be zeroed to avoid
\r
3244 // continuing sound, even when the device buffers are completely
\r
3245 // disposed. So now, calling abort is the same as calling stop.
\r
3246 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3247 // handle->drainCounter = 2;
\r
3251 // This function will be called by a spawned thread when the user
\r
3252 // callback function signals that the stream should be stopped or
\r
3253 // aborted. It is necessary to handle it this way because the
\r
3254 // callbackEvent() function must return before the ASIOStop()
\r
3255 // function will return.
\r
3256 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3258 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3259 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3261 object->stopStream();
\r
3263 _endthreadex( 0 );
\r
3267 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3269 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
3270 if ( stopThreadCalled ) return SUCCESS;
\r
3271 if ( stream_.state == STREAM_CLOSED ) {
\r
3272 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3273 error( RtError::WARNING );
\r
3277 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3278 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3280 // Check if we were draining the stream and signal if finished.
\r
3281 if ( handle->drainCounter > 3 ) {
\r
3282 if ( handle->internalDrain == false )
\r
3283 SetEvent( handle->condition );
\r
3284 else { // spawn a thread to stop the stream
\r
3285 unsigned threadId;
\r
3286 stopThreadCalled = true;
\r
3287 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3288 &stream_.callbackInfo, 0, &threadId );
\r
3293 /*MUTEX_LOCK( &stream_.mutex );
\r
3295 // The state might change while waiting on a mutex.
\r
3296 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
\r
3298 // Invoke user callback to get fresh output data UNLESS we are
\r
3299 // draining stream.
\r
3300 if ( handle->drainCounter == 0 ) {
\r
3301 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3302 double streamTime = getStreamTime();
\r
3303 RtAudioStreamStatus status = 0;
\r
3304 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3305 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3308 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3309 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3312 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3313 stream_.bufferSize, streamTime, status, info->userData );
\r
3314 if ( cbReturnValue == 2 ) {
\r
3315 // MUTEX_UNLOCK( &stream_.mutex );
\r
3317 unsigned threadId;
\r
3318 stopThreadCalled = true;
\r
3319 handle->drainCounter = 2;
\r
3320 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3321 &stream_.callbackInfo, 0, &threadId );
\r
3324 else if ( cbReturnValue == 1 )
\r
3325 handle->drainCounter = 1;
\r
3326 handle->internalDrain = true;
\r
3329 unsigned int nChannels, bufferBytes, i, j;
\r
3330 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3331 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3333 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3335 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3337 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3338 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3339 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3343 else if ( stream_.doConvertBuffer[0] ) {
\r
3345 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3346 if ( stream_.doByteSwap[0] )
\r
3347 byteSwapBuffer( stream_.deviceBuffer,
\r
3348 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3349 stream_.deviceFormat[0] );
\r
3351 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3352 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3353 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3354 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3360 if ( stream_.doByteSwap[0] )
\r
3361 byteSwapBuffer( stream_.userBuffer[0],
\r
3362 stream_.bufferSize * stream_.nUserChannels[0],
\r
3363 stream_.userFormat );
\r
3365 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3366 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3367 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3368 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3373 if ( handle->drainCounter ) {
\r
3374 handle->drainCounter++;
\r
3379 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3381 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3383 if (stream_.doConvertBuffer[1]) {
\r
3385 // Always interleave ASIO input data.
\r
3386 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3387 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3388 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3389 handle->bufferInfos[i].buffers[bufferIndex],
\r
3393 if ( stream_.doByteSwap[1] )
\r
3394 byteSwapBuffer( stream_.deviceBuffer,
\r
3395 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3396 stream_.deviceFormat[1] );
\r
3397 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3403 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3404 handle->bufferInfos[i].buffers[bufferIndex],
\r
3409 if ( stream_.doByteSwap[1] )
\r
3410 byteSwapBuffer( stream_.userBuffer[1],
\r
3411 stream_.bufferSize * stream_.nUserChannels[1],
\r
3412 stream_.userFormat );
\r
3417 // The following call was suggested by Malte Clasen. While the API
\r
3418 // documentation indicates it should not be required, some device
\r
3419 // drivers apparently do not function correctly without it.
\r
3420 ASIOOutputReady();
\r
3422 // MUTEX_UNLOCK( &stream_.mutex );
\r
3424 RtApi::tickStreamTime();
\r
3428 void sampleRateChanged( ASIOSampleRate sRate )
\r
3430 // The ASIO documentation says that this usually only happens during
\r
3431 // external sync. Audio processing is not stopped by the driver,
\r
3432 // actual sample rate might not have even changed, maybe only the
\r
3433 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3436 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3438 object->stopStream();
\r
3440 catch ( RtError &exception ) {
\r
3441 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3445 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3448 long asioMessages( long selector, long value, void* message, double* opt )
\r
3452 switch( selector ) {
\r
3453 case kAsioSelectorSupported:
\r
3454 if ( value == kAsioResetRequest
\r
3455 || value == kAsioEngineVersion
\r
3456 || value == kAsioResyncRequest
\r
3457 || value == kAsioLatenciesChanged
\r
3458 // The following three were added for ASIO 2.0, you don't
\r
3459 // necessarily have to support them.
\r
3460 || value == kAsioSupportsTimeInfo
\r
3461 || value == kAsioSupportsTimeCode
\r
3462 || value == kAsioSupportsInputMonitor)
\r
3465 case kAsioResetRequest:
\r
3466 // Defer the task and perform the reset of the driver during the
\r
3467 // next "safe" situation. You cannot reset the driver right now,
\r
3468 // as this code is called from the driver. Reset the driver is
\r
3469 // done by completely destruct is. I.e. ASIOStop(),
\r
3470 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3472 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3475 case kAsioResyncRequest:
\r
3476 // This informs the application that the driver encountered some
\r
3477 // non-fatal data loss. It is used for synchronization purposes
\r
3478 // of different media. Added mainly to work around the Win16Mutex
\r
3479 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3480 // which could lose data because the Mutex was held too long by
\r
3481 // another thread. However a driver can issue it in other
\r
3482 // situations, too.
\r
3483 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3487 case kAsioLatenciesChanged:
\r
3488 // This will inform the host application that the drivers were
\r
3489 // latencies changed. Beware, it this does not mean that the
\r
3490 // buffer sizes have changed! You might need to update internal
\r
3492 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3495 case kAsioEngineVersion:
\r
3496 // Return the supported ASIO version of the host application. If
\r
3497 // a host application does not implement this selector, ASIO 1.0
\r
3498 // is assumed by the driver.
\r
3501 case kAsioSupportsTimeInfo:
\r
3502 // Informs the driver whether the
\r
3503 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3504 // For compatibility with ASIO 1.0 drivers the host application
\r
3505 // should always support the "old" bufferSwitch method, too.
\r
3508 case kAsioSupportsTimeCode:
\r
3509 // Informs the driver whether application is interested in time
\r
3510 // code info. If an application does not need to know about time
\r
3511 // code, the driver has less work to do.
\r
3518 static const char* getAsioErrorString( ASIOError result )
\r
3523 const char*message;
\r
3526 static Messages m[] =
\r
3528 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3529 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3530 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3531 { ASE_InvalidMode, "Invalid mode." },
\r
3532 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3533 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3534 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3537 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3538 if ( m[i].value == result ) return m[i].message;
\r
3540 return "Unknown error.";
\r
3542 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3546 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3548 // Modified by Robin Davies, October 2005
\r
3549 // - Improvements to DirectX pointer chasing.
\r
3550 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3551 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3552 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3553 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3555 #include <dsound.h>
\r
3556 #include <assert.h>
\r
3557 #include <algorithm>
\r
3559 #if defined(__MINGW32__)
\r
3560 // missing from latest mingw winapi
\r
3561 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3562 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3563 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3564 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3567 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3569 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3570 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3573 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3575 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3576 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3577 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3578 return pointer >= earlierPointer && pointer < laterPointer;
\r
3581 // A structure to hold various information related to the DirectSound
\r
3582 // API implementation.
\r
3584 unsigned int drainCounter; // Tracks callback counts when draining
\r
3585 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3589 UINT bufferPointer[2];
\r
3590 DWORD dsBufferSize[2];
\r
3591 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3595 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3598 // Declarations for utility functions, callbacks, and structures
\r
3599 // specific to the DirectSound implementation.
\r
3600 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3601 LPCTSTR description,
\r
3603 LPVOID lpContext );
\r
3605 static const char* getErrorString( int code );
\r
3607 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3616 : found(false) { validId[0] = false; validId[1] = false; }
\r
3619 std::vector< DsDevice > dsDevices;
\r
// Constructor: initialize COM for this thread.  Success is remembered
// in coInitialized_ so the destructor can balance with CoUninitialize.
3621 RtApiDs :: RtApiDs()

3623 // Dsound will run both-threaded. If CoInitialize fails, then just

3624 // accept whatever the mainline chose for a threading model.

3625 coInitialized_ = false;

3626 HRESULT hr = CoInitialize( NULL );

3627 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: close any open stream, then undo the constructor's
// CoInitialize if (and only if) it succeeded.
3630 RtApiDs :: ~RtApiDs()

3632 if ( coInitialized_ ) CoUninitialize(); // balanced call.

3633 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3636 // The DirectSound default output is always the first device.

// NOTE(review): the function body is not visible in this chunk;
// per the comment above it presumably returns 0 — confirm.
3637 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3642 // The DirectSound default input is always the first input device,

3643 // which is the first capture device enumerated.

// NOTE(review): the function body is not visible in this chunk;
// per the comment above it presumably returns 0 — confirm.
3644 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3649 unsigned int RtApiDs :: getDeviceCount( void )
\r
3651 // Set query flag for previously found devices to false, so that we
\r
3652 // can check for any devices that have disappeared.
\r
3653 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3654 dsDevices[i].found = false;
\r
3656 // Query DirectSound devices.
\r
3657 bool isInput = false;
\r
3658 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3659 if ( FAILED( result ) ) {
\r
3660 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3661 errorText_ = errorStream_.str();
\r
3662 error( RtError::WARNING );
\r
3665 // Query DirectSoundCapture devices.
\r
3667 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3668 if ( FAILED( result ) ) {
\r
3669 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3670 errorText_ = errorStream_.str();
\r
3671 error( RtError::WARNING );
\r
3674 // Clean out any devices that may have disappeared.
\r
3675 std::vector< int > indices;
\r
3676 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3677 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3678 unsigned int nErased = 0;
\r
3679 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3680 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3682 return dsDevices.size();
\r
// Probe device 'device' (an index into dsDevices) and fill in an
// RtAudio::DeviceInfo: output channel count, supported sample rates,
// native formats, input channel count, default-device flags and name.
// info.probed stays false until the probe completes successfully.
// Control flow uses a goto to skip straight to the capture-side probe
// when the device has no valid playback GUID.
3685 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

3687 RtAudio::DeviceInfo info;

3688 info.probed = false;

// Lazily populate the device list on first use.
3690 if ( dsDevices.size() == 0 ) {

3691 // Force a query of all devices

3693 if ( dsDevices.size() == 0 ) {

3694 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

3695 error( RtError::INVALID_USE );

3699 if ( device >= dsDevices.size() ) {

3700 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

3701 error( RtError::INVALID_USE );

// No playback GUID: probe only the capture side.
// NOTE(review): the 'probeInput' label and the declarations of
// 'result'/'outCaps' are not visible in this chunk — confirm against
// the full file.
3705 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

3707 LPDIRECTSOUND output;

3709 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3710 if ( FAILED( result ) ) {

3711 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3712 errorText_ = errorStream_.str();

3713 error( RtError::WARNING );

3717 outCaps.dwSize = sizeof( outCaps );

3718 result = output->GetCaps( &outCaps );

3719 if ( FAILED( result ) ) {

3720 output->Release();

3721 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

3722 errorText_ = errorStream_.str();

3723 error( RtError::WARNING );

3727 // Get output channel information.

3728 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

3730 // Get sample rate information.

// Keep every entry of the SAMPLE_RATES table that falls inside the
// device's secondary-buffer rate range.
3731 info.sampleRates.clear();

3732 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

3733 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

3734 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

3735 info.sampleRates.push_back( SAMPLE_RATES[k] );

3738 // Get format information.

3739 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

3740 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

3742 output->Release();

3744 if ( getDefaultOutputDevice() == device )

3745 info.isDefaultOutput = true;

// Playback-only device: finished.
3747 if ( dsDevices[ device ].validId[1] == false ) {

3748 info.name = dsDevices[ device ].name;

3749 info.probed = true;

3755 LPDIRECTSOUNDCAPTURE input;

3756 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

3757 if ( FAILED( result ) ) {

3758 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

3759 errorText_ = errorStream_.str();

3760 error( RtError::WARNING );

3765 inCaps.dwSize = sizeof( inCaps );

3766 result = input->GetCaps( &inCaps );

3767 if ( FAILED( result ) ) {

3769 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

3770 errorText_ = errorStream_.str();

3771 error( RtError::WARNING );

3775 // Get input channel information.

3776 info.inputChannels = inCaps.dwChannels;

3778 // Get sample rate and format information.

// dwFormats is a WAVE_FORMAT_* bitmask: decode formats first, then the
// rates available for the chosen (16-bit preferred) sample width.
3779 std::vector<unsigned int> rates;

3780 if ( inCaps.dwChannels >= 2 ) {

3781 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3782 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3783 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3784 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3785 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3786 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3787 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3788 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3790 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3791 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

3792 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

3793 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

3794 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

3796 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3797 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

3798 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

3799 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

3800 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

3803 else if ( inCaps.dwChannels == 1 ) {

3804 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3805 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3806 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3807 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3808 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3809 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3810 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3811 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3813 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3814 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

3815 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

3816 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

3817 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

3819 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3820 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

3821 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

3822 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

3823 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

3826 else info.inputChannels = 0; // technically, this would be an error

3830 if ( info.inputChannels == 0 ) return info;

3832 // Copy the supported rates to the info structure but avoid duplication.

// O(n*m) duplicate scan; rate lists are tiny so this is fine.
3834 for ( unsigned int i=0; i<rates.size(); i++ ) {

3836 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

3837 if ( rates[i] == info.sampleRates[j] ) {

3842 if ( found == false ) info.sampleRates.push_back( rates[i] );

3844 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

3846 // If device opens for both playback and capture, we determine the channels.

3847 if ( info.outputChannels > 0 && info.inputChannels > 0 )

3848 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// Device 0 is the default input (first capture device enumerated).
3850 if ( device == 0 ) info.isDefaultInput = true;

3852 // Copy name and return.

3853 info.name = dsDevices[ device ].name;

3854 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a stream on dsDevices[device].
//
// Responsibilities, in order: validate channel count/device, choose the
// sample format (16-bit preferred), size the DirectSound hardware buffer
// (>= MINIMUM_DEVICE_BUFFER_SIZE, grown to cover nBuffers*bufferSize of
// lead time), create and zero the playback secondary buffer or the
// capture buffer, allocate user/device conversion buffers, fill in the
// shared DsHandle, and finally spin up the callback thread.
// Returns via SUCCESS/FAILURE paths; the error-cleanup code at the
// bottom releases any DirectSound objects and buffers already acquired.
3858 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

3859 unsigned int firstChannel, unsigned int sampleRate,

3860 RtAudioFormat format, unsigned int *bufferSize,

3861 RtAudio::StreamOptions *options )

3863 if ( channels + firstChannel > 2 ) {

3864 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

3868 unsigned int nDevices = dsDevices.size();

3869 if ( nDevices == 0 ) {

3870 // This should not happen because a check is made before this function is called.

3871 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

3875 if ( device >= nDevices ) {

3876 // This should not happen because a check is made before this function is called.

3877 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

3881 if ( mode == OUTPUT ) {

3882 if ( dsDevices[ device ].validId[0] == false ) {

3883 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

3884 errorText_ = errorStream_.str();

3888 else { // mode == INPUT

3889 if ( dsDevices[ device ].validId[1] == false ) {

3890 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

3891 errorText_ = errorStream_.str();

3896 // According to a note in PortAudio, using GetDesktopWindow()

3897 // instead of GetForegroundWindow() is supposed to avoid problems

3898 // that occur when the application's window is not the foreground

3899 // window. Also, if the application window closes before the

3900 // DirectSound buffer, DirectSound can crash. In the past, I had

3901 // problems when using GetDesktopWindow() but it seems fine now

3902 // (January 2010). I'll leave it commented here.

3903 // HWND hWnd = GetForegroundWindow();

3904 HWND hWnd = GetDesktopWindow();

3906 // Check the numberOfBuffers parameter and limit the lowest value to

3907 // two. This is a judgement call and a value of two is probably too

3908 // low for capture, but it should work for playback.

// NOTE(review): the declaration of 'nBuffers' (original line 3909) is
// not visible in this chunk.
3910 if ( options ) nBuffers = options->numberOfBuffers;

3911 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

3912 if ( nBuffers < 2 ) nBuffers = 3;

3914 // Check the lower range of the user-specified buffer size and set

3915 // (arbitrarily) to a lower bound of 32.

3916 if ( *bufferSize < 32 ) *bufferSize = 32;

3918 // Create the wave format structure. The data format setting will

3919 // be determined later.

3920 WAVEFORMATEX waveFormat;

3921 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

3922 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

3923 waveFormat.nChannels = channels + firstChannel;

3924 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

3926 // Determine the device buffer size. By default, we'll use the value

3927 // defined above (32K), but we will grow it to make allowances for

3928 // very large software buffer sizes.

// NOTE(review): stray double semicolon below — harmless, but worth
// cleaning up in a code change.
3929 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;

3930 DWORD dsPointerLeadTime = 0;

3932 void *ohandle = 0, *bhandle = 0;

3934 if ( mode == OUTPUT ) {

3936 LPDIRECTSOUND output;

3937 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3938 if ( FAILED( result ) ) {

3939 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3940 errorText_ = errorStream_.str();

3945 outCaps.dwSize = sizeof( outCaps );

3946 result = output->GetCaps( &outCaps );

3947 if ( FAILED( result ) ) {

3948 output->Release();

3949 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

3950 errorText_ = errorStream_.str();

3954 // Check channel information.

3955 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

3956 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

3957 errorText_ = errorStream_.str();

3961 // Check format information. Use 16-bit format unless not

3962 // supported or user requests 8-bit.

3963 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

3964 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

3965 waveFormat.wBitsPerSample = 16;

3966 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

3969 waveFormat.wBitsPerSample = 8;

3970 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

3972 stream_.userFormat = format;

3974 // Update wave format structure and buffer information.

3975 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

3976 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

3977 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

3979 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

3980 while ( dsPointerLeadTime * 2U > dsBufferSize )

3981 dsBufferSize *= 2;

3983 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

3984 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

3985 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

3986 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

3987 if ( FAILED( result ) ) {

3988 output->Release();

3989 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

3990 errorText_ = errorStream_.str();

3994 // Even though we will write to the secondary buffer, we need to

3995 // access the primary buffer to set the correct output format

3996 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

3997 // buffer description.

3998 DSBUFFERDESC bufferDescription;

3999 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

4000 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

4001 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

4003 // Obtain the primary buffer

4004 LPDIRECTSOUNDBUFFER buffer;

4005 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4006 if ( FAILED( result ) ) {

4007 output->Release();

4008 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

4009 errorText_ = errorStream_.str();

4013 // Set the primary DS buffer sound format.

4014 result = buffer->SetFormat( &waveFormat );

4015 if ( FAILED( result ) ) {

4016 output->Release();

4017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

4018 errorText_ = errorStream_.str();

4022 // Setup the secondary DS buffer description.

// 'buffer' is reused below for the secondary buffer; the primary
// buffer object is intentionally not kept.
4023 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

4024 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

4025 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4026 DSBCAPS_GLOBALFOCUS |

4027 DSBCAPS_GETCURRENTPOSITION2 |

4028 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

4029 bufferDescription.dwBufferBytes = dsBufferSize;

4030 bufferDescription.lpwfxFormat = &waveFormat;

4032 // Try to create the secondary DS buffer. If that doesn't work,

4033 // try to use software mixing. Otherwise, there's a problem.

4034 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4035 if ( FAILED( result ) ) {

4036 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4037 DSBCAPS_GLOBALFOCUS |

4038 DSBCAPS_GETCURRENTPOSITION2 |

4039 DSBCAPS_LOCSOFTWARE ); // Force software mixing

4040 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4041 if ( FAILED( result ) ) {

4042 output->Release();

4043 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

4044 errorText_ = errorStream_.str();

4049 // Get the buffer size ... might be different from what we specified.

// NOTE(review): the DSBCAPS declaration (original line 4050) is not
// visible in this chunk.
4051 dsbcaps.dwSize = sizeof( DSBCAPS );

4052 result = buffer->GetCaps( &dsbcaps );

4053 if ( FAILED( result ) ) {

4054 output->Release();

4055 buffer->Release();

4056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4057 errorText_ = errorStream_.str();

4061 dsBufferSize = dsbcaps.dwBufferBytes;

4063 // Lock the DS buffer

// NOTE(review): the audioPtr/dataLen declarations (original lines
// 4064-4065) are not visible in this chunk.
4066 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4067 if ( FAILED( result ) ) {

4068 output->Release();

4069 buffer->Release();

4070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

4071 errorText_ = errorStream_.str();

4075 // Zero the DS buffer

4076 ZeroMemory( audioPtr, dataLen );

4078 // Unlock the DS buffer

4079 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4080 if ( FAILED( result ) ) {

4081 output->Release();

4082 buffer->Release();

4083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

4084 errorText_ = errorStream_.str();

// Stash the raw COM pointers; they go into the DsHandle below.
4088 ohandle = (void *) output;

4089 bhandle = (void *) buffer;

4092 if ( mode == INPUT ) {

4094 LPDIRECTSOUNDCAPTURE input;

4095 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

4096 if ( FAILED( result ) ) {

4097 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

4098 errorText_ = errorStream_.str();

4103 inCaps.dwSize = sizeof( inCaps );

4104 result = input->GetCaps( &inCaps );

4105 if ( FAILED( result ) ) {

4107 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

4108 errorText_ = errorStream_.str();

4112 // Check channel information.

4113 if ( inCaps.dwChannels < channels + firstChannel ) {

4114 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

4118 // Check format information. Use 16-bit format unless user

4119 // requests 8-bit.

4120 DWORD deviceFormats;

4121 if ( channels + firstChannel == 2 ) {

4122 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

4123 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4124 waveFormat.wBitsPerSample = 8;

4125 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4127 else { // assume 16-bit is supported

4128 waveFormat.wBitsPerSample = 16;

4129 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4132 else { // channel == 1

4133 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

4134 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4135 waveFormat.wBitsPerSample = 8;

4136 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4138 else { // assume 16-bit is supported

4139 waveFormat.wBitsPerSample = 16;

4140 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4143 stream_.userFormat = format;

4145 // Update wave format structure and buffer information.

4146 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

4147 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

4148 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

4150 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

4151 while ( dsPointerLeadTime * 2U > dsBufferSize )

4152 dsBufferSize *= 2;

4154 // Setup the secondary DS buffer description.

4155 DSCBUFFERDESC bufferDescription;

4156 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

4157 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

4158 bufferDescription.dwFlags = 0;

4159 bufferDescription.dwReserved = 0;

4160 bufferDescription.dwBufferBytes = dsBufferSize;

4161 bufferDescription.lpwfxFormat = &waveFormat;

4163 // Create the capture buffer.

4164 LPDIRECTSOUNDCAPTUREBUFFER buffer;

4165 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

4166 if ( FAILED( result ) ) {

4168 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

4169 errorText_ = errorStream_.str();

4173 // Get the buffer size ... might be different from what we specified.

4174 DSCBCAPS dscbcaps;

4175 dscbcaps.dwSize = sizeof( DSCBCAPS );

4176 result = buffer->GetCaps( &dscbcaps );

4177 if ( FAILED( result ) ) {

4179 buffer->Release();

4180 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4181 errorText_ = errorStream_.str();

4185 dsBufferSize = dscbcaps.dwBufferBytes;

4187 // NOTE: We could have a problem here if this is a duplex stream

4188 // and the play and capture hardware buffer sizes are different

4189 // (I'm actually not sure if that is a problem or not).

4190 // Currently, we are not verifying that.

4192 // Lock the capture buffer

4195 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4196 if ( FAILED( result ) ) {

4198 buffer->Release();

4199 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

4200 errorText_ = errorStream_.str();

4204 // Zero the buffer

4205 ZeroMemory( audioPtr, dataLen );

4207 // Unlock the buffer

4208 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4209 if ( FAILED( result ) ) {

4211 buffer->Release();

4212 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

4213 errorText_ = errorStream_.str();

4217 ohandle = (void *) input;

4218 bhandle = (void *) buffer;

4221 // Set various stream parameters

4222 DsHandle *handle = 0;

4223 stream_.nDeviceChannels[mode] = channels + firstChannel;

4224 stream_.nUserChannels[mode] = channels;

4225 stream_.bufferSize = *bufferSize;

4226 stream_.channelOffset[mode] = firstChannel;

4227 stream_.deviceInterleaved[mode] = true;

4228 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

4229 else stream_.userInterleaved = true;

4231 // Set flag for buffer conversion

// Conversion is needed when channel count, sample format, or
// interleaving differs between the user and device sides.
4232 stream_.doConvertBuffer[mode] = false;

4233 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

4234 stream_.doConvertBuffer[mode] = true;

4235 if (stream_.userFormat != stream_.deviceFormat[mode])

4236 stream_.doConvertBuffer[mode] = true;

4237 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4238 stream_.nUserChannels[mode] > 1 )

4239 stream_.doConvertBuffer[mode] = true;

4241 // Allocate necessary internal buffers

4242 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

4243 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

4244 if ( stream_.userBuffer[mode] == NULL ) {

4245 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

4249 if ( stream_.doConvertBuffer[mode] ) {

// Reuse an already-allocated (duplex) device buffer when it is big
// enough for this direction too.
4251 bool makeBuffer = true;

4252 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

4253 if ( mode == INPUT ) {

4254 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

4255 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

4256 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

4260 if ( makeBuffer ) {

4261 bufferBytes *= *bufferSize;

4262 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

4263 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

4264 if ( stream_.deviceBuffer == NULL ) {

4265 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

4271 // Allocate our DsHandle structures for the stream.

4272 if ( stream_.apiHandle == 0 ) {

4274 handle = new DsHandle;

4276 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this is the DirectSound
// path allocating a DsHandle — likely a copy/paste slip worth fixing
// in a code change.
4277 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

4281 // Create a manual-reset event.

// Signaled by the callback when draining completes; stopStream waits
// on it.
4282 handle->condition = CreateEvent( NULL, // no security

4283 TRUE, // manual-reset

4284 FALSE, // non-signaled initially

4285 NULL ); // unnamed

4286 stream_.apiHandle = (void *) handle;

4289 handle = (DsHandle *) stream_.apiHandle;

4290 handle->id[mode] = ohandle;

4291 handle->buffer[mode] = bhandle;

4292 handle->dsBufferSize[mode] = dsBufferSize;

4293 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

4295 stream_.device[mode] = device;

4296 stream_.state = STREAM_STOPPED;

4297 if ( stream_.mode == OUTPUT && mode == INPUT )

4298 // We had already set up an output stream.

4299 stream_.mode = DUPLEX;

4301 stream_.mode = mode;

4302 stream_.nBuffers = nBuffers;

4303 stream_.sampleRate = sampleRate;

4305 // Setup the buffer conversion information structure.

4306 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

4308 // Setup the callback thread.

// Only started once; a duplex second call finds isRunning already true.
4309 if ( stream_.callbackInfo.isRunning == false ) {

4310 unsigned threadId;

4311 stream_.callbackInfo.isRunning = true;

4312 stream_.callbackInfo.object = (void *) this;

4313 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

4314 &stream_.callbackInfo, 0, &threadId );

4315 if ( stream_.callbackInfo.thread == 0 ) {

4316 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

4320 // Boost DS thread priority

4321 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error-cleanup path: release whatever DirectSound objects, event
// handle, and heap buffers were acquired before the failure.
// NOTE(review): the 'error:' label itself (near original line 4325) is
// not visible in this chunk.
4327 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4328 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4329 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4330 if ( buffer ) buffer->Release();

4331 object->Release();

4333 if ( handle->buffer[1] ) {

4334 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4335 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4336 if ( buffer ) buffer->Release();

4337 object->Release();

4339 CloseHandle( handle->condition );

4341 stream_.apiHandle = 0;

4344 for ( int i=0; i<2; i++ ) {

4345 if ( stream_.userBuffer[i] ) {

4346 free( stream_.userBuffer[i] );

4347 stream_.userBuffer[i] = 0;

4351 if ( stream_.deviceBuffer ) {

4352 free( stream_.deviceBuffer );

4353 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop and join the callback thread, release
// the DirectSound playback/capture objects and buffers, close the
// drain-condition event, free user/device buffers, and reset the
// stream bookkeeping to UNINITIALIZED/STREAM_CLOSED.
4359 void RtApiDs :: closeStream()

4361 if ( stream_.state == STREAM_CLOSED ) {

4362 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

4363 error( RtError::WARNING );

4367 // Stop the callback thread.

// Clearing isRunning makes callbackHandler's loop exit; then join.
4368 stream_.callbackInfo.isRunning = false;

4369 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

4370 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

4372 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4374 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4375 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4376 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4379 buffer->Release();

4381 object->Release();

4383 if ( handle->buffer[1] ) {

4384 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4385 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4388 buffer->Release();

4390 object->Release();

4392 CloseHandle( handle->condition );

// NOTE(review): 'handle' is not deleted in the visible lines; confirm
// the full file frees the DsHandle here (original lines 4393 area).
4394 stream_.apiHandle = 0;

4397 for ( int i=0; i<2; i++ ) {

4398 if ( stream_.userBuffer[i] ) {

4399 free( stream_.userBuffer[i] );

4400 stream_.userBuffer[i] = 0;

4404 if ( stream_.deviceBuffer ) {

4405 free( stream_.deviceBuffer );

4406 stream_.deviceBuffer = 0;

4409 stream_.mode = UNINITIALIZED;

4410 stream_.state = STREAM_CLOSED;
\r
// Start the stream: begin looping playback (Play) and/or capture
// (Start) on the DirectSound buffers, reset drain state, and mark the
// stream STREAM_RUNNING.  On DUPLEX streams, half a second of capture
// data is discarded (duplexPrerollBytes) while the devices spin up.
4413 void RtApiDs :: startStream()

4416 if ( stream_.state == STREAM_RUNNING ) {

4417 errorText_ = "RtApiDs::startStream(): the stream is already running!";

4418 error( RtError::WARNING );

4422 //MUTEX_LOCK( &stream_.mutex );

4424 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4426 // Increase scheduler frequency on lesser windows (a side-effect of

4427 // increasing timer accuracy). On greater windows (Win2K or later),

4428 // this is already in effect.

// Paired with a timeEndPeriod() call elsewhere (not visible in this
// chunk) — confirm the balance in the full file.
4429 timeBeginPeriod( 1 );

4431 buffersRolling = false;

4432 duplexPrerollBytes = 0;

4434 if ( stream_.mode == DUPLEX ) {

4435 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

4436 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

4439 HRESULT result = 0;

4440 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4442 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4443 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

4444 if ( FAILED( result ) ) {

4445 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

4446 errorText_ = errorStream_.str();

4451 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4453 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4454 result = buffer->Start( DSCBSTART_LOOPING );

4455 if ( FAILED( result ) ) {

4456 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

4457 errorText_ = errorStream_.str();

// Reset drain bookkeeping so a later stopStream() drains cleanly.
4462 handle->drainCounter = 0;

4463 handle->internalDrain = false;

4464 ResetEvent( handle->condition );

4465 stream_.state = STREAM_RUNNING;

4468 // MUTEX_UNLOCK( &stream_.mutex );

4470 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4473 void RtApiDs :: stopStream()
\r
4476 if ( stream_.state == STREAM_STOPPED ) {
\r
4477 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4478 error( RtError::WARNING );
\r
4483 MUTEX_LOCK( &stream_.mutex );
\r
4485 if ( stream_.state == STREAM_STOPPED ) {
\r
4486 MUTEX_UNLOCK( &stream_.mutex );
\r
4491 HRESULT result = 0;
\r
4494 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4495 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4496 if ( handle->drainCounter == 0 ) {
\r
4497 handle->drainCounter = 2;
\r
4498 // MUTEX_UNLOCK( &stream_.mutex );
\r
4499 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4500 //ResetEvent( handle->condition );
\r
4501 // MUTEX_LOCK( &stream_.mutex );
\r
4504 stream_.state = STREAM_STOPPED;
\r
4506 // Stop the buffer and clear memory
\r
4507 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4508 result = buffer->Stop();
\r
4509 if ( FAILED( result ) ) {
\r
4510 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4511 errorText_ = errorStream_.str();
\r
4515 // Lock the buffer and clear it so that if we start to play again,
\r
4516 // we won't have old data playing.
\r
4517 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4518 if ( FAILED( result ) ) {
\r
4519 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4520 errorText_ = errorStream_.str();
\r
4524 // Zero the DS buffer
\r
4525 ZeroMemory( audioPtr, dataLen );
\r
4527 // Unlock the DS buffer
\r
4528 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4529 if ( FAILED( result ) ) {
\r
4530 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4531 errorText_ = errorStream_.str();
\r
4535 // If we start playing again, we must begin at beginning of buffer.
\r
4536 handle->bufferPointer[0] = 0;
\r
4539 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4540 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4544 stream_.state = STREAM_STOPPED;
\r
4546 result = buffer->Stop();
\r
4547 if ( FAILED( result ) ) {
\r
4548 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4549 errorText_ = errorStream_.str();
\r
4553 // Lock the buffer and clear it so that if we start to play again,
\r
4554 // we won't have old data playing.
\r
4555 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4556 if ( FAILED( result ) ) {
\r
4557 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4558 errorText_ = errorStream_.str();
\r
4562 // Zero the DS buffer
\r
4563 ZeroMemory( audioPtr, dataLen );
\r
4565 // Unlock the DS buffer
\r
4566 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4567 if ( FAILED( result ) ) {
\r
4568 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4569 errorText_ = errorStream_.str();
\r
4573 // If we start recording again, we must begin at beginning of buffer.
\r
4574 handle->bufferPointer[1] = 0;
\r
4578 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4579 // MUTEX_UNLOCK( &stream_.mutex );
\r
4581 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4584 void RtApiDs :: abortStream()
\r
4587 if ( stream_.state == STREAM_STOPPED ) {
\r
4588 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4589 error( RtError::WARNING );
\r
4593 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4594 handle->drainCounter = 2;
\r
4599 void RtApiDs :: callbackEvent()
\r
4601 if ( stream_.state == STREAM_STOPPED ) {
\r
4602 Sleep( 50 ); // sleep 50 milliseconds
\r
4606 if ( stream_.state == STREAM_CLOSED ) {
\r
4607 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4608 error( RtError::WARNING );
\r
4612 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4613 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4615 // Check if we were draining the stream and signal is finished.
\r
4616 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4617 if ( handle->internalDrain == false )
\r
4618 SetEvent( handle->condition );
\r
4625 MUTEX_LOCK( &stream_.mutex );
\r
4627 // The state might change while waiting on a mutex.
\r
4628 if ( stream_.state == STREAM_STOPPED ) {
\r
4629 MUTEX_UNLOCK( &stream_.mutex );
\r
4634 // Invoke user callback to get fresh output data UNLESS we are
\r
4635 // draining stream.
\r
4636 if ( handle->drainCounter == 0 ) {
\r
4637 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4638 double streamTime = getStreamTime();
\r
4639 RtAudioStreamStatus status = 0;
\r
4640 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4641 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4642 handle->xrun[0] = false;
\r
4644 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4645 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4646 handle->xrun[1] = false;
\r
4648 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4649 stream_.bufferSize, streamTime, status, info->userData );
\r
4650 if ( cbReturnValue == 2 ) {
\r
4651 // MUTEX_UNLOCK( &stream_.mutex );
\r
4652 handle->drainCounter = 2;
\r
4656 else if ( cbReturnValue == 1 )
\r
4657 handle->drainCounter = 1;
\r
4658 handle->internalDrain = true;
\r
4662 DWORD currentWritePointer, safeWritePointer;
\r
4663 DWORD currentReadPointer, safeReadPointer;
\r
4664 UINT nextWritePointer;
\r
4666 LPVOID buffer1 = NULL;
\r
4667 LPVOID buffer2 = NULL;
\r
4668 DWORD bufferSize1 = 0;
\r
4669 DWORD bufferSize2 = 0;
\r
4674 if ( buffersRolling == false ) {
\r
4675 if ( stream_.mode == DUPLEX ) {
\r
4676 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4678 // It takes a while for the devices to get rolling. As a result,
\r
4679 // there's no guarantee that the capture and write device pointers
\r
4680 // will move in lockstep. Wait here for both devices to start
\r
4681 // rolling, and then set our buffer pointers accordingly.
\r
4682 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4683 // bytes later than the write buffer.
\r
4685 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4686 // take place between the two GetCurrentPosition calls... but I'm
\r
4687 // really not sure how to solve the problem. Temporarily boost to
\r
4688 // Realtime priority, maybe; but I'm not sure what priority the
\r
4689 // DirectSound service threads run at. We *should* be roughly
\r
4690 // within a ms or so of correct.
\r
4692 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4693 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4695 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4697 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4698 if ( FAILED( result ) ) {
\r
4699 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4700 errorText_ = errorStream_.str();
\r
4701 error( RtError::SYSTEM_ERROR );
\r
4703 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4704 if ( FAILED( result ) ) {
\r
4705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4706 errorText_ = errorStream_.str();
\r
4707 error( RtError::SYSTEM_ERROR );
\r
4710 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4711 if ( FAILED( result ) ) {
\r
4712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4713 errorText_ = errorStream_.str();
\r
4714 error( RtError::SYSTEM_ERROR );
\r
4716 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4717 if ( FAILED( result ) ) {
\r
4718 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4719 errorText_ = errorStream_.str();
\r
4720 error( RtError::SYSTEM_ERROR );
\r
4722 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4726 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4728 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4729 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4730 handle->bufferPointer[1] = safeReadPointer;
\r
4732 else if ( stream_.mode == OUTPUT ) {
\r
4734 // Set the proper nextWritePosition after initial startup.
\r
4735 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4736 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4737 if ( FAILED( result ) ) {
\r
4738 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4739 errorText_ = errorStream_.str();
\r
4740 error( RtError::SYSTEM_ERROR );
\r
4742 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4743 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4746 buffersRolling = true;
\r
4749 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4751 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4753 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4754 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4755 bufferBytes *= formatBytes( stream_.userFormat );
\r
4756 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4759 // Setup parameters and do buffer conversion if necessary.
\r
4760 if ( stream_.doConvertBuffer[0] ) {
\r
4761 buffer = stream_.deviceBuffer;
\r
4762 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4763 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4764 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4767 buffer = stream_.userBuffer[0];
\r
4768 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4769 bufferBytes *= formatBytes( stream_.userFormat );
\r
4772 // No byte swapping necessary in DirectSound implementation.
\r
4774 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4775 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4777 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4778 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4780 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4781 nextWritePointer = handle->bufferPointer[0];
\r
4783 DWORD endWrite, leadPointer;
\r
4785 // Find out where the read and "safe write" pointers are.
\r
4786 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4787 if ( FAILED( result ) ) {
\r
4788 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4789 errorText_ = errorStream_.str();
\r
4790 error( RtError::SYSTEM_ERROR );
\r
4793 // We will copy our output buffer into the region between
\r
4794 // safeWritePointer and leadPointer. If leadPointer is not
\r
4795 // beyond the next endWrite position, wait until it is.
\r
4796 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4797 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4798 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4799 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4800 endWrite = nextWritePointer + bufferBytes;
\r
4802 // Check whether the entire write region is behind the play pointer.
\r
4803 if ( leadPointer >= endWrite ) break;
\r
4805 // If we are here, then we must wait until the leadPointer advances
\r
4806 // beyond the end of our next write region. We use the
\r
4807 // Sleep() function to suspend operation until that happens.
\r
4808 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4809 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4810 if ( millis < 1.0 ) millis = 1.0;
\r
4811 Sleep( (DWORD) millis );
\r
4814 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4815 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4816 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4817 handle->xrun[0] = true;
\r
4818 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4819 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4820 handle->bufferPointer[0] = nextWritePointer;
\r
4821 endWrite = nextWritePointer + bufferBytes;
\r
4824 // Lock free space in the buffer
\r
4825 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4826 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4827 if ( FAILED( result ) ) {
\r
4828 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4829 errorText_ = errorStream_.str();
\r
4830 error( RtError::SYSTEM_ERROR );
\r
4833 // Copy our buffer into the DS buffer
\r
4834 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4835 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4837 // Update our buffer offset and unlock sound buffer
\r
4838 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4839 if ( FAILED( result ) ) {
\r
4840 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4841 errorText_ = errorStream_.str();
\r
4842 error( RtError::SYSTEM_ERROR );
\r
4844 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4845 handle->bufferPointer[0] = nextWritePointer;
\r
4847 if ( handle->drainCounter ) {
\r
4848 handle->drainCounter++;
\r
4853 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4855 // Setup parameters.
\r
4856 if ( stream_.doConvertBuffer[1] ) {
\r
4857 buffer = stream_.deviceBuffer;
\r
4858 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4859 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4862 buffer = stream_.userBuffer[1];
\r
4863 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4864 bufferBytes *= formatBytes( stream_.userFormat );
\r
4867 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4868 long nextReadPointer = handle->bufferPointer[1];
\r
4869 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4871 // Find out where the write and "safe read" pointers are.
\r
4872 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4873 if ( FAILED( result ) ) {
\r
4874 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4875 errorText_ = errorStream_.str();
\r
4876 error( RtError::SYSTEM_ERROR );
\r
4879 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4880 DWORD endRead = nextReadPointer + bufferBytes;
\r
4882 // Handling depends on whether we are INPUT or DUPLEX.
\r
4883 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4884 // then a wait here will drag the write pointers into the forbidden zone.
\r
4886 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4887 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4888 // practical way to sync up the read and write pointers reliably, given the
\r
4889 // the very complex relationship between phase and increment of the read and write
\r
4892 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4893 // provide a pre-roll period of 0.5 seconds in which we return
\r
4894 // zeros from the read buffer while the pointers sync up.
\r
4896 if ( stream_.mode == DUPLEX ) {
\r
4897 if ( safeReadPointer < endRead ) {
\r
4898 if ( duplexPrerollBytes <= 0 ) {
\r
4899 // Pre-roll time over. Be more agressive.
\r
4900 int adjustment = endRead-safeReadPointer;
\r
4902 handle->xrun[1] = true;
\r
4904 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4905 // and perform fine adjustments later.
\r
4906 // - small adjustments: back off by twice as much.
\r
4907 if ( adjustment >= 2*bufferBytes )
\r
4908 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4910 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4912 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4916 // In pre=roll time. Just do it.
\r
4917 nextReadPointer = safeReadPointer - bufferBytes;
\r
4918 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4920 endRead = nextReadPointer + bufferBytes;
\r
4923 else { // mode == INPUT
\r
4924 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4925 // See comments for playback.
\r
4926 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4927 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4928 if ( millis < 1.0 ) millis = 1.0;
\r
4929 Sleep( (DWORD) millis );
\r
4931 // Wake up and find out where we are now.
\r
4932 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4933 if ( FAILED( result ) ) {
\r
4934 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4935 errorText_ = errorStream_.str();
\r
4936 error( RtError::SYSTEM_ERROR );
\r
4939 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4943 // Lock free space in the buffer
\r
4944 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4945 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4946 if ( FAILED( result ) ) {
\r
4947 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4948 errorText_ = errorStream_.str();
\r
4949 error( RtError::SYSTEM_ERROR );
\r
4952 if ( duplexPrerollBytes <= 0 ) {
\r
4953 // Copy our buffer into the DS buffer
\r
4954 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4955 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4958 memset( buffer, 0, bufferSize1 );
\r
4959 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4960 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4963 // Update our buffer offset and unlock sound buffer
\r
4964 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4965 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4966 if ( FAILED( result ) ) {
\r
4967 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4968 errorText_ = errorStream_.str();
\r
4969 error( RtError::SYSTEM_ERROR );
\r
4971 handle->bufferPointer[1] = nextReadPointer;
\r
4973 // No byte swapping necessary in DirectSound implementation.
\r
4975 // If necessary, convert 8-bit data from unsigned to signed.
\r
4976 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4977 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4979 // Do buffer conversion if necessary.
\r
4980 if ( stream_.doConvertBuffer[1] )
\r
4981 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4985 // MUTEX_UNLOCK( &stream_.mutex );
\r
4987 RtApi::tickStreamTime();
\r
4990 // Definitions for utility functions and callbacks
\r
4991 // specific to the DirectSound implementation.
\r
4993 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4995 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4996 RtApiDs *object = (RtApiDs *) info->object;
\r
4997 bool* isRunning = &info->isRunning;
\r
4999 while ( *isRunning == true ) {
\r
5000 object->callbackEvent();
\r
5003 _endthreadex( 0 );
\r
5007 #include "tchar.h"
\r
5009 std::string convertTChar( LPCTSTR name )
\r
5011 #if defined( UNICODE ) || defined( _UNICODE )
\r
5012 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5013 std::string s( length, 0 );
\r
5014 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
5016 std::string s( name );
\r
5022 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5023 LPCTSTR description,
\r
5025 LPVOID lpContext )
\r
5027 bool *isInput = (bool *) lpContext;
\r
5030 bool validDevice = false;
\r
5031 if ( *isInput == true ) {
\r
5033 LPDIRECTSOUNDCAPTURE object;
\r
5035 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5036 if ( hr != DS_OK ) return TRUE;
\r
5038 caps.dwSize = sizeof(caps);
\r
5039 hr = object->GetCaps( &caps );
\r
5040 if ( hr == DS_OK ) {
\r
5041 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5042 validDevice = true;
\r
5044 object->Release();
\r
5048 LPDIRECTSOUND object;
\r
5049 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5050 if ( hr != DS_OK ) return TRUE;
\r
5052 caps.dwSize = sizeof(caps);
\r
5053 hr = object->GetCaps( &caps );
\r
5054 if ( hr == DS_OK ) {
\r
5055 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5056 validDevice = true;
\r
5058 object->Release();
\r
5061 // If good device, then save its name and guid.
\r
5062 std::string name = convertTChar( description );
\r
5063 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5064 name = "Default Device";
\r
5065 if ( validDevice ) {
\r
5066 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5067 if ( dsDevices[i].name == name ) {
\r
5068 dsDevices[i].found = true;
\r
5070 dsDevices[i].id[1] = lpguid;
\r
5071 dsDevices[i].validId[1] = true;
\r
5074 dsDevices[i].id[0] = lpguid;
\r
5075 dsDevices[i].validId[0] = true;
\r
5082 device.name = name;
\r
5083 device.found = true;
\r
5085 device.id[1] = lpguid;
\r
5086 device.validId[1] = true;
\r
5089 device.id[0] = lpguid;
\r
5090 device.validId[0] = true;
\r
5092 dsDevices.push_back( device );
\r
5098 static const char* getErrorString( int code )
\r
5102 case DSERR_ALLOCATED:
\r
5103 return "Already allocated";
\r
5105 case DSERR_CONTROLUNAVAIL:
\r
5106 return "Control unavailable";
\r
5108 case DSERR_INVALIDPARAM:
\r
5109 return "Invalid parameter";
\r
5111 case DSERR_INVALIDCALL:
\r
5112 return "Invalid call";
\r
5114 case DSERR_GENERIC:
\r
5115 return "Generic error";
\r
5117 case DSERR_PRIOLEVELNEEDED:
\r
5118 return "Priority level needed";
\r
5120 case DSERR_OUTOFMEMORY:
\r
5121 return "Out of memory";
\r
5123 case DSERR_BADFORMAT:
\r
5124 return "The sample rate or the channel format is not supported";
\r
5126 case DSERR_UNSUPPORTED:
\r
5127 return "Not supported";
\r
5129 case DSERR_NODRIVER:
\r
5130 return "No driver";
\r
5132 case DSERR_ALREADYINITIALIZED:
\r
5133 return "Already initialized";
\r
5135 case DSERR_NOAGGREGATION:
\r
5136 return "No aggregation";
\r
5138 case DSERR_BUFFERLOST:
\r
5139 return "Buffer lost";
\r
5141 case DSERR_OTHERAPPHASPRIO:
\r
5142 return "Another application already has priority";
\r
5144 case DSERR_UNINITIALIZED:
\r
5145 return "Uninitialized";
\r
5148 return "DirectSound unknown error";
\r
5151 //******************** End of __WINDOWS_DS__ *********************//
\r
5155 #if defined(__LINUX_ALSA__)
\r
5157 #include <alsa/asoundlib.h>
\r
5158 #include <unistd.h>
\r
5160 // A structure to hold various information related to the ALSA API
\r
5161 // implementation.
\r
5162 struct AlsaHandle {
\r
5163 snd_pcm_t *handles[2];
\r
5164 bool synchronized;
\r
5166 pthread_cond_t runnable_cv;
\r
5170 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5173 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5175 RtApiAlsa :: RtApiAlsa()
\r
5177 // Nothing to do here.
\r
5180 RtApiAlsa :: ~RtApiAlsa()
\r
5182 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5185 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5187 unsigned nDevices = 0;
\r
5188 int result, subdevice, card;
\r
5190 snd_ctl_t *handle;
\r
5192 // Count cards and devices
\r
5194 snd_card_next( &card );
\r
5195 while ( card >= 0 ) {
\r
5196 sprintf( name, "hw:%d", card );
\r
5197 result = snd_ctl_open( &handle, name, 0 );
\r
5198 if ( result < 0 ) {
\r
5199 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5200 errorText_ = errorStream_.str();
\r
5201 error( RtError::WARNING );
\r
5206 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5207 if ( result < 0 ) {
\r
5208 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5209 errorText_ = errorStream_.str();
\r
5210 error( RtError::WARNING );
\r
5213 if ( subdevice < 0 )
\r
5218 snd_ctl_close( handle );
\r
5219 snd_card_next( &card );
\r
5225 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5227 RtAudio::DeviceInfo info;
\r
5228 info.probed = false;
\r
5230 unsigned nDevices = 0;
\r
5231 int result, subdevice, card;
\r
5233 snd_ctl_t *chandle;
\r
5235 // Count cards and devices
\r
5237 snd_card_next( &card );
\r
5238 while ( card >= 0 ) {
\r
5239 sprintf( name, "hw:%d", card );
\r
5240 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5241 if ( result < 0 ) {
\r
5242 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5243 errorText_ = errorStream_.str();
\r
5244 error( RtError::WARNING );
\r
5249 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5250 if ( result < 0 ) {
\r
5251 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5252 errorText_ = errorStream_.str();
\r
5253 error( RtError::WARNING );
\r
5256 if ( subdevice < 0 ) break;
\r
5257 if ( nDevices == device ) {
\r
5258 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5264 snd_ctl_close( chandle );
\r
5265 snd_card_next( &card );
\r
5268 if ( nDevices == 0 ) {
\r
5269 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5270 error( RtError::INVALID_USE );
\r
5273 if ( device >= nDevices ) {
\r
5274 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5275 error( RtError::INVALID_USE );
\r
5280 // If a stream is already open, we cannot probe the stream devices.
\r
5281 // Thus, use the saved results.
\r
5282 if ( stream_.state != STREAM_CLOSED &&
\r
5283 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5284 snd_ctl_close( chandle );
\r
5285 if ( device >= devices_.size() ) {
\r
5286 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5287 error( RtError::WARNING );
\r
5290 return devices_[ device ];
\r
5293 int openMode = SND_PCM_ASYNC;
\r
5294 snd_pcm_stream_t stream;
\r
5295 snd_pcm_info_t *pcminfo;
\r
5296 snd_pcm_info_alloca( &pcminfo );
\r
5297 snd_pcm_t *phandle;
\r
5298 snd_pcm_hw_params_t *params;
\r
5299 snd_pcm_hw_params_alloca( ¶ms );
\r
5301 // First try for playback
\r
5302 stream = SND_PCM_STREAM_PLAYBACK;
\r
5303 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5304 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5305 snd_pcm_info_set_stream( pcminfo, stream );
\r
5307 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5308 if ( result < 0 ) {
\r
5309 // Device probably doesn't support playback.
\r
5310 goto captureProbe;
\r
5313 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5314 if ( result < 0 ) {
\r
5315 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5316 errorText_ = errorStream_.str();
\r
5317 error( RtError::WARNING );
\r
5318 goto captureProbe;
\r
5321 // The device is open ... fill the parameter structure.
\r
5322 result = snd_pcm_hw_params_any( phandle, params );
\r
5323 if ( result < 0 ) {
\r
5324 snd_pcm_close( phandle );
\r
5325 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5326 errorText_ = errorStream_.str();
\r
5327 error( RtError::WARNING );
\r
5328 goto captureProbe;
\r
5331 // Get output channel information.
\r
5332 unsigned int value;
\r
5333 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5334 if ( result < 0 ) {
\r
5335 snd_pcm_close( phandle );
\r
5336 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5337 errorText_ = errorStream_.str();
\r
5338 error( RtError::WARNING );
\r
5339 goto captureProbe;
\r
5341 info.outputChannels = value;
\r
5342 snd_pcm_close( phandle );
\r
5345 // Now try for capture
\r
5346 stream = SND_PCM_STREAM_CAPTURE;
\r
5347 snd_pcm_info_set_stream( pcminfo, stream );
\r
5349 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5350 snd_ctl_close( chandle );
\r
5351 if ( result < 0 ) {
\r
5352 // Device probably doesn't support capture.
\r
5353 if ( info.outputChannels == 0 ) return info;
\r
5354 goto probeParameters;
\r
5357 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5358 if ( result < 0 ) {
\r
5359 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5360 errorText_ = errorStream_.str();
\r
5361 error( RtError::WARNING );
\r
5362 if ( info.outputChannels == 0 ) return info;
\r
5363 goto probeParameters;
\r
5366 // The device is open ... fill the parameter structure.
\r
5367 result = snd_pcm_hw_params_any( phandle, params );
\r
5368 if ( result < 0 ) {
\r
5369 snd_pcm_close( phandle );
\r
5370 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5371 errorText_ = errorStream_.str();
\r
5372 error( RtError::WARNING );
\r
5373 if ( info.outputChannels == 0 ) return info;
\r
5374 goto probeParameters;
\r
5377 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5378 if ( result < 0 ) {
\r
5379 snd_pcm_close( phandle );
\r
5380 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5381 errorText_ = errorStream_.str();
\r
5382 error( RtError::WARNING );
\r
5383 if ( info.outputChannels == 0 ) return info;
\r
5384 goto probeParameters;
\r
5386 info.inputChannels = value;
\r
5387 snd_pcm_close( phandle );
\r
5389 // If device opens for both playback and capture, we determine the channels.
\r
5390 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5391 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5393 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5394 if ( device == 0 && info.outputChannels > 0 )
\r
5395 info.isDefaultOutput = true;
\r
5396 if ( device == 0 && info.inputChannels > 0 )
\r
5397 info.isDefaultInput = true;
\r
5400 // At this point, we just need to figure out the supported data
\r
5401 // formats and sample rates. We'll proceed by opening the device in
\r
5402 // the direction with the maximum number of channels, or playback if
\r
5403 // they are equal. This might limit our sample rate options, but so
\r
5406 if ( info.outputChannels >= info.inputChannels )
\r
5407 stream = SND_PCM_STREAM_PLAYBACK;
\r
5409 stream = SND_PCM_STREAM_CAPTURE;
\r
5410 snd_pcm_info_set_stream( pcminfo, stream );
\r
5412 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5413 if ( result < 0 ) {
\r
5414 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5415 errorText_ = errorStream_.str();
\r
5416 error( RtError::WARNING );
\r
5420 // The device is open ... fill the parameter structure.
\r
5421 result = snd_pcm_hw_params_any( phandle, params );
\r
5422 if ( result < 0 ) {
\r
5423 snd_pcm_close( phandle );
\r
5424 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5425 errorText_ = errorStream_.str();
\r
5426 error( RtError::WARNING );
\r
5430 // Test our discrete set of sample rate values.
\r
5431 info.sampleRates.clear();
\r
5432 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5433 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5434 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5436 if ( info.sampleRates.size() == 0 ) {
\r
5437 snd_pcm_close( phandle );
\r
5438 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5439 errorText_ = errorStream_.str();
\r
5440 error( RtError::WARNING );
\r
5444 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5445 snd_pcm_format_t format;
\r
5446 info.nativeFormats = 0;
\r
5447 format = SND_PCM_FORMAT_S8;
\r
5448 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5449 info.nativeFormats |= RTAUDIO_SINT8;
\r
5450 format = SND_PCM_FORMAT_S16;
\r
5451 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5452 info.nativeFormats |= RTAUDIO_SINT16;
\r
5453 format = SND_PCM_FORMAT_S24;
\r
5454 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5455 info.nativeFormats |= RTAUDIO_SINT24;
\r
5456 format = SND_PCM_FORMAT_S32;
\r
5457 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5458 info.nativeFormats |= RTAUDIO_SINT32;
\r
5459 format = SND_PCM_FORMAT_FLOAT;
\r
5460 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5461 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5462 format = SND_PCM_FORMAT_FLOAT64;
\r
5463 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5464 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5466 // Check that we have at least one supported format
\r
5467 if ( info.nativeFormats == 0 ) {
\r
5468 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5469 errorText_ = errorStream_.str();
\r
5470 error( RtError::WARNING );
\r
5474 // Get the device name
\r
5476 result = snd_card_get_name( card, &cardname );
\r
5477 if ( result >= 0 )
\r
5478 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5481 // That's all ... close the device and return
\r
5482 snd_pcm_close( phandle );
\r
5483 info.probed = true;
\r
// Re-probe all ALSA devices and cache the results in devices_.  Called from
// probeDeviceOpen() before a device is opened, because getDeviceInfo() cannot
// query a device that is already open.
// NOTE(review): this chunk is missing interior lines (the opening '{', the
// canonical devices_.clear() call, and the closing '}' are absent) -- confirm
// against the full file before editing.
5487 void RtApiAlsa :: saveDeviceInfo( void )

5491 unsigned int nDevices = getDeviceCount();

// Resize the cache, then fill one entry per device index.
5492 devices_.resize( nDevices );

5493 for ( unsigned int i=0; i<nDevices; i++ )

5494 devices_[i] = getDeviceInfo( i );

// Open and fully configure one direction (OUTPUT or INPUT) of an ALSA stream:
// resolves the "hw:card,subdevice" name, opens the PCM, negotiates access
// mode/format/rate/channels/period size, installs hw+sw params, allocates the
// user/device buffers and the AlsaHandle, and spawns the callback thread.
// Returns true on success (FAILURE paths appear to be on lines elided from
// this chunk).
// NOTE(review): many interior lines are missing here (closing braces, else
// branches, 'try {', 'goto'/'return FAILURE' statements, the 'foundDevice'
// label and error-cleanup label) -- do not restructure without the full file.
5497 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5498 unsigned int firstChannel, unsigned int sampleRate,

5499 RtAudioFormat format, unsigned int *bufferSize,

5500 RtAudio::StreamOptions *options )

5503 #if defined(__RTAUDIO_DEBUG__)

5504 snd_output_t *out;

5505 snd_output_stdio_attach(&out, stderr, 0);

5508 // I'm not using the "plug" interface ... too much inconsistent behavior.

5510 unsigned nDevices = 0;

5511 int result, subdevice, card;

5513 snd_ctl_t *chandle;

// Honor an explicit request for the ALSA "default" device.
5515 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )

5516 snprintf(name, sizeof(name), "%s", "default");

5518 // Count cards and devices

// Walk cards/subdevices until the flat index 'device' is reached; the matching
// "hw:%d,%d" name is written into 'name'.
5520 snd_card_next( &card );

5521 while ( card >= 0 ) {

5522 sprintf( name, "hw:%d", card );

5523 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );

5524 if ( result < 0 ) {

5525 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";

5526 errorText_ = errorStream_.str();

5531 result = snd_ctl_pcm_next_device( chandle, &subdevice );

5532 if ( result < 0 ) break;

5533 if ( subdevice < 0 ) break;

5534 if ( nDevices == device ) {

5535 sprintf( name, "hw:%d,%d", card, subdevice );

5536 snd_ctl_close( chandle );

5541 snd_ctl_close( chandle );

5542 snd_card_next( &card );

5545 if ( nDevices == 0 ) {

5546 // This should not happen because a check is made before this function is called.

5547 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";

5551 if ( device >= nDevices ) {

5552 // This should not happen because a check is made before this function is called.

5553 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";

5560 // The getDeviceInfo() function will not work for a device that is

5561 // already open. Thus, we'll probe the system before opening a

5562 // stream and save the results for use by getDeviceInfo().

5563 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once

5564 this->saveDeviceInfo();

// Choose the PCM direction for this call.
5566 snd_pcm_stream_t stream;

5567 if ( mode == OUTPUT )

5568 stream = SND_PCM_STREAM_PLAYBACK;

5570 stream = SND_PCM_STREAM_CAPTURE;

5572 snd_pcm_t *phandle;

5573 int openMode = SND_PCM_ASYNC;

5574 result = snd_pcm_open( &phandle, name, stream, openMode );

5575 if ( result < 0 ) {

5576 if ( mode == OUTPUT )

5577 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";

5579 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";

5580 errorText_ = errorStream_.str();

5584 // Fill the parameter structure.

5585 snd_pcm_hw_params_t *hw_params;

5586 snd_pcm_hw_params_alloca( &hw_params );

5587 result = snd_pcm_hw_params_any( phandle, hw_params );

5588 if ( result < 0 ) {

5589 snd_pcm_close( phandle );

5590 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";

5591 errorText_ = errorStream_.str();

5595 #if defined(__RTAUDIO_DEBUG__)

5596 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );

5597 snd_pcm_hw_params_dump( hw_params, out );

5600 // Set access ... check user preference.

// Try the user's preferred interleaving first; fall back to the other layout
// and record which one the device actually accepted in deviceInterleaved[mode].
5601 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {

5602 stream_.userInterleaved = false;

5603 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );

5604 if ( result < 0 ) {

5605 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );

5606 stream_.deviceInterleaved[mode] = true;

5609 stream_.deviceInterleaved[mode] = false;

5612 stream_.userInterleaved = true;

5613 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );

5614 if ( result < 0 ) {

5615 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );

5616 stream_.deviceInterleaved[mode] = false;

5619 stream_.deviceInterleaved[mode] = true;

5622 if ( result < 0 ) {

5623 snd_pcm_close( phandle );

5624 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";

5625 errorText_ = errorStream_.str();

5629 // Determine how to set the device format.

5630 stream_.userFormat = format;

5631 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;

// Map the requested RtAudio format to the corresponding ALSA PCM format.
5633 if ( format == RTAUDIO_SINT8 )

5634 deviceFormat = SND_PCM_FORMAT_S8;

5635 else if ( format == RTAUDIO_SINT16 )

5636 deviceFormat = SND_PCM_FORMAT_S16;

5637 else if ( format == RTAUDIO_SINT24 )

5638 deviceFormat = SND_PCM_FORMAT_S24;

5639 else if ( format == RTAUDIO_SINT32 )

5640 deviceFormat = SND_PCM_FORMAT_S32;

5641 else if ( format == RTAUDIO_FLOAT32 )

5642 deviceFormat = SND_PCM_FORMAT_FLOAT;

5643 else if ( format == RTAUDIO_FLOAT64 )

5644 deviceFormat = SND_PCM_FORMAT_FLOAT64;

5646 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {

5647 stream_.deviceFormat[mode] = format;

5651 // The user requested format is not natively supported by the device.

// Fall back through candidate formats from widest to narrowest; the format
// converter will translate between deviceFormat[mode] and userFormat.
5652 deviceFormat = SND_PCM_FORMAT_FLOAT64;

5653 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {

5654 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;

5658 deviceFormat = SND_PCM_FORMAT_FLOAT;

5659 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {

5660 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

5664 deviceFormat = SND_PCM_FORMAT_S32;

5665 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {

5666 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

5670 deviceFormat = SND_PCM_FORMAT_S24;

5671 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {

5672 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

5676 deviceFormat = SND_PCM_FORMAT_S16;

5677 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {

5678 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5682 deviceFormat = SND_PCM_FORMAT_S8;

5683 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {

5684 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5688 // If we get here, no supported format was found.

5689 snd_pcm_close( phandle );

5690 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";

5691 errorText_ = errorStream_.str();

5695 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );

5696 if ( result < 0 ) {

5697 snd_pcm_close( phandle );

5698 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";

5699 errorText_ = errorStream_.str();

5703 // Determine whether byte-swaping is necessary.

// snd_pcm_format_cpu_endian() returns 1 (CPU endian), 0 (opposite), or < 0 on
// error; S8 is skipped because a single byte has no endianness.
5704 stream_.doByteSwap[mode] = false;

5705 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {

5706 result = snd_pcm_format_cpu_endian( deviceFormat );

5707 if ( result == 0 )

5708 stream_.doByteSwap[mode] = true;

5709 else if (result < 0) {

5710 snd_pcm_close( phandle );

5711 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";

5712 errorText_ = errorStream_.str();

5717 // Set the sample rate.

// set_rate_near may adjust sampleRate to the closest supported value.
5718 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );

5719 if ( result < 0 ) {

5720 snd_pcm_close( phandle );

5721 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";

5722 errorText_ = errorStream_.str();

5726 // Determine the number of channels for this device. We support a possible

5727 // minimum device channel number > than the value requested by the user.

5728 stream_.nUserChannels[mode] = channels;

5729 unsigned int value;

5730 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );

5731 unsigned int deviceChannels = value;

5732 if ( result < 0 || deviceChannels < channels + firstChannel ) {

5733 snd_pcm_close( phandle );

5734 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";

5735 errorText_ = errorStream_.str();

5739 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );

5740 if ( result < 0 ) {

5741 snd_pcm_close( phandle );

5742 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";

5743 errorText_ = errorStream_.str();

// Open at least channels + firstChannel channels so the requested offset fits.
5746 deviceChannels = value;

5747 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;

5748 stream_.nDeviceChannels[mode] = deviceChannels;

5750 // Set the device channels.

5751 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );

5752 if ( result < 0 ) {

5753 snd_pcm_close( phandle );

5754 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";

5755 errorText_ = errorStream_.str();

5759 // Set the buffer (or period) size.

// NOTE(review): 'dir' is used below but its declaration (original line 5760)
// is one of the lines missing from this chunk.
5761 snd_pcm_uframes_t periodSize = *bufferSize;

5762 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );

5763 if ( result < 0 ) {

5764 snd_pcm_close( phandle );

5765 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";

5766 errorText_ = errorStream_.str();

// Report the (possibly adjusted) period size back to the caller.
5769 *bufferSize = periodSize;

5771 // Set the buffer number, which in ALSA is referred to as the "period".

5772 unsigned int periods = 0;

5773 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;

5774 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;

5775 if ( periods < 2 ) periods = 4; // a fairly safe default value

5776 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );

5777 if ( result < 0 ) {

5778 snd_pcm_close( phandle );

5779 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";

5780 errorText_ = errorStream_.str();

5784 // If attempting to setup a duplex stream, the bufferSize parameter

5785 // MUST be the same in both directions!

5786 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {

5787 snd_pcm_close( phandle );

5788 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";

5789 errorText_ = errorStream_.str();

5793 stream_.bufferSize = *bufferSize;

5795 // Install the hardware configuration

5796 result = snd_pcm_hw_params( phandle, hw_params );

5797 if ( result < 0 ) {

5798 snd_pcm_close( phandle );

5799 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";

5800 errorText_ = errorStream_.str();

5804 #if defined(__RTAUDIO_DEBUG__)

5805 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");

5806 snd_pcm_hw_params_dump( hw_params, out );

5809 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.

5810 snd_pcm_sw_params_t *sw_params = NULL;

5811 snd_pcm_sw_params_alloca( &sw_params );

5812 snd_pcm_sw_params_current( phandle, sw_params );

5813 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );

// ULONG_MAX stop threshold keeps the device running across xruns.
5814 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );

5815 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );

5817 // The following two settings were suggested by Theo Veenker

5818 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );

5819 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );

5821 // here are two options for a fix

5822 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );

// Use the ring-buffer boundary as the silence size so unfilled space is zeroed.
5823 snd_pcm_uframes_t val;

5824 snd_pcm_sw_params_get_boundary( sw_params, &val );

5825 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );

5827 result = snd_pcm_sw_params( phandle, sw_params );

5828 if ( result < 0 ) {

5829 snd_pcm_close( phandle );

5830 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";

5831 errorText_ = errorStream_.str();

5835 #if defined(__RTAUDIO_DEBUG__)

5836 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");

5837 snd_pcm_sw_params_dump( sw_params, out );

5840 // Set flags for buffer conversion

// A conversion pass is needed whenever format, channel count, or interleaving
// differ between the user's view and what the device accepted.
5841 stream_.doConvertBuffer[mode] = false;

5842 if ( stream_.userFormat != stream_.deviceFormat[mode] )

5843 stream_.doConvertBuffer[mode] = true;

5844 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

5845 stream_.doConvertBuffer[mode] = true;

5846 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5847 stream_.nUserChannels[mode] > 1 )

5848 stream_.doConvertBuffer[mode] = true;

5850 // Allocate the ApiHandle if necessary and then save.

// First direction opened allocates the AlsaHandle; the second direction of a
// duplex stream reuses it and only stores its pcm handle.
5851 AlsaHandle *apiInfo = 0;

5852 if ( stream_.apiHandle == 0 ) {

5854 apiInfo = (AlsaHandle *) new AlsaHandle;

5856 catch ( std::bad_alloc& ) {

5857 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";

5861 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {

5862 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";

5866 stream_.apiHandle = (void *) apiInfo;

5867 apiInfo->handles[0] = 0;

5868 apiInfo->handles[1] = 0;

5871 apiInfo = (AlsaHandle *) stream_.apiHandle;

5873 apiInfo->handles[mode] = phandle;

5876 // Allocate necessary internal buffers.

5877 unsigned long bufferBytes;

5878 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5879 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5880 if ( stream_.userBuffer[mode] == NULL ) {

5881 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";

5885 if ( stream_.doConvertBuffer[mode] ) {

// Reuse the output-side device buffer for input when it is already big enough.
5887 bool makeBuffer = true;

5888 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5889 if ( mode == INPUT ) {

5890 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5891 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5892 if ( bufferBytes <= bytesOut ) makeBuffer = false;

5896 if ( makeBuffer ) {

5897 bufferBytes *= *bufferSize;

5898 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5899 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5900 if ( stream_.deviceBuffer == NULL ) {

5901 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";

5907 stream_.sampleRate = sampleRate;

5908 stream_.nBuffers = periods;

5909 stream_.device[mode] = device;

5910 stream_.state = STREAM_STOPPED;

5912 // Setup the buffer conversion information structure.

5913 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5915 // Setup thread if necessary.

5916 if ( stream_.mode == OUTPUT && mode == INPUT ) {

5917 // We had already set up an output stream.

5918 stream_.mode = DUPLEX;

5919 // Link the streams if possible.

// snd_pcm_link ties start/stop of the two handles together; failure is only a
// warning because the stream still works unsynchronized.
5920 apiInfo->synchronized = false;

5921 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )

5922 apiInfo->synchronized = true;

5924 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";

5925 error( RtError::WARNING );

5929 stream_.mode = mode;

5931 // Setup callback thread.

5932 stream_.callbackInfo.object = (void *) this;

5934 // Set the thread attributes for joinable and realtime scheduling

5935 // priority (optional). The higher priority will only take affect

5936 // if the program is run as root or suid. Note, under Linux

5937 // processes with CAP_SYS_NICE privilege, a user can change

5938 // scheduling policy and priority (thus need not be root). See

5939 // POSIX "capabilities".

5940 pthread_attr_t attr;

5941 pthread_attr_init( &attr );

5942 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

5943 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)

5944 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {

5945 struct sched_param param;

5946 int priority = options->priority;

5947 int min = sched_get_priority_min( SCHED_RR );

5948 int max = sched_get_priority_max( SCHED_RR );

// Clamp the requested priority to the SCHED_RR range.
5949 if ( priority < min ) priority = min;

5950 else if ( priority > max ) priority = max;

5951 param.sched_priority = priority;

// NOTE(review): "¶m" below is mojibake for "&param" (encoding corruption);
// must be repaired before this compiles.
5952 pthread_attr_setschedparam( &attr, ¶m );

5953 pthread_attr_setschedpolicy( &attr, SCHED_RR );

5956 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

5958 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

5961 stream_.callbackInfo.isRunning = true;

5962 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );

5963 pthread_attr_destroy( &attr );

5965 stream_.callbackInfo.isRunning = false;

5966 errorText_ = "RtApiAlsa::error creating callback thread!";

// Error cleanup path: tear down the condition variable, pcm handles, the
// AlsaHandle, and any allocated buffers.  (The label line for this path is
// missing from this chunk.)
5975 pthread_cond_destroy( &apiInfo->runnable_cv );

5976 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );

5977 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );

5979 stream_.apiHandle = 0;

5982 if ( phandle) snd_pcm_close( phandle );

5984 for ( int i=0; i<2; i++ ) {

5985 if ( stream_.userBuffer[i] ) {

5986 free( stream_.userBuffer[i] );

5987 stream_.userBuffer[i] = 0;

5991 if ( stream_.deviceBuffer ) {

5992 free( stream_.deviceBuffer );

5993 stream_.deviceBuffer = 0;

// Stop and tear down the open stream: wake and join the callback thread, drop
// any running pcm handles, destroy the AlsaHandle, free user/device buffers,
// and mark the stream UNINITIALIZED/CLOSED.
// NOTE(review): interior lines (braces, a 'delete apiInfo'-style cleanup in
// the canonical source) are missing from this chunk.
5999 void RtApiAlsa :: closeStream()

6001 if ( stream_.state == STREAM_CLOSED ) {

6002 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";

6003 error( RtError::WARNING );

6007 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

// Signal the callback loop to exit, then release a thread blocked on the
// runnable condition variable before joining it.
6008 stream_.callbackInfo.isRunning = false;

6009 MUTEX_LOCK( &stream_.mutex );

6010 if ( stream_.state == STREAM_STOPPED ) {

6011 apiInfo->runnable = true;

6012 pthread_cond_signal( &apiInfo->runnable_cv );

6014 MUTEX_UNLOCK( &stream_.mutex );

6015 pthread_join( stream_.callbackInfo.thread, NULL );

// Abort any in-flight audio with snd_pcm_drop before closing the handles.
6017 if ( stream_.state == STREAM_RUNNING ) {

6018 stream_.state = STREAM_STOPPED;

6019 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

6020 snd_pcm_drop( apiInfo->handles[0] );

6021 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )

6022 snd_pcm_drop( apiInfo->handles[1] );

6026 pthread_cond_destroy( &apiInfo->runnable_cv );

6027 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );

6028 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );

6030 stream_.apiHandle = 0;

6033 for ( int i=0; i<2; i++ ) {

6034 if ( stream_.userBuffer[i] ) {

6035 free( stream_.userBuffer[i] );

6036 stream_.userBuffer[i] = 0;

6040 if ( stream_.deviceBuffer ) {

6041 free( stream_.deviceBuffer );

6042 stream_.deviceBuffer = 0;

6045 stream_.mode = UNINITIALIZED;

6046 stream_.state = STREAM_CLOSED;

// Prepare (if needed) the output and/or input pcm handles, mark the stream
// RUNNING, and wake the callback thread via the runnable condition variable.
// NOTE(review): lines are missing from this chunk (verifyStream() call,
// 'int result = 0;', braces, and the 'unlock:' label in the canonical source).
6049 void RtApiAlsa :: startStream()

6051 // This method calls snd_pcm_prepare if the device isn't already in that state.

6054 if ( stream_.state == STREAM_RUNNING ) {

6055 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";

6056 error( RtError::WARNING );

6060 MUTEX_LOCK( &stream_.mutex );

6063 snd_pcm_state_t state;

6064 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

6065 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

// Prepare the playback handle if it is not already in the PREPARED state.
6066 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6067 state = snd_pcm_state( handle[0] );

6068 if ( state != SND_PCM_STATE_PREPARED ) {

6069 result = snd_pcm_prepare( handle[0] );

6070 if ( result < 0 ) {

6071 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";

6072 errorText_ = errorStream_.str();

// The capture handle is prepared separately only when the two directions are
// not linked (snd_pcm_link in probeDeviceOpen sets 'synchronized').
6078 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

6079 state = snd_pcm_state( handle[1] );

6080 if ( state != SND_PCM_STATE_PREPARED ) {

6081 result = snd_pcm_prepare( handle[1] );

6082 if ( result < 0 ) {

6083 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";

6084 errorText_ = errorStream_.str();

6090 stream_.state = STREAM_RUNNING;

// Release the callback thread waiting in callbackEvent().
6093 apiInfo->runnable = true;

6094 pthread_cond_signal( &apiInfo->runnable_cv );

6095 MUTEX_UNLOCK( &stream_.mutex );

6097 if ( result >= 0 ) return;

6098 error( RtError::SYSTEM_ERROR );

// Stop the stream gracefully: drain the playback handle (drop when linked),
// drop the capture handle, and mark the stream STOPPED.
// NOTE(review): interior lines (verifyStream(), 'int result = 0;', braces,
// goto targets) are missing from this chunk.
6101 void RtApiAlsa :: stopStream()

6104 if ( stream_.state == STREAM_STOPPED ) {

6105 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";

6106 error( RtError::WARNING );

// State is flipped before taking the mutex so the callback thread sees the
// stop and parks itself on the condition variable.
6110 stream_.state = STREAM_STOPPED;

6111 MUTEX_LOCK( &stream_.mutex );

6113 //if ( stream_.state == STREAM_STOPPED ) {

6114 // MUTEX_UNLOCK( &stream_.mutex );

6119 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

6120 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

// Linked (synchronized) duplex handles use drop; otherwise drain lets queued
// output play out before stopping.
6121 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6122 if ( apiInfo->synchronized )

6123 result = snd_pcm_drop( handle[0] );

6125 result = snd_pcm_drain( handle[0] );

6126 if ( result < 0 ) {

6127 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";

6128 errorText_ = errorStream_.str();

6133 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

6134 result = snd_pcm_drop( handle[1] );

6135 if ( result < 0 ) {

6136 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";

6137 errorText_ = errorStream_.str();

6143 stream_.state = STREAM_STOPPED;

6144 MUTEX_UNLOCK( &stream_.mutex );

6146 if ( result >= 0 ) return;

6147 error( RtError::SYSTEM_ERROR );

// Stop the stream immediately: drop (never drain) both pcm handles, discarding
// any pending audio, and mark the stream STOPPED.  Mirrors stopStream() except
// for the unconditional snd_pcm_drop on output.
// NOTE(review): interior lines (verifyStream(), 'int result = 0;', braces,
// goto targets) are missing from this chunk.
6150 void RtApiAlsa :: abortStream()

6153 if ( stream_.state == STREAM_STOPPED ) {

6154 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";

6155 error( RtError::WARNING );

// Flip the state before locking so the callback thread parks itself.
6159 stream_.state = STREAM_STOPPED;

6160 MUTEX_LOCK( &stream_.mutex );

6162 //if ( stream_.state == STREAM_STOPPED ) {

6163 // MUTEX_UNLOCK( &stream_.mutex );

6168 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

6169 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

6170 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6171 result = snd_pcm_drop( handle[0] );

6172 if ( result < 0 ) {

6173 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";

6174 errorText_ = errorStream_.str();

// Capture handle is dropped separately only when not linked to playback.
6179 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

6180 result = snd_pcm_drop( handle[1] );

6181 if ( result < 0 ) {

6182 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";

6183 errorText_ = errorStream_.str();

6189 stream_.state = STREAM_STOPPED;

6190 MUTEX_UNLOCK( &stream_.mutex );

6192 if ( result >= 0 ) return;

6193 error( RtError::SYSTEM_ERROR );

// One iteration of the audio callback loop (run on the thread created in
// probeDeviceOpen): waits while stopped, invokes the user callback, then reads
// from the capture handle and/or writes to the playback handle with any
// required byte-swap and format/interleave conversion.  An -EPIPE result is
// treated as an xrun: the handle is re-prepared and the condition reported via
// the status flags on the next callback.
// NOTE(review): interior lines (braces, 'goto unlock'/'tryagain' style jumps,
// declarations of 'buffer'/'channels'/'result') are missing from this chunk.
6196 void RtApiAlsa :: callbackEvent()

6198 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

// Park here until startStream() signals runnable; bail out if the stream was
// closed or never restarted while we waited.
6199 if ( stream_.state == STREAM_STOPPED ) {

6200 MUTEX_LOCK( &stream_.mutex );

6201 while ( !apiInfo->runnable )

6202 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

6204 if ( stream_.state != STREAM_RUNNING ) {

6205 MUTEX_UNLOCK( &stream_.mutex );

6208 MUTEX_UNLOCK( &stream_.mutex );

6211 if ( stream_.state == STREAM_CLOSED ) {

6212 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";

6213 error( RtError::WARNING );

// Report any xrun recorded on the previous pass to the user callback, then
// clear the flag.
6217 int doStopStream = 0;

6218 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

6219 double streamTime = getStreamTime();

6220 RtAudioStreamStatus status = 0;

6221 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {

6222 status |= RTAUDIO_OUTPUT_UNDERFLOW;

6223 apiInfo->xrun[0] = false;

6225 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {

6226 status |= RTAUDIO_INPUT_OVERFLOW;

6227 apiInfo->xrun[1] = false;

6229 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],

6230 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

// Callback return value 2 requests an immediate abort (handling elided here).
6232 if ( doStopStream == 2 ) {

6237 MUTEX_LOCK( &stream_.mutex );

6239 // The state might change while waiting on a mutex.

6240 if ( stream_.state == STREAM_STOPPED ) goto unlock;

6245 snd_pcm_t **handle;

6246 snd_pcm_sframes_t frames;

6247 RtAudioFormat format;

6248 handle = (snd_pcm_t **) apiInfo->handles;

// ---- Capture side -------------------------------------------------------
6250 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6252 // Setup parameters.

// Read into the device buffer when a conversion pass is needed, otherwise
// straight into the user buffer.
6253 if ( stream_.doConvertBuffer[1] ) {

6254 buffer = stream_.deviceBuffer;

6255 channels = stream_.nDeviceChannels[1];

6256 format = stream_.deviceFormat[1];

6259 buffer = stream_.userBuffer[1];

6260 channels = stream_.nUserChannels[1];

6261 format = stream_.userFormat;

6264 // Read samples from device in interleaved/non-interleaved format.

6265 if ( stream_.deviceInterleaved[1] )

6266 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );

// Non-interleaved: build one pointer per channel into the planar buffer.
6268 void *bufs[channels];

6269 size_t offset = stream_.bufferSize * formatBytes( format );

6270 for ( int i=0; i<channels; i++ )

6271 bufs[i] = (void *) (buffer + (i * offset));

6272 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );

6275 if ( result < (int) stream_.bufferSize ) {

6276 // Either an error or overrun occured.

6277 if ( result == -EPIPE ) {

6278 snd_pcm_state_t state = snd_pcm_state( handle[1] );

6279 if ( state == SND_PCM_STATE_XRUN ) {

6280 apiInfo->xrun[1] = true;

6281 result = snd_pcm_prepare( handle[1] );

6282 if ( result < 0 ) {

6283 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";

6284 errorText_ = errorStream_.str();

6288 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

6289 errorText_ = errorStream_.str();

6293 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";

6294 errorText_ = errorStream_.str();

6296 error( RtError::WARNING );

6300 // Do byte swapping if necessary.

6301 if ( stream_.doByteSwap[1] )

6302 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

6304 // Do buffer conversion if necessary.

6305 if ( stream_.doConvertBuffer[1] )

6306 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

6308 // Check stream latency

6309 result = snd_pcm_delay( handle[1], &frames );

6310 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;

// ---- Playback side ------------------------------------------------------
6315 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6317 // Setup parameters and do buffer conversion if necessary.

6318 if ( stream_.doConvertBuffer[0] ) {

6319 buffer = stream_.deviceBuffer;

6320 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

6321 channels = stream_.nDeviceChannels[0];

6322 format = stream_.deviceFormat[0];

6325 buffer = stream_.userBuffer[0];

6326 channels = stream_.nUserChannels[0];

6327 format = stream_.userFormat;

6330 // Do byte swapping if necessary.

6331 if ( stream_.doByteSwap[0] )

6332 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

6334 // Write samples to device in interleaved/non-interleaved format.

6335 if ( stream_.deviceInterleaved[0] )

6336 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );

6338 void *bufs[channels];

6339 size_t offset = stream_.bufferSize * formatBytes( format );

6340 for ( int i=0; i<channels; i++ )

6341 bufs[i] = (void *) (buffer + (i * offset));

6342 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );

6345 if ( result < (int) stream_.bufferSize ) {

6346 // Either an error or underrun occured.

6347 if ( result == -EPIPE ) {

6348 snd_pcm_state_t state = snd_pcm_state( handle[0] );

6349 if ( state == SND_PCM_STATE_XRUN ) {

6350 apiInfo->xrun[0] = true;

6351 result = snd_pcm_prepare( handle[0] );

6352 if ( result < 0 ) {

6353 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";

6354 errorText_ = errorStream_.str();

6358 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

6359 errorText_ = errorStream_.str();

6363 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";

6364 errorText_ = errorStream_.str();

6366 error( RtError::WARNING );

6370 // Check stream latency

6371 result = snd_pcm_delay( handle[0], &frames );

6372 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;

6376 MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream clock; a callback return of 1 requests a graceful stop.
6378 RtApi::tickStreamTime();

6379 if ( doStopStream == 1 ) this->stopStream();

6382 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6384 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6385 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6386 bool *isRunning = &info->isRunning;
\r
6388 while ( *isRunning == true ) {
\r
6389 pthread_testcancel();
\r
6390 object->callbackEvent();
\r
6393 pthread_exit( NULL );
\r
6396 //******************** End of __LINUX_ALSA__ *********************//
\r
6400 #if defined(__LINUX_OSS__)
\r
6402 #include <unistd.h>
\r
6403 #include <sys/ioctl.h>
\r
6404 #include <unistd.h>
\r
6405 #include <fcntl.h>
\r
6406 #include "soundcard.h"
\r
6407 #include <errno.h>
\r
6410 extern "C" void *ossCallbackHandler(void * ptr);
\r
6412 // A structure to hold various information related to the OSS API
\r
6413 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (output = [0], input = [1])
  bool xrun[2];            // over/underrun flags, reported to the user callback
  bool triggered;          // duplex trigger issued (SNDCTL_DSP_SETTRIGGER)
  pthread_cond_t runnable; // signaled by startStream()/closeStream()

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6424 RtApiOss :: RtApiOss()
\r
6426 // Nothing to do here.
\r
6429 RtApiOss :: ~RtApiOss()
\r
6431 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6434 unsigned int RtApiOss :: getDeviceCount( void )
\r
6436 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6437 if ( mixerfd == -1 ) {
\r
6438 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6439 error( RtError::WARNING );
\r
6443 oss_sysinfo sysinfo;
\r
6444 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6446 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6447 error( RtError::WARNING );
\r
6452 return sysinfo.numaudios;
\r
6455 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6457 RtAudio::DeviceInfo info;
\r
6458 info.probed = false;
\r
6460 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6461 if ( mixerfd == -1 ) {
\r
6462 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6463 error( RtError::WARNING );
\r
6467 oss_sysinfo sysinfo;
\r
6468 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6469 if ( result == -1 ) {
\r
6471 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6472 error( RtError::WARNING );
\r
6476 unsigned nDevices = sysinfo.numaudios;
\r
6477 if ( nDevices == 0 ) {
\r
6479 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6480 error( RtError::INVALID_USE );
\r
6483 if ( device >= nDevices ) {
\r
6485 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6486 error( RtError::INVALID_USE );
\r
6489 oss_audioinfo ainfo;
\r
6490 ainfo.dev = device;
\r
6491 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6493 if ( result == -1 ) {
\r
6494 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6495 errorText_ = errorStream_.str();
\r
6496 error( RtError::WARNING );
\r
6501 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6502 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6503 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6504 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6505 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6508 // Probe data formats ... do for input
\r
6509 unsigned long mask = ainfo.iformats;
\r
6510 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6511 info.nativeFormats |= RTAUDIO_SINT16;
\r
6512 if ( mask & AFMT_S8 )
\r
6513 info.nativeFormats |= RTAUDIO_SINT8;
\r
6514 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6515 info.nativeFormats |= RTAUDIO_SINT32;
\r
6516 if ( mask & AFMT_FLOAT )
\r
6517 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6518 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6519 info.nativeFormats |= RTAUDIO_SINT24;
\r
6521 // Check that we have at least one supported format
\r
6522 if ( info.nativeFormats == 0 ) {
\r
6523 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6524 errorText_ = errorStream_.str();
\r
6525 error( RtError::WARNING );
\r
6529 // Probe the supported sample rates.
\r
6530 info.sampleRates.clear();
\r
6531 if ( ainfo.nrates ) {
\r
6532 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6533 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6534 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6535 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6542 // Check min and max rate values;
\r
6543 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6544 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6545 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6549 if ( info.sampleRates.size() == 0 ) {
\r
6550 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6551 errorText_ = errorStream_.str();
\r
6552 error( RtError::WARNING );
\r
6555 info.probed = true;
\r
6556 info.name = ainfo.name;
\r
6563 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6564 unsigned int firstChannel, unsigned int sampleRate,
\r
6565 RtAudioFormat format, unsigned int *bufferSize,
\r
6566 RtAudio::StreamOptions *options )
\r
6568 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6569 if ( mixerfd == -1 ) {
\r
6570 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6574 oss_sysinfo sysinfo;
\r
6575 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6576 if ( result == -1 ) {
\r
6578 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6582 unsigned nDevices = sysinfo.numaudios;
\r
6583 if ( nDevices == 0 ) {
\r
6584 // This should not happen because a check is made before this function is called.
\r
6586 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6590 if ( device >= nDevices ) {
\r
6591 // This should not happen because a check is made before this function is called.
\r
6593 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6597 oss_audioinfo ainfo;
\r
6598 ainfo.dev = device;
\r
6599 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6601 if ( result == -1 ) {
\r
6602 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6603 errorText_ = errorStream_.str();
\r
6607 // Check if device supports input or output
\r
6608 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6609 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6610 if ( mode == OUTPUT )
\r
6611 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6613 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6614 errorText_ = errorStream_.str();
\r
6619 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6620 if ( mode == OUTPUT )
\r
6621 flags |= O_WRONLY;
\r
6622 else { // mode == INPUT
\r
6623 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6624 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6625 close( handle->id[0] );
\r
6626 handle->id[0] = 0;
\r
6627 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6628 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6629 errorText_ = errorStream_.str();
\r
6632 // Check that the number previously set channels is the same.
\r
6633 if ( stream_.nUserChannels[0] != channels ) {
\r
6634 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6635 errorText_ = errorStream_.str();
\r
6641 flags |= O_RDONLY;
\r
6644 // Set exclusive access if specified.
\r
6645 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6647 // Try to open the device.
\r
6649 fd = open( ainfo.devnode, flags, 0 );
\r
6651 if ( errno == EBUSY )
\r
6652 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6654 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6655 errorText_ = errorStream_.str();
\r
6659 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6661 if ( flags | O_RDWR ) {
\r
6662 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6663 if ( result == -1) {
\r
6664 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6665 errorText_ = errorStream_.str();
\r
6671 // Check the device channel support.
\r
6672 stream_.nUserChannels[mode] = channels;
\r
6673 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6675 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6676 errorText_ = errorStream_.str();
\r
6680 // Set the number of channels.
\r
6681 int deviceChannels = channels + firstChannel;
\r
6682 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6683 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6685 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6686 errorText_ = errorStream_.str();
\r
6689 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6691 // Get the data format mask
\r
6693 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6694 if ( result == -1 ) {
\r
6696 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6697 errorText_ = errorStream_.str();
\r
6701 // Determine how to set the device format.
\r
6702 stream_.userFormat = format;
\r
6703 int deviceFormat = -1;
\r
6704 stream_.doByteSwap[mode] = false;
\r
6705 if ( format == RTAUDIO_SINT8 ) {
\r
6706 if ( mask & AFMT_S8 ) {
\r
6707 deviceFormat = AFMT_S8;
\r
6708 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6711 else if ( format == RTAUDIO_SINT16 ) {
\r
6712 if ( mask & AFMT_S16_NE ) {
\r
6713 deviceFormat = AFMT_S16_NE;
\r
6714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6716 else if ( mask & AFMT_S16_OE ) {
\r
6717 deviceFormat = AFMT_S16_OE;
\r
6718 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6719 stream_.doByteSwap[mode] = true;
\r
6722 else if ( format == RTAUDIO_SINT24 ) {
\r
6723 if ( mask & AFMT_S24_NE ) {
\r
6724 deviceFormat = AFMT_S24_NE;
\r
6725 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6727 else if ( mask & AFMT_S24_OE ) {
\r
6728 deviceFormat = AFMT_S24_OE;
\r
6729 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6730 stream_.doByteSwap[mode] = true;
\r
6733 else if ( format == RTAUDIO_SINT32 ) {
\r
6734 if ( mask & AFMT_S32_NE ) {
\r
6735 deviceFormat = AFMT_S32_NE;
\r
6736 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6738 else if ( mask & AFMT_S32_OE ) {
\r
6739 deviceFormat = AFMT_S32_OE;
\r
6740 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6741 stream_.doByteSwap[mode] = true;
\r
6745 if ( deviceFormat == -1 ) {
\r
6746 // The user requested format is not natively supported by the device.
\r
6747 if ( mask & AFMT_S16_NE ) {
\r
6748 deviceFormat = AFMT_S16_NE;
\r
6749 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6751 else if ( mask & AFMT_S32_NE ) {
\r
6752 deviceFormat = AFMT_S32_NE;
\r
6753 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6755 else if ( mask & AFMT_S24_NE ) {
\r
6756 deviceFormat = AFMT_S24_NE;
\r
6757 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6759 else if ( mask & AFMT_S16_OE ) {
\r
6760 deviceFormat = AFMT_S16_OE;
\r
6761 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6762 stream_.doByteSwap[mode] = true;
\r
6764 else if ( mask & AFMT_S32_OE ) {
\r
6765 deviceFormat = AFMT_S32_OE;
\r
6766 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6767 stream_.doByteSwap[mode] = true;
\r
6769 else if ( mask & AFMT_S24_OE ) {
\r
6770 deviceFormat = AFMT_S24_OE;
\r
6771 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6772 stream_.doByteSwap[mode] = true;
\r
6774 else if ( mask & AFMT_S8) {
\r
6775 deviceFormat = AFMT_S8;
\r
6776 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6780 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6781 // This really shouldn't happen ...
\r
6783 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6784 errorText_ = errorStream_.str();
\r
6788 // Set the data format.
\r
6789 int temp = deviceFormat;
\r
6790 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6791 if ( result == -1 || deviceFormat != temp ) {
\r
6793 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6794 errorText_ = errorStream_.str();
\r
6798 // Attempt to set the buffer size. According to OSS, the minimum
\r
6799 // number of buffers is two. The supposed minimum buffer size is 16
\r
6800 // bytes, so that will be our lower bound. The argument to this
\r
6801 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6802 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6803 // We'll check the actual value used near the end of the setup
\r
6805 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6806 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6808 if ( options ) buffers = options->numberOfBuffers;
\r
6809 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6810 if ( buffers < 2 ) buffers = 3;
\r
6811 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6812 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6813 if ( result == -1 ) {
\r
6815 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6816 errorText_ = errorStream_.str();
\r
6819 stream_.nBuffers = buffers;
\r
6821 // Save buffer size (in sample frames).
\r
6822 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6823 stream_.bufferSize = *bufferSize;
\r
6825 // Set the sample rate.
\r
6826 int srate = sampleRate;
\r
6827 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6828 if ( result == -1 ) {
\r
6830 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6831 errorText_ = errorStream_.str();
\r
6835 // Verify the sample rate setup worked.
\r
6836 if ( abs( srate - sampleRate ) > 100 ) {
\r
6838 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6839 errorText_ = errorStream_.str();
\r
6842 stream_.sampleRate = sampleRate;
\r
6844 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6845 // We're doing duplex setup here.
\r
6846 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6847 stream_.nDeviceChannels[0] = deviceChannels;
\r
6850 // Set interleaving parameters.
\r
6851 stream_.userInterleaved = true;
\r
6852 stream_.deviceInterleaved[mode] = true;
\r
6853 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6854 stream_.userInterleaved = false;
\r
6856 // Set flags for buffer conversion
\r
6857 stream_.doConvertBuffer[mode] = false;
\r
6858 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6859 stream_.doConvertBuffer[mode] = true;
\r
6860 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6861 stream_.doConvertBuffer[mode] = true;
\r
6862 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6863 stream_.nUserChannels[mode] > 1 )
\r
6864 stream_.doConvertBuffer[mode] = true;
\r
6866 // Allocate the stream handles if necessary and then save.
\r
6867 if ( stream_.apiHandle == 0 ) {
\r
6869 handle = new OssHandle;
\r
6871 catch ( std::bad_alloc& ) {
\r
6872 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6876 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6877 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6881 stream_.apiHandle = (void *) handle;
\r
6884 handle = (OssHandle *) stream_.apiHandle;
\r
6886 handle->id[mode] = fd;
\r
6888 // Allocate necessary internal buffers.
\r
6889 unsigned long bufferBytes;
\r
6890 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6891 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6892 if ( stream_.userBuffer[mode] == NULL ) {
\r
6893 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6897 if ( stream_.doConvertBuffer[mode] ) {
\r
6899 bool makeBuffer = true;
\r
6900 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6901 if ( mode == INPUT ) {
\r
6902 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6903 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6904 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6908 if ( makeBuffer ) {
\r
6909 bufferBytes *= *bufferSize;
\r
6910 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6911 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6912 if ( stream_.deviceBuffer == NULL ) {
\r
6913 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6919 stream_.device[mode] = device;
\r
6920 stream_.state = STREAM_STOPPED;
\r
6922 // Setup the buffer conversion information structure.
\r
6923 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6925 // Setup thread if necessary.
\r
6926 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6927 // We had already set up an output stream.
\r
6928 stream_.mode = DUPLEX;
\r
6929 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6932 stream_.mode = mode;
\r
6934 // Setup callback thread.
\r
6935 stream_.callbackInfo.object = (void *) this;
\r
6937 // Set the thread attributes for joinable and realtime scheduling
\r
6938 // priority. The higher priority will only take affect if the
\r
6939 // program is run as root or suid.
\r
6940 pthread_attr_t attr;
\r
6941 pthread_attr_init( &attr );
\r
6942 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6943 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6944 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6945 struct sched_param param;
\r
6946 int priority = options->priority;
\r
6947 int min = sched_get_priority_min( SCHED_RR );
\r
6948 int max = sched_get_priority_max( SCHED_RR );
\r
6949 if ( priority < min ) priority = min;
\r
6950 else if ( priority > max ) priority = max;
\r
6951 param.sched_priority = priority;
\r
6952 pthread_attr_setschedparam( &attr, ¶m );
\r
6953 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6956 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6958 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6961 stream_.callbackInfo.isRunning = true;
\r
6962 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6963 pthread_attr_destroy( &attr );
\r
6965 stream_.callbackInfo.isRunning = false;
\r
6966 errorText_ = "RtApiOss::error creating callback thread!";
\r
6975 pthread_cond_destroy( &handle->runnable );
\r
6976 if ( handle->id[0] ) close( handle->id[0] );
\r
6977 if ( handle->id[1] ) close( handle->id[1] );
\r
6979 stream_.apiHandle = 0;
\r
6982 for ( int i=0; i<2; i++ ) {
\r
6983 if ( stream_.userBuffer[i] ) {
\r
6984 free( stream_.userBuffer[i] );
\r
6985 stream_.userBuffer[i] = 0;
\r
6989 if ( stream_.deviceBuffer ) {
\r
6990 free( stream_.deviceBuffer );
\r
6991 stream_.deviceBuffer = 0;
\r
6997 void RtApiOss :: closeStream()
\r
6999 if ( stream_.state == STREAM_CLOSED ) {
\r
7000 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7001 error( RtError::WARNING );
\r
7005 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7006 stream_.callbackInfo.isRunning = false;
\r
7007 MUTEX_LOCK( &stream_.mutex );
\r
7008 if ( stream_.state == STREAM_STOPPED )
\r
7009 pthread_cond_signal( &handle->runnable );
\r
7010 MUTEX_UNLOCK( &stream_.mutex );
\r
7011 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7013 if ( stream_.state == STREAM_RUNNING ) {
\r
7014 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7015 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7017 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7018 stream_.state = STREAM_STOPPED;
\r
7022 pthread_cond_destroy( &handle->runnable );
\r
7023 if ( handle->id[0] ) close( handle->id[0] );
\r
7024 if ( handle->id[1] ) close( handle->id[1] );
\r
7026 stream_.apiHandle = 0;
\r
7029 for ( int i=0; i<2; i++ ) {
\r
7030 if ( stream_.userBuffer[i] ) {
\r
7031 free( stream_.userBuffer[i] );
\r
7032 stream_.userBuffer[i] = 0;
\r
7036 if ( stream_.deviceBuffer ) {
\r
7037 free( stream_.deviceBuffer );
\r
7038 stream_.deviceBuffer = 0;
\r
7041 stream_.mode = UNINITIALIZED;
\r
7042 stream_.state = STREAM_CLOSED;
\r
7045 void RtApiOss :: startStream()
\r
7048 if ( stream_.state == STREAM_RUNNING ) {
\r
7049 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7050 error( RtError::WARNING );
\r
7054 MUTEX_LOCK( &stream_.mutex );
\r
7056 stream_.state = STREAM_RUNNING;
\r
7058 // No need to do anything else here ... OSS automatically starts
\r
7059 // when fed samples.
\r
7061 MUTEX_UNLOCK( &stream_.mutex );
\r
7063 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7064 pthread_cond_signal( &handle->runnable );
\r
7067 void RtApiOss :: stopStream()
\r
7070 if ( stream_.state == STREAM_STOPPED ) {
\r
7071 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7072 error( RtError::WARNING );
\r
7076 MUTEX_LOCK( &stream_.mutex );
\r
7078 // The state might change while waiting on a mutex.
\r
7079 if ( stream_.state == STREAM_STOPPED ) {
\r
7080 MUTEX_UNLOCK( &stream_.mutex );
\r
7085 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7086 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7088 // Flush the output with zeros a few times.
\r
7091 RtAudioFormat format;
\r
7093 if ( stream_.doConvertBuffer[0] ) {
\r
7094 buffer = stream_.deviceBuffer;
\r
7095 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7096 format = stream_.deviceFormat[0];
\r
7099 buffer = stream_.userBuffer[0];
\r
7100 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7101 format = stream_.userFormat;
\r
7104 memset( buffer, 0, samples * formatBytes(format) );
\r
7105 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7106 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7107 if ( result == -1 ) {
\r
7108 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7109 error( RtError::WARNING );
\r
7113 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7114 if ( result == -1 ) {
\r
7115 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7116 errorText_ = errorStream_.str();
\r
7119 handle->triggered = false;
\r
7122 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7123 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7124 if ( result == -1 ) {
\r
7125 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7126 errorText_ = errorStream_.str();
\r
7132 stream_.state = STREAM_STOPPED;
\r
7133 MUTEX_UNLOCK( &stream_.mutex );
\r
7135 if ( result != -1 ) return;
\r
7136 error( RtError::SYSTEM_ERROR );
\r
7139 void RtApiOss :: abortStream()
\r
7142 if ( stream_.state == STREAM_STOPPED ) {
\r
7143 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7144 error( RtError::WARNING );
\r
7148 MUTEX_LOCK( &stream_.mutex );
\r
7150 // The state might change while waiting on a mutex.
\r
7151 if ( stream_.state == STREAM_STOPPED ) {
\r
7152 MUTEX_UNLOCK( &stream_.mutex );
\r
7157 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7158 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7159 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7160 if ( result == -1 ) {
\r
7161 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7162 errorText_ = errorStream_.str();
\r
7165 handle->triggered = false;
\r
7168 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7169 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7170 if ( result == -1 ) {
\r
7171 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7172 errorText_ = errorStream_.str();
\r
7178 stream_.state = STREAM_STOPPED;
\r
7179 MUTEX_UNLOCK( &stream_.mutex );
\r
7181 if ( result != -1 ) return;
\r
7182 error( RtError::SYSTEM_ERROR );
\r
7185 void RtApiOss :: callbackEvent()
\r
7187 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7188 if ( stream_.state == STREAM_STOPPED ) {
\r
7189 MUTEX_LOCK( &stream_.mutex );
\r
7190 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7191 if ( stream_.state != STREAM_RUNNING ) {
\r
7192 MUTEX_UNLOCK( &stream_.mutex );
\r
7195 MUTEX_UNLOCK( &stream_.mutex );
\r
7198 if ( stream_.state == STREAM_CLOSED ) {
\r
7199 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7200 error( RtError::WARNING );
\r
7204 // Invoke user callback to get fresh output data.
\r
7205 int doStopStream = 0;
\r
7206 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7207 double streamTime = getStreamTime();
\r
7208 RtAudioStreamStatus status = 0;
\r
7209 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7210 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7211 handle->xrun[0] = false;
\r
7213 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7214 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7215 handle->xrun[1] = false;
\r
7217 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7218 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7219 if ( doStopStream == 2 ) {
\r
7220 this->abortStream();
\r
7224 MUTEX_LOCK( &stream_.mutex );
\r
7226 // The state might change while waiting on a mutex.
\r
7227 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7232 RtAudioFormat format;
\r
7234 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7236 // Setup parameters and do buffer conversion if necessary.
\r
7237 if ( stream_.doConvertBuffer[0] ) {
\r
7238 buffer = stream_.deviceBuffer;
\r
7239 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7240 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7241 format = stream_.deviceFormat[0];
\r
7244 buffer = stream_.userBuffer[0];
\r
7245 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7246 format = stream_.userFormat;
\r
7249 // Do byte swapping if necessary.
\r
7250 if ( stream_.doByteSwap[0] )
\r
7251 byteSwapBuffer( buffer, samples, format );
\r
7253 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7255 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7256 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7257 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7258 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7259 handle->triggered = true;
\r
7262 // Write samples to device.
\r
7263 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7265 if ( result == -1 ) {
\r
7266 // We'll assume this is an underrun, though there isn't a
\r
7267 // specific means for determining that.
\r
7268 handle->xrun[0] = true;
\r
7269 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7270 error( RtError::WARNING );
\r
7271 // Continue on to input section.
\r
7275 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7277 // Setup parameters.
\r
7278 if ( stream_.doConvertBuffer[1] ) {
\r
7279 buffer = stream_.deviceBuffer;
\r
7280 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7281 format = stream_.deviceFormat[1];
\r
7284 buffer = stream_.userBuffer[1];
\r
7285 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7286 format = stream_.userFormat;
\r
7289 // Read samples from device.
\r
7290 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7292 if ( result == -1 ) {
\r
7293 // We'll assume this is an overrun, though there isn't a
\r
7294 // specific means for determining that.
\r
7295 handle->xrun[1] = true;
\r
7296 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7297 error( RtError::WARNING );
\r
7301 // Do byte swapping if necessary.
\r
7302 if ( stream_.doByteSwap[1] )
\r
7303 byteSwapBuffer( buffer, samples, format );
\r
7305 // Do buffer conversion if necessary.
\r
7306 if ( stream_.doConvertBuffer[1] )
\r
7307 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7311 MUTEX_UNLOCK( &stream_.mutex );
\r
7313 RtApi::tickStreamTime();
\r
7314 if ( doStopStream == 1 ) this->stopStream();
\r
7317 extern "C" void *ossCallbackHandler( void *ptr )
\r
7319 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7320 RtApiOss *object = (RtApiOss *) info->object;
\r
7321 bool *isRunning = &info->isRunning;
\r
7323 while ( *isRunning == true ) {
\r
7324 pthread_testcancel();
\r
7325 object->callbackEvent();
\r
7328 pthread_exit( NULL );
\r
7331 //******************** End of __LINUX_OSS__ *********************//
\r
7335 // *************************************************** //
\r
7337 // Protected common (OS-independent) RtAudio methods.
\r
7339 // *************************************************** //
\r
7341 // This method can be modified to control the behavior of error
\r
7342 // message printing.
\r
7343 void RtApi :: error( RtError::Type type )
\r
7345 errorStream_.str(""); // clear the ostringstream
\r
7346 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7347 std::cerr << '\n' << errorText_ << "\n\n";
\r
7348 else if ( type != RtError::WARNING )
\r
7349 throw( RtError( errorText_, type ) );
\r
7352 void RtApi :: verifyStream()
\r
7354 if ( stream_.state == STREAM_CLOSED ) {
\r
7355 errorText_ = "RtApi:: a stream is not open!";
\r
7356 error( RtError::INVALID_USE );
\r
7360 void RtApi :: clearStreamInfo()
\r
7362 stream_.mode = UNINITIALIZED;
\r
7363 stream_.state = STREAM_CLOSED;
\r
7364 stream_.sampleRate = 0;
\r
7365 stream_.bufferSize = 0;
\r
7366 stream_.nBuffers = 0;
\r
7367 stream_.userFormat = 0;
\r
7368 stream_.userInterleaved = true;
\r
7369 stream_.streamTime = 0.0;
\r
7370 stream_.apiHandle = 0;
\r
7371 stream_.deviceBuffer = 0;
\r
7372 stream_.callbackInfo.callback = 0;
\r
7373 stream_.callbackInfo.userData = 0;
\r
7374 stream_.callbackInfo.isRunning = false;
\r
7375 for ( int i=0; i<2; i++ ) {
\r
7376 stream_.device[i] = 11111;
\r
7377 stream_.doConvertBuffer[i] = false;
\r
7378 stream_.deviceInterleaved[i] = true;
\r
7379 stream_.doByteSwap[i] = false;
\r
7380 stream_.nUserChannels[i] = 0;
\r
7381 stream_.nDeviceChannels[i] = 0;
\r
7382 stream_.channelOffset[i] = 0;
\r
7383 stream_.deviceFormat[i] = 0;
\r
7384 stream_.latency[i] = 0;
\r
7385 stream_.userBuffer[i] = 0;
\r
7386 stream_.convertInfo[i].channels = 0;
\r
7387 stream_.convertInfo[i].inJump = 0;
\r
7388 stream_.convertInfo[i].outJump = 0;
\r
7389 stream_.convertInfo[i].inFormat = 0;
\r
7390 stream_.convertInfo[i].outFormat = 0;
\r
7391 stream_.convertInfo[i].inOffset.clear();
\r
7392 stream_.convertInfo[i].outOffset.clear();
\r
7396 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7398 if ( format == RTAUDIO_SINT16 )
\r
7400 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7401 format == RTAUDIO_FLOAT32 )
\r
7403 else if ( format == RTAUDIO_FLOAT64 )
\r
7405 else if ( format == RTAUDIO_SINT8 )
\r
7408 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7409 error( RtError::WARNING );
\r
7414 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7416 if ( mode == INPUT ) { // convert device to user buffer
\r
7417 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7418 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7419 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7420 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7422 else { // convert user to device buffer
\r
7423 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7424 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7425 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7426 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7429 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7430 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7432 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7434 // Set up the interleave/deinterleave offsets.
\r
7435 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7436 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7437 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7438 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7439 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7440 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7441 stream_.convertInfo[mode].inJump = 1;
\r
7445 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7446 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7447 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7448 stream_.convertInfo[mode].outJump = 1;
\r
7452 else { // no (de)interleaving
\r
7453 if ( stream_.userInterleaved ) {
\r
7454 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7455 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7456 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7460 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7461 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7462 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7463 stream_.convertInfo[mode].inJump = 1;
\r
7464 stream_.convertInfo[mode].outJump = 1;
\r
7469 // Add channel offset.
\r
7470 if ( firstChannel > 0 ) {
\r
7471 if ( stream_.deviceInterleaved[mode] ) {
\r
7472 if ( mode == OUTPUT ) {
\r
7473 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7474 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7477 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7478 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7482 if ( mode == OUTPUT ) {
\r
7483 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7484 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7487 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7488 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7494 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7496 // This function does format conversion, input/output channel compensation, and
\r
7497 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7498 // the lower three bytes of a 32-bit integer.
\r
7500 // Clear our device buffer when in/out duplex device channels are different
\r
7501 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7502 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7503 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7506 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7508 Float64 *out = (Float64 *)outBuffer;
\r
7510 if (info.inFormat == RTAUDIO_SINT8) {
\r
7511 signed char *in = (signed char *)inBuffer;
\r
7512 scale = 1.0 / 127.5;
\r
7513 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7514 for (j=0; j<info.channels; j++) {
\r
7515 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7516 out[info.outOffset[j]] += 0.5;
\r
7517 out[info.outOffset[j]] *= scale;
\r
7519 in += info.inJump;
\r
7520 out += info.outJump;
\r
7523 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7524 Int16 *in = (Int16 *)inBuffer;
\r
7525 scale = 1.0 / 32767.5;
\r
7526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7527 for (j=0; j<info.channels; j++) {
\r
7528 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7529 out[info.outOffset[j]] += 0.5;
\r
7530 out[info.outOffset[j]] *= scale;
\r
7532 in += info.inJump;
\r
7533 out += info.outJump;
\r
7536 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7537 Int32 *in = (Int32 *)inBuffer;
\r
7538 scale = 1.0 / 8388607.5;
\r
7539 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7540 for (j=0; j<info.channels; j++) {
\r
7541 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7542 out[info.outOffset[j]] += 0.5;
\r
7543 out[info.outOffset[j]] *= scale;
\r
7545 in += info.inJump;
\r
7546 out += info.outJump;
\r
7549 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7550 Int32 *in = (Int32 *)inBuffer;
\r
7551 scale = 1.0 / 2147483647.5;
\r
7552 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7553 for (j=0; j<info.channels; j++) {
\r
7554 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7555 out[info.outOffset[j]] += 0.5;
\r
7556 out[info.outOffset[j]] *= scale;
\r
7558 in += info.inJump;
\r
7559 out += info.outJump;
\r
7562 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7563 Float32 *in = (Float32 *)inBuffer;
\r
7564 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7565 for (j=0; j<info.channels; j++) {
\r
7566 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7568 in += info.inJump;
\r
7569 out += info.outJump;
\r
7572 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7573 // Channel compensation and/or (de)interleaving only.
\r
7574 Float64 *in = (Float64 *)inBuffer;
\r
7575 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7576 for (j=0; j<info.channels; j++) {
\r
7577 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7579 in += info.inJump;
\r
7580 out += info.outJump;
\r
7584 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7586 Float32 *out = (Float32 *)outBuffer;
\r
7588 if (info.inFormat == RTAUDIO_SINT8) {
\r
7589 signed char *in = (signed char *)inBuffer;
\r
7590 scale = (Float32) ( 1.0 / 127.5 );
\r
7591 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7592 for (j=0; j<info.channels; j++) {
\r
7593 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7594 out[info.outOffset[j]] += 0.5;
\r
7595 out[info.outOffset[j]] *= scale;
\r
7597 in += info.inJump;
\r
7598 out += info.outJump;
\r
7601 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7602 Int16 *in = (Int16 *)inBuffer;
\r
7603 scale = (Float32) ( 1.0 / 32767.5 );
\r
7604 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7605 for (j=0; j<info.channels; j++) {
\r
7606 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7607 out[info.outOffset[j]] += 0.5;
\r
7608 out[info.outOffset[j]] *= scale;
\r
7610 in += info.inJump;
\r
7611 out += info.outJump;
\r
7614 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7615 Int32 *in = (Int32 *)inBuffer;
\r
7616 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7617 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7618 for (j=0; j<info.channels; j++) {
\r
7619 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7620 out[info.outOffset[j]] += 0.5;
\r
7621 out[info.outOffset[j]] *= scale;
\r
7623 in += info.inJump;
\r
7624 out += info.outJump;
\r
7627 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7628 Int32 *in = (Int32 *)inBuffer;
\r
7629 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7630 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7631 for (j=0; j<info.channels; j++) {
\r
7632 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7633 out[info.outOffset[j]] += 0.5;
\r
7634 out[info.outOffset[j]] *= scale;
\r
7636 in += info.inJump;
\r
7637 out += info.outJump;
\r
7640 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7641 // Channel compensation and/or (de)interleaving only.
\r
7642 Float32 *in = (Float32 *)inBuffer;
\r
7643 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7644 for (j=0; j<info.channels; j++) {
\r
7645 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7647 in += info.inJump;
\r
7648 out += info.outJump;
\r
7651 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7652 Float64 *in = (Float64 *)inBuffer;
\r
7653 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7654 for (j=0; j<info.channels; j++) {
\r
7655 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7657 in += info.inJump;
\r
7658 out += info.outJump;
\r
7662 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7663 Int32 *out = (Int32 *)outBuffer;
\r
7664 if (info.inFormat == RTAUDIO_SINT8) {
\r
7665 signed char *in = (signed char *)inBuffer;
\r
7666 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7667 for (j=0; j<info.channels; j++) {
\r
7668 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7669 out[info.outOffset[j]] <<= 24;
\r
7671 in += info.inJump;
\r
7672 out += info.outJump;
\r
7675 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7676 Int16 *in = (Int16 *)inBuffer;
\r
7677 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7678 for (j=0; j<info.channels; j++) {
\r
7679 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7680 out[info.outOffset[j]] <<= 16;
\r
7682 in += info.inJump;
\r
7683 out += info.outJump;
\r
7686 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7687 Int32 *in = (Int32 *)inBuffer;
\r
7688 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7689 for (j=0; j<info.channels; j++) {
\r
7690 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7691 out[info.outOffset[j]] <<= 8;
\r
7693 in += info.inJump;
\r
7694 out += info.outJump;
\r
7697 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7698 // Channel compensation and/or (de)interleaving only.
\r
7699 Int32 *in = (Int32 *)inBuffer;
\r
7700 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7701 for (j=0; j<info.channels; j++) {
\r
7702 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7704 in += info.inJump;
\r
7705 out += info.outJump;
\r
7708 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7709 Float32 *in = (Float32 *)inBuffer;
\r
7710 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7711 for (j=0; j<info.channels; j++) {
\r
7712 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7714 in += info.inJump;
\r
7715 out += info.outJump;
\r
7718 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7719 Float64 *in = (Float64 *)inBuffer;
\r
7720 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7721 for (j=0; j<info.channels; j++) {
\r
7722 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7724 in += info.inJump;
\r
7725 out += info.outJump;
\r
7729 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7730 Int32 *out = (Int32 *)outBuffer;
\r
7731 if (info.inFormat == RTAUDIO_SINT8) {
\r
7732 signed char *in = (signed char *)inBuffer;
\r
7733 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7734 for (j=0; j<info.channels; j++) {
\r
7735 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7736 out[info.outOffset[j]] <<= 16;
\r
7738 in += info.inJump;
\r
7739 out += info.outJump;
\r
7742 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7743 Int16 *in = (Int16 *)inBuffer;
\r
7744 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7745 for (j=0; j<info.channels; j++) {
\r
7746 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7747 out[info.outOffset[j]] <<= 8;
\r
7749 in += info.inJump;
\r
7750 out += info.outJump;
\r
7753 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7754 // Channel compensation and/or (de)interleaving only.
\r
7755 Int32 *in = (Int32 *)inBuffer;
\r
7756 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7757 for (j=0; j<info.channels; j++) {
\r
7758 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7760 in += info.inJump;
\r
7761 out += info.outJump;
\r
7764 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7765 Int32 *in = (Int32 *)inBuffer;
\r
7766 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7767 for (j=0; j<info.channels; j++) {
\r
7768 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7769 out[info.outOffset[j]] >>= 8;
\r
7771 in += info.inJump;
\r
7772 out += info.outJump;
\r
7775 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7776 Float32 *in = (Float32 *)inBuffer;
\r
7777 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7778 for (j=0; j<info.channels; j++) {
\r
7779 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7781 in += info.inJump;
\r
7782 out += info.outJump;
\r
7785 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7786 Float64 *in = (Float64 *)inBuffer;
\r
7787 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7788 for (j=0; j<info.channels; j++) {
\r
7789 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7791 in += info.inJump;
\r
7792 out += info.outJump;
\r
7796 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7797 Int16 *out = (Int16 *)outBuffer;
\r
7798 if (info.inFormat == RTAUDIO_SINT8) {
\r
7799 signed char *in = (signed char *)inBuffer;
\r
7800 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7801 for (j=0; j<info.channels; j++) {
\r
7802 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7803 out[info.outOffset[j]] <<= 8;
\r
7805 in += info.inJump;
\r
7806 out += info.outJump;
\r
7809 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7810 // Channel compensation and/or (de)interleaving only.
\r
7811 Int16 *in = (Int16 *)inBuffer;
\r
7812 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7813 for (j=0; j<info.channels; j++) {
\r
7814 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7816 in += info.inJump;
\r
7817 out += info.outJump;
\r
7820 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7821 Int32 *in = (Int32 *)inBuffer;
\r
7822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7823 for (j=0; j<info.channels; j++) {
\r
7824 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7826 in += info.inJump;
\r
7827 out += info.outJump;
\r
7830 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7831 Int32 *in = (Int32 *)inBuffer;
\r
7832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7833 for (j=0; j<info.channels; j++) {
\r
7834 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7836 in += info.inJump;
\r
7837 out += info.outJump;
\r
7840 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7841 Float32 *in = (Float32 *)inBuffer;
\r
7842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7843 for (j=0; j<info.channels; j++) {
\r
7844 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7846 in += info.inJump;
\r
7847 out += info.outJump;
\r
7850 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7851 Float64 *in = (Float64 *)inBuffer;
\r
7852 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7853 for (j=0; j<info.channels; j++) {
\r
7854 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7856 in += info.inJump;
\r
7857 out += info.outJump;
\r
7861 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7862 signed char *out = (signed char *)outBuffer;
\r
7863 if (info.inFormat == RTAUDIO_SINT8) {
\r
7864 // Channel compensation and/or (de)interleaving only.
\r
7865 signed char *in = (signed char *)inBuffer;
\r
7866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7867 for (j=0; j<info.channels; j++) {
\r
7868 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7870 in += info.inJump;
\r
7871 out += info.outJump;
\r
7874 if (info.inFormat == RTAUDIO_SINT16) {
\r
7875 Int16 *in = (Int16 *)inBuffer;
\r
7876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7877 for (j=0; j<info.channels; j++) {
\r
7878 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7880 in += info.inJump;
\r
7881 out += info.outJump;
\r
7884 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7885 Int32 *in = (Int32 *)inBuffer;
\r
7886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7887 for (j=0; j<info.channels; j++) {
\r
7888 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7890 in += info.inJump;
\r
7891 out += info.outJump;
\r
7894 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7895 Int32 *in = (Int32 *)inBuffer;
\r
7896 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7897 for (j=0; j<info.channels; j++) {
\r
7898 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7900 in += info.inJump;
\r
7901 out += info.outJump;
\r
7904 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7905 Float32 *in = (Float32 *)inBuffer;
\r
7906 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7907 for (j=0; j<info.channels; j++) {
\r
7908 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7910 in += info.inJump;
\r
7911 out += info.outJump;
\r
7914 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7915 Float64 *in = (Float64 *)inBuffer;
\r
7916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7917 for (j=0; j<info.channels; j++) {
\r
7918 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7920 in += info.inJump;
\r
7921 out += info.outJump;
\r
7927 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
7928 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
7929 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7931 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7933 register char val;
\r
7934 register char *ptr;
\r
7937 if ( format == RTAUDIO_SINT16 ) {
\r
7938 for ( unsigned int i=0; i<samples; i++ ) {
\r
7939 // Swap 1st and 2nd bytes.
\r
7941 *(ptr) = *(ptr+1);
\r
7944 // Increment 2 bytes.
\r
7948 else if ( format == RTAUDIO_SINT24 ||
\r
7949 format == RTAUDIO_SINT32 ||
\r
7950 format == RTAUDIO_FLOAT32 ) {
\r
7951 for ( unsigned int i=0; i<samples; i++ ) {
\r
7952 // Swap 1st and 4th bytes.
\r
7954 *(ptr) = *(ptr+3);
\r
7957 // Swap 2nd and 3rd bytes.
\r
7960 *(ptr) = *(ptr+1);
\r
7963 // Increment 3 more bytes.
\r
7967 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7968 for ( unsigned int i=0; i<samples; i++ ) {
\r
7969 // Swap 1st and 8th bytes
\r
7971 *(ptr) = *(ptr+7);
\r
7974 // Swap 2nd and 7th bytes
\r
7977 *(ptr) = *(ptr+5);
\r
7980 // Swap 3rd and 6th bytes
\r
7983 *(ptr) = *(ptr+3);
\r
7986 // Swap 4th and 5th bytes
\r
7989 *(ptr) = *(ptr+1);
\r
7992 // Increment 5 more bytes.
\r
7998 // Indentation settings for Vim and Emacs
\r
8000 // Local Variables:
\r
8001 // c-basic-offset: 2
\r
8002 // indent-tabs-mode: nil
\r
8005 // vim: et sts=2 sw=2
\r