1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Windows critical sections vs. POSIX
// pthread mutexes.  The final branch provides no-op placeholders for
// builds with no real API (e.g. __RTAUDIO_DUMMY__).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options,
\r
193 RtAudioErrorCallback errorCallback )
\r
195 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
196 sampleRate, bufferFrames, callback,
\r
197 userData, options, errorCallback );
\r
200 // *************************************************** //
\r
202 // Public RtApi definitions (see end of file for
\r
203 // private or protected utility functions).
\r
205 // *************************************************** //
\r
209 stream_.state = STREAM_CLOSED;
\r
210 stream_.mode = UNINITIALIZED;
\r
211 stream_.apiHandle = 0;
\r
212 stream_.userBuffer[0] = 0;
\r
213 stream_.userBuffer[1] = 0;
\r
214 MUTEX_INITIALIZE( &stream_.mutex );
\r
215 showWarnings_ = true;
\r
220 MUTEX_DESTROY( &stream_.mutex );
\r
223 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
224 RtAudio::StreamParameters *iParams,
\r
225 RtAudioFormat format, unsigned int sampleRate,
\r
226 unsigned int *bufferFrames,
\r
227 RtAudioCallback callback, void *userData,
\r
228 RtAudio::StreamOptions *options,
\r
229 RtAudioErrorCallback errorCallback )
\r
231 if ( stream_.state != STREAM_CLOSED ) {
\r
232 errorText_ = "RtApi::openStream: a stream is already open!";
\r
233 error( RtError::INVALID_USE );
\r
237 if ( oParams && oParams->nChannels < 1 ) {
\r
238 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
239 error( RtError::INVALID_USE );
\r
243 if ( iParams && iParams->nChannels < 1 ) {
\r
244 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
245 error( RtError::INVALID_USE );
\r
249 if ( oParams == NULL && iParams == NULL ) {
\r
250 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
251 error( RtError::INVALID_USE );
\r
255 if ( formatBytes(format) == 0 ) {
\r
256 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
257 error( RtError::INVALID_USE );
\r
261 unsigned int nDevices = getDeviceCount();
\r
262 unsigned int oChannels = 0;
\r
264 oChannels = oParams->nChannels;
\r
265 if ( oParams->deviceId >= nDevices ) {
\r
266 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
267 error( RtError::INVALID_USE );
\r
272 unsigned int iChannels = 0;
\r
274 iChannels = iParams->nChannels;
\r
275 if ( iParams->deviceId >= nDevices ) {
\r
276 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
277 error( RtError::INVALID_USE );
\r
285 if ( oChannels > 0 ) {
\r
287 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
288 sampleRate, format, bufferFrames, options );
\r
289 if ( result == false ) {
\r
290 error( RtError::SYSTEM_ERROR );
\r
295 if ( iChannels > 0 ) {
\r
297 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
298 sampleRate, format, bufferFrames, options );
\r
299 if ( result == false ) {
\r
300 if ( oChannels > 0 ) closeStream();
\r
301 error( RtError::SYSTEM_ERROR );
\r
306 stream_.callbackInfo.callback = (void *) callback;
\r
307 stream_.callbackInfo.userData = userData;
\r
308 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
310 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
311 stream_.state = STREAM_STOPPED;
\r
314 unsigned int RtApi :: getDefaultInputDevice( void )
\r
316 // Should be implemented in subclasses if possible.
\r
320 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
322 // Should be implemented in subclasses if possible.
\r
326 void RtApi :: closeStream( void )
\r
328 // MUST be implemented in subclasses!
\r
332 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
333 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
334 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
335 RtAudio::StreamOptions * /*options*/ )
\r
337 // MUST be implemented in subclasses!
\r
341 void RtApi :: tickStreamTime( void )
\r
343 // Subclasses that do not provide their own implementation of
\r
344 // getStreamTime should call this function once per buffer I/O to
\r
345 // provide basic stream time support.
\r
347 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
349 #if defined( HAVE_GETTIMEOFDAY )
\r
350 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
354 long RtApi :: getStreamLatency( void )
\r
358 long totalLatency = 0;
\r
359 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
360 totalLatency = stream_.latency[0];
\r
361 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
362 totalLatency += stream_.latency[1];
\r
364 return totalLatency;
\r
367 double RtApi :: getStreamTime( void )
\r
371 #if defined( HAVE_GETTIMEOFDAY )
\r
372 // Return a very accurate estimate of the stream time by
\r
373 // adding in the elapsed time since the last tick.
\r
374 struct timeval then;
\r
375 struct timeval now;
\r
377 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
378 return stream_.streamTime;
\r
380 gettimeofday( &now, NULL );
\r
381 then = stream_.lastTickTimestamp;
\r
382 return stream_.streamTime +
\r
383 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
384 (then.tv_sec + 0.000001 * then.tv_usec));
\r
386 return stream_.streamTime;
\r
390 unsigned int RtApi :: getStreamSampleRate( void )
\r
394 return stream_.sampleRate;
\r
398 // *************************************************** //
\r
400 // OS/API-specific methods.
\r
402 // *************************************************** //
\r
404 #if defined(__MACOSX_CORE__)
\r
406 // The OS X CoreAudio API is designed to use a separate callback
\r
407 // procedure for each of its audio devices. A single RtAudio duplex
\r
408 // stream using two different devices is supported here, though it
\r
409 // cannot be guaranteed to always behave correctly because we cannot
\r
410 // synchronize these two callbacks.
\r
412 // A property listener is installed for over/underrun information.
\r
413 // However, no functionality is currently provided to allow property
\r
414 // listeners to trigger user handlers because it is unclear what could
\r
415 // be done if a critical stream parameter (buffer size, sample rate,
\r
416 // device disconnect) notification arrived. The listeners entail
\r
417 // quite a bit of extra code and most likely, a user program wouldn't
\r
418 // be prepared for the result anyway. However, we do provide a flag
\r
419 // to the client callback function to inform of an over/underrun.
\r
421 // A structure to hold various information related to the CoreAudio API
\r
423 struct CoreHandle {
\r
424 AudioDeviceID id[2]; // device ids
\r
425 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
426 AudioDeviceIOProcID procId[2];
\r
428 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
429 UInt32 nStreams[2]; // number of streams to use
\r
431 char *deviceBuffer;
\r
432 pthread_cond_t condition;
\r
433 int drainCounter; // Tracks callback counts when draining
\r
434 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
437 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
440 RtApiCore:: RtApiCore()
\r
442 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
443 // This is a largely undocumented but absolutely necessary
\r
444 // requirement starting with OS-X 10.6. If not called, queries and
\r
445 // updates to various audio device properties are not handled
\r
447 CFRunLoopRef theRunLoop = NULL;
\r
448 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
449 kAudioObjectPropertyScopeGlobal,
\r
450 kAudioObjectPropertyElementMaster };
\r
451 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
452 if ( result != noErr ) {
\r
453 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
454 error( RtError::WARNING );
\r
459 RtApiCore :: ~RtApiCore()
\r
461 // The subclass destructor gets called before the base class
\r
462 // destructor, so close an existing stream before deallocating
\r
463 // apiDeviceId memory.
\r
464 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
467 unsigned int RtApiCore :: getDeviceCount( void )
\r
469 // Find out how many audio devices there are, if any.
\r
471 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
472 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
473 if ( result != noErr ) {
\r
474 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
475 error( RtError::WARNING );
\r
479 return dataSize / sizeof( AudioDeviceID );
\r
482 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
484 unsigned int nDevices = getDeviceCount();
\r
485 if ( nDevices <= 1 ) return 0;
\r
488 UInt32 dataSize = sizeof( AudioDeviceID );
\r
489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
490 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
491 if ( result != noErr ) {
\r
492 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
493 error( RtError::WARNING );
\r
497 dataSize *= nDevices;
\r
498 AudioDeviceID deviceList[ nDevices ];
\r
499 property.mSelector = kAudioHardwarePropertyDevices;
\r
500 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
501 if ( result != noErr ) {
\r
502 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
503 error( RtError::WARNING );
\r
507 for ( unsigned int i=0; i<nDevices; i++ )
\r
508 if ( id == deviceList[i] ) return i;
\r
510 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
511 error( RtError::WARNING );
\r
515 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
517 unsigned int nDevices = getDeviceCount();
\r
518 if ( nDevices <= 1 ) return 0;
\r
521 UInt32 dataSize = sizeof( AudioDeviceID );
\r
522 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
523 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
524 if ( result != noErr ) {
\r
525 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
526 error( RtError::WARNING );
\r
530 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
531 AudioDeviceID deviceList[ nDevices ];
\r
532 property.mSelector = kAudioHardwarePropertyDevices;
\r
533 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
534 if ( result != noErr ) {
\r
535 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
536 error( RtError::WARNING );
\r
540 for ( unsigned int i=0; i<nDevices; i++ )
\r
541 if ( id == deviceList[i] ) return i;
\r
543 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
544 error( RtError::WARNING );
\r
548 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
550 RtAudio::DeviceInfo info;
\r
551 info.probed = false;
\r
554 unsigned int nDevices = getDeviceCount();
\r
555 if ( nDevices == 0 ) {
\r
556 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
557 error( RtError::INVALID_USE );
\r
561 if ( device >= nDevices ) {
\r
562 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
563 error( RtError::INVALID_USE );
\r
567 AudioDeviceID deviceList[ nDevices ];
\r
568 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
569 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
570 kAudioObjectPropertyScopeGlobal,
\r
571 kAudioObjectPropertyElementMaster };
\r
572 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
573 0, NULL, &dataSize, (void *) &deviceList );
\r
574 if ( result != noErr ) {
\r
575 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
576 error( RtError::WARNING );
\r
580 AudioDeviceID id = deviceList[ device ];
\r
582 // Get the device name.
\r
584 CFStringRef cfname;
\r
585 dataSize = sizeof( CFStringRef );
\r
586 property.mSelector = kAudioObjectPropertyManufacturer;
\r
587 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
588 if ( result != noErr ) {
\r
589 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
590 errorText_ = errorStream_.str();
\r
591 error( RtError::WARNING );
\r
595 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
596 int length = CFStringGetLength(cfname);
\r
597 char *mname = (char *)malloc(length * 3 + 1);
\r
598 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
599 info.name.append( (const char *)mname, strlen(mname) );
\r
600 info.name.append( ": " );
\r
601 CFRelease( cfname );
\r
604 property.mSelector = kAudioObjectPropertyName;
\r
605 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
606 if ( result != noErr ) {
\r
607 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
608 errorText_ = errorStream_.str();
\r
609 error( RtError::WARNING );
\r
613 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
614 length = CFStringGetLength(cfname);
\r
615 char *name = (char *)malloc(length * 3 + 1);
\r
616 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
617 info.name.append( (const char *)name, strlen(name) );
\r
618 CFRelease( cfname );
\r
621 // Get the output stream "configuration".
\r
622 AudioBufferList *bufferList = nil;
\r
623 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
624 property.mScope = kAudioDevicePropertyScopeOutput;
\r
625 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
627 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
628 if ( result != noErr || dataSize == 0 ) {
\r
629 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
630 errorText_ = errorStream_.str();
\r
631 error( RtError::WARNING );
\r
635 // Allocate the AudioBufferList.
\r
636 bufferList = (AudioBufferList *) malloc( dataSize );
\r
637 if ( bufferList == NULL ) {
\r
638 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
639 error( RtError::WARNING );
\r
643 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
644 if ( result != noErr || dataSize == 0 ) {
\r
645 free( bufferList );
\r
646 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
647 errorText_ = errorStream_.str();
\r
648 error( RtError::WARNING );
\r
652 // Get output channel information.
\r
653 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
654 for ( i=0; i<nStreams; i++ )
\r
655 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
656 free( bufferList );
\r
658 // Get the input stream "configuration".
\r
659 property.mScope = kAudioDevicePropertyScopeInput;
\r
660 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
661 if ( result != noErr || dataSize == 0 ) {
\r
662 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
663 errorText_ = errorStream_.str();
\r
664 error( RtError::WARNING );
\r
668 // Allocate the AudioBufferList.
\r
669 bufferList = (AudioBufferList *) malloc( dataSize );
\r
670 if ( bufferList == NULL ) {
\r
671 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
672 error( RtError::WARNING );
\r
676 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
677 if (result != noErr || dataSize == 0) {
\r
678 free( bufferList );
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtError::WARNING );
\r
685 // Get input channel information.
\r
686 nStreams = bufferList->mNumberBuffers;
\r
687 for ( i=0; i<nStreams; i++ )
\r
688 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
689 free( bufferList );
\r
691 // If device opens for both playback and capture, we determine the channels.
\r
692 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
693 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
695 // Probe the device sample rates.
\r
696 bool isInput = false;
\r
697 if ( info.outputChannels == 0 ) isInput = true;
\r
699 // Determine the supported sample rates.
\r
700 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
701 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
702 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
703 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtError::WARNING );
\r
710 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
711 AudioValueRange rangeList[ nRanges ];
\r
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
713 if ( result != kAudioHardwareNoError ) {
\r
714 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
715 errorText_ = errorStream_.str();
\r
716 error( RtError::WARNING );
\r
720 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
721 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
722 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
723 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
726 info.sampleRates.clear();
\r
727 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
728 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
729 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
732 if ( info.sampleRates.size() == 0 ) {
\r
733 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
734 errorText_ = errorStream_.str();
\r
735 error( RtError::WARNING );
\r
739 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
740 // Thus, any other "physical" formats supported by the device are of
\r
741 // no interest to the client.
\r
742 info.nativeFormats = RTAUDIO_FLOAT32;
\r
744 if ( info.outputChannels > 0 )
\r
745 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
746 if ( info.inputChannels > 0 )
\r
747 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
749 info.probed = true;
\r
753 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
754 const AudioTimeStamp* /*inNow*/,
\r
755 const AudioBufferList* inInputData,
\r
756 const AudioTimeStamp* /*inInputTime*/,
\r
757 AudioBufferList* outOutputData,
\r
758 const AudioTimeStamp* /*inOutputTime*/,
\r
759 void* infoPointer )
\r
761 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
763 RtApiCore *object = (RtApiCore *) info->object;
\r
764 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
765 return kAudioHardwareUnspecifiedError;
\r
767 return kAudioHardwareNoError;
\r
770 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
772 const AudioObjectPropertyAddress properties[],
\r
773 void* handlePointer )
\r
775 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
776 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
777 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
778 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
779 handle->xrun[1] = true;
\r
781 handle->xrun[0] = true;
\r
785 return kAudioHardwareNoError;
\r
788 static OSStatus rateListener( AudioObjectID inDevice,
\r
789 UInt32 /*nAddresses*/,
\r
790 const AudioObjectPropertyAddress /*properties*/[],
\r
791 void* ratePointer )
\r
794 Float64 *rate = (Float64 *) ratePointer;
\r
795 UInt32 dataSize = sizeof( Float64 );
\r
796 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
797 kAudioObjectPropertyScopeGlobal,
\r
798 kAudioObjectPropertyElementMaster };
\r
799 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
800 return kAudioHardwareNoError;
\r
803 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
804 unsigned int firstChannel, unsigned int sampleRate,
\r
805 RtAudioFormat format, unsigned int *bufferSize,
\r
806 RtAudio::StreamOptions *options )
\r
809 unsigned int nDevices = getDeviceCount();
\r
810 if ( nDevices == 0 ) {
\r
811 // This should not happen because a check is made before this function is called.
\r
812 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
816 if ( device >= nDevices ) {
\r
817 // This should not happen because a check is made before this function is called.
\r
818 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
822 AudioDeviceID deviceList[ nDevices ];
\r
823 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
824 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
825 kAudioObjectPropertyScopeGlobal,
\r
826 kAudioObjectPropertyElementMaster };
\r
827 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
828 0, NULL, &dataSize, (void *) &deviceList );
\r
829 if ( result != noErr ) {
\r
830 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
834 AudioDeviceID id = deviceList[ device ];
\r
836 // Setup for stream mode.
\r
837 bool isInput = false;
\r
838 if ( mode == INPUT ) {
\r
840 property.mScope = kAudioDevicePropertyScopeInput;
\r
843 property.mScope = kAudioDevicePropertyScopeOutput;
\r
845 // Get the stream "configuration".
\r
846 AudioBufferList *bufferList = nil;
\r
848 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
849 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
850 if ( result != noErr || dataSize == 0 ) {
\r
851 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
852 errorText_ = errorStream_.str();
\r
856 // Allocate the AudioBufferList.
\r
857 bufferList = (AudioBufferList *) malloc( dataSize );
\r
858 if ( bufferList == NULL ) {
\r
859 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
863 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
864 if (result != noErr || dataSize == 0) {
\r
865 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
866 errorText_ = errorStream_.str();
\r
870 // Search for one or more streams that contain the desired number of
\r
871 // channels. CoreAudio devices can have an arbitrary number of
\r
872 // streams and each stream can have an arbitrary number of channels.
\r
873 // For each stream, a single buffer of interleaved samples is
\r
874 // provided. RtAudio prefers the use of one stream of interleaved
\r
875 // data or multiple consecutive single-channel streams. However, we
\r
876 // now support multiple consecutive multi-channel streams of
\r
877 // interleaved data as well.
\r
878 UInt32 iStream, offsetCounter = firstChannel;
\r
879 UInt32 nStreams = bufferList->mNumberBuffers;
\r
880 bool monoMode = false;
\r
881 bool foundStream = false;
\r
883 // First check that the device supports the requested number of
\r
885 UInt32 deviceChannels = 0;
\r
886 for ( iStream=0; iStream<nStreams; iStream++ )
\r
887 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
889 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
890 free( bufferList );
\r
891 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
892 errorText_ = errorStream_.str();
\r
896 // Look for a single stream meeting our needs.
\r
897 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
898 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
899 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
900 if ( streamChannels >= channels + offsetCounter ) {
\r
901 firstStream = iStream;
\r
902 channelOffset = offsetCounter;
\r
903 foundStream = true;
\r
906 if ( streamChannels > offsetCounter ) break;
\r
907 offsetCounter -= streamChannels;
\r
910 // If we didn't find a single stream above, then we should be able
\r
911 // to meet the channel specification with multiple streams.
\r
912 if ( foundStream == false ) {
\r
914 offsetCounter = firstChannel;
\r
915 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
916 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
917 if ( streamChannels > offsetCounter ) break;
\r
918 offsetCounter -= streamChannels;
\r
921 firstStream = iStream;
\r
922 channelOffset = offsetCounter;
\r
923 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
925 if ( streamChannels > 1 ) monoMode = false;
\r
926 while ( channelCounter > 0 ) {
\r
927 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
928 if ( streamChannels > 1 ) monoMode = false;
\r
929 channelCounter -= streamChannels;
\r
934 free( bufferList );
\r
936 // Determine the buffer size.
\r
937 AudioValueRange bufferRange;
\r
938 dataSize = sizeof( AudioValueRange );
\r
939 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
942 if ( result != noErr ) {
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
949 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
950 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
952 // Set the buffer size. For multiple streams, I'm assuming we only
\r
953 // need to make this setting for the master channel.
\r
954 UInt32 theSize = (UInt32) *bufferSize;
\r
955 dataSize = sizeof( UInt32 );
\r
956 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
957 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
959 if ( result != noErr ) {
\r
960 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
961 errorText_ = errorStream_.str();
\r
965 // If attempting to setup a duplex stream, the bufferSize parameter
\r
966 // MUST be the same in both directions!
\r
967 *bufferSize = theSize;
\r
968 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
970 errorText_ = errorStream_.str();
\r
974 stream_.bufferSize = *bufferSize;
\r
975 stream_.nBuffers = 1;
\r
977 // Try to set "hog" mode ... it's not clear to me this is working.
\r
978 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
980 dataSize = sizeof( hog_pid );
\r
981 property.mSelector = kAudioDevicePropertyHogMode;
\r
982 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
983 if ( result != noErr ) {
\r
984 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
985 errorText_ = errorStream_.str();
\r
989 if ( hog_pid != getpid() ) {
\r
990 hog_pid = getpid();
\r
991 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
994 errorText_ = errorStream_.str();
\r
1000 // Check and if necessary, change the sample rate for the device.
\r
1001 Float64 nominalRate;
\r
1002 dataSize = sizeof( Float64 );
\r
1003 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1004 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1006 if ( result != noErr ) {
\r
1007 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1008 errorText_ = errorStream_.str();
\r
1012 // Only change the sample rate if off by more than 1 Hz.
\r
1013 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1015 // Set a property listener for the sample rate change
\r
1016 Float64 reportedRate = 0.0;
\r
1017 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1018 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1019 if ( result != noErr ) {
\r
1020 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1021 errorText_ = errorStream_.str();
\r
1025 nominalRate = (Float64) sampleRate;
\r
1026 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1028 if ( result != noErr ) {
\r
1029 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1030 errorText_ = errorStream_.str();
\r
1034 // Now wait until the reported nominal rate is what we just set.
\r
1035 UInt32 microCounter = 0;
\r
1036 while ( reportedRate != nominalRate ) {
\r
1037 microCounter += 5000;
\r
1038 if ( microCounter > 5000000 ) break;
\r
1042 // Remove the property listener.
\r
1043 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1045 if ( microCounter > 5000000 ) {
\r
1046 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1047 errorText_ = errorStream_.str();
\r
1052 // Now set the stream format for all streams. Also, check the
\r
1053 // physical format of the device and change that if necessary.
\r
1054 AudioStreamBasicDescription description;
\r
1055 dataSize = sizeof( AudioStreamBasicDescription );
\r
1056 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1057 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1058 if ( result != noErr ) {
\r
1059 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1060 errorText_ = errorStream_.str();
\r
1064 // Set the sample rate and data format id. However, only make the
\r
1065 // change if the sample rate is not within 1.0 of the desired
\r
1066 // rate and the format is not linear pcm.
\r
1067 bool updateFormat = false;
\r
1068 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1069 description.mSampleRate = (Float64) sampleRate;
\r
1070 updateFormat = true;
\r
1073 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1074 description.mFormatID = kAudioFormatLinearPCM;
\r
1075 updateFormat = true;
\r
1078 if ( updateFormat ) {
\r
1079 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1080 if ( result != noErr ) {
\r
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1082 errorText_ = errorStream_.str();
\r
1087 // Now check the physical format.
\r
1088 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1089 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1090 if ( result != noErr ) {
\r
1091 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1092 errorText_ = errorStream_.str();
\r
1096 //std::cout << "Current physical stream format:" << std::endl;
\r
1097 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1098 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1099 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1100 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1102 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1103 description.mFormatID = kAudioFormatLinearPCM;
\r
1104 //description.mSampleRate = (Float64) sampleRate;
\r
1105 AudioStreamBasicDescription testDescription = description;
\r
1106 UInt32 formatFlags;
\r
1108 // We'll try higher bit rates first and then work our way down.
\r
1109 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1110 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1111 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1112 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1113 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1114 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1115 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1116 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1117 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1118 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1119 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1120 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1121 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1123 bool setPhysicalFormat = false;
\r
1124 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1125 testDescription = description;
\r
1126 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1127 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1128 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1129 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1131 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1132 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1133 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1134 if ( result == noErr ) {
\r
1135 setPhysicalFormat = true;
\r
1136 //std::cout << "Updated physical stream format:" << std::endl;
\r
1137 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1138 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1139 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1140 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1145 if ( !setPhysicalFormat ) {
\r
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1147 errorText_ = errorStream_.str();
\r
1150 } // done setting virtual/physical formats.
\r
1152 // Get the stream / device latency.
\r
1154 dataSize = sizeof( UInt32 );
\r
1155 property.mSelector = kAudioDevicePropertyLatency;
\r
1156 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1157 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1158 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1160 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1161 errorText_ = errorStream_.str();
\r
1162 error( RtError::WARNING );
\r
1166 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1167 // always be presented in native-endian format, so we should never
\r
1168 // need to byte swap.
\r
1169 stream_.doByteSwap[mode] = false;
\r
1171 // From the CoreAudio documentation, PCM data must be supplied as
\r
1173 stream_.userFormat = format;
\r
1174 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1176 if ( streamCount == 1 )
\r
1177 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1178 else // multiple streams
\r
1179 stream_.nDeviceChannels[mode] = channels;
\r
1180 stream_.nUserChannels[mode] = channels;
\r
1181 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1182 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1183 else stream_.userInterleaved = true;
\r
1184 stream_.deviceInterleaved[mode] = true;
\r
1185 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1187 // Set flags for buffer conversion.
\r
1188 stream_.doConvertBuffer[mode] = false;
\r
1189 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1190 stream_.doConvertBuffer[mode] = true;
\r
1191 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1192 stream_.doConvertBuffer[mode] = true;
\r
1193 if ( streamCount == 1 ) {
\r
1194 if ( stream_.nUserChannels[mode] > 1 &&
\r
1195 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1196 stream_.doConvertBuffer[mode] = true;
\r
1198 else if ( monoMode && stream_.userInterleaved )
\r
1199 stream_.doConvertBuffer[mode] = true;
\r
1201 // Allocate our CoreHandle structure for the stream.
\r
1202 CoreHandle *handle = 0;
\r
1203 if ( stream_.apiHandle == 0 ) {
\r
1205 handle = new CoreHandle;
\r
1207 catch ( std::bad_alloc& ) {
\r
1208 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1212 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1213 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1216 stream_.apiHandle = (void *) handle;
\r
1219 handle = (CoreHandle *) stream_.apiHandle;
\r
1220 handle->iStream[mode] = firstStream;
\r
1221 handle->nStreams[mode] = streamCount;
\r
1222 handle->id[mode] = id;
\r
1224 // Allocate necessary internal buffers.
\r
1225 unsigned long bufferBytes;
\r
1226 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1227 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1228 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1229 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1230 if ( stream_.userBuffer[mode] == NULL ) {
\r
1231 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1235 // If possible, we will make use of the CoreAudio stream buffers as
\r
1236 // "device buffers". However, we can't do this if using multiple
\r
1238 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1240 bool makeBuffer = true;
\r
1241 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1242 if ( mode == INPUT ) {
\r
1243 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1244 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1245 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1249 if ( makeBuffer ) {
\r
1250 bufferBytes *= *bufferSize;
\r
1251 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1252 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1253 if ( stream_.deviceBuffer == NULL ) {
\r
1254 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1260 stream_.sampleRate = sampleRate;
\r
1261 stream_.device[mode] = device;
\r
1262 stream_.state = STREAM_STOPPED;
\r
1263 stream_.callbackInfo.object = (void *) this;
\r
1265 // Setup the buffer conversion information structure.
\r
1266 if ( stream_.doConvertBuffer[mode] ) {
\r
1267 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1268 else setConvertInfo( mode, channelOffset );
\r
1271 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1272 // Only one callback procedure per device.
\r
1273 stream_.mode = DUPLEX;
\r
1275 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1276 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1278 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1279 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1281 if ( result != noErr ) {
\r
1282 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1283 errorText_ = errorStream_.str();
\r
1286 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1287 stream_.mode = DUPLEX;
\r
1289 stream_.mode = mode;
\r
1292 // Setup the device property listener for over/underload.
\r
1293 property.mSelector = kAudioDeviceProcessorOverload;
\r
1294 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1300 pthread_cond_destroy( &handle->condition );
\r
1302 stream_.apiHandle = 0;
\r
1305 for ( int i=0; i<2; i++ ) {
\r
1306 if ( stream_.userBuffer[i] ) {
\r
1307 free( stream_.userBuffer[i] );
\r
1308 stream_.userBuffer[i] = 0;
\r
1312 if ( stream_.deviceBuffer ) {
\r
1313 free( stream_.deviceBuffer );
\r
1314 stream_.deviceBuffer = 0;
\r
1317 stream_.state = STREAM_CLOSED;
\r
1321 void RtApiCore :: closeStream( void )
\r
1323 if ( stream_.state == STREAM_CLOSED ) {
\r
1324 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1325 error( RtError::WARNING );
\r
1329 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1330 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1331 if ( stream_.state == STREAM_RUNNING )
\r
1332 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1333 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1334 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1336 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1337 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1341 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1342 if ( stream_.state == STREAM_RUNNING )
\r
1343 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1344 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1345 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1347 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1348 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1352 for ( int i=0; i<2; i++ ) {
\r
1353 if ( stream_.userBuffer[i] ) {
\r
1354 free( stream_.userBuffer[i] );
\r
1355 stream_.userBuffer[i] = 0;
\r
1359 if ( stream_.deviceBuffer ) {
\r
1360 free( stream_.deviceBuffer );
\r
1361 stream_.deviceBuffer = 0;
\r
1364 // Destroy pthread condition variable.
\r
1365 pthread_cond_destroy( &handle->condition );
\r
1367 stream_.apiHandle = 0;
\r
1369 stream_.mode = UNINITIALIZED;
\r
1370 stream_.state = STREAM_CLOSED;
\r
1373 void RtApiCore :: startStream( void )
\r
1376 if ( stream_.state == STREAM_RUNNING ) {
\r
1377 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1378 error( RtError::WARNING );
\r
1382 OSStatus result = noErr;
\r
1383 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1384 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1386 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1387 if ( result != noErr ) {
\r
1388 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1389 errorText_ = errorStream_.str();
\r
1394 if ( stream_.mode == INPUT ||
\r
1395 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1397 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1398 if ( result != noErr ) {
\r
1399 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1400 errorText_ = errorStream_.str();
\r
1405 handle->drainCounter = 0;
\r
1406 handle->internalDrain = false;
\r
1407 stream_.state = STREAM_RUNNING;
\r
1410 if ( result == noErr ) return;
\r
1411 error( RtError::SYSTEM_ERROR );
\r
1414 void RtApiCore :: stopStream( void )
\r
1417 if ( stream_.state == STREAM_STOPPED ) {
\r
1418 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1419 error( RtError::WARNING );
\r
1423 OSStatus result = noErr;
\r
1424 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1425 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1427 if ( handle->drainCounter == 0 ) {
\r
1428 handle->drainCounter = 2;
\r
1429 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1432 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1433 if ( result != noErr ) {
\r
1434 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1435 errorText_ = errorStream_.str();
\r
1440 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1442 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1443 if ( result != noErr ) {
\r
1444 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1445 errorText_ = errorStream_.str();
\r
1450 stream_.state = STREAM_STOPPED;
\r
1453 if ( result == noErr ) return;
\r
1454 error( RtError::SYSTEM_ERROR );
\r
1457 void RtApiCore :: abortStream( void )
\r
1460 if ( stream_.state == STREAM_STOPPED ) {
\r
1461 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1462 error( RtError::WARNING );
\r
1466 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1467 handle->drainCounter = 2;
\r
1472 // This function will be called by a spawned thread when the user
\r
1473 // callback function signals that the stream should be stopped or
\r
1474 // aborted. It is better to handle it this way because the
\r
1475 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1476 // function is called.
\r
1477 static void *coreStopStream( void *ptr )
\r
1479 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1480 RtApiCore *object = (RtApiCore *) info->object;
\r
1482 object->stopStream();
\r
1483 pthread_exit( NULL );
\r
1486 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1487 const AudioBufferList *inBufferList,
\r
1488 const AudioBufferList *outBufferList )
\r
1490 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1491 if ( stream_.state == STREAM_CLOSED ) {
\r
1492 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1493 error( RtError::WARNING );
\r
1497 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1498 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1500 // Check if we were draining the stream and signal is finished.
\r
1501 if ( handle->drainCounter > 3 ) {
\r
1502 ThreadHandle threadId;
\r
1504 stream_.state = STREAM_STOPPING;
\r
1505 if ( handle->internalDrain == true )
\r
1506 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1507 else // external call to stopStream()
\r
1508 pthread_cond_signal( &handle->condition );
\r
1512 AudioDeviceID outputDevice = handle->id[0];
\r
1514 // Invoke user callback to get fresh output data UNLESS we are
\r
1515 // draining stream or duplex mode AND the input/output devices are
\r
1516 // different AND this function is called for the input device.
\r
1517 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1518 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1519 double streamTime = getStreamTime();
\r
1520 RtAudioStreamStatus status = 0;
\r
1521 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1522 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1523 handle->xrun[0] = false;
\r
1525 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1526 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1527 handle->xrun[1] = false;
\r
1530 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1531 stream_.bufferSize, streamTime, status, info->userData );
\r
1532 if ( cbReturnValue == 2 ) {
\r
1533 stream_.state = STREAM_STOPPING;
\r
1534 handle->drainCounter = 2;
\r
1538 else if ( cbReturnValue == 1 ) {
\r
1539 handle->drainCounter = 1;
\r
1540 handle->internalDrain = true;
\r
1544 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1546 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1548 if ( handle->nStreams[0] == 1 ) {
\r
1549 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1551 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1553 else { // fill multiple streams with zeros
\r
1554 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1555 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1557 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1561 else if ( handle->nStreams[0] == 1 ) {
\r
1562 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1563 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1564 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1566 else { // copy from user buffer
\r
1567 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1568 stream_.userBuffer[0],
\r
1569 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1572 else { // fill multiple streams
\r
1573 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1574 if ( stream_.doConvertBuffer[0] ) {
\r
1575 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1576 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1579 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1580 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1581 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1582 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1583 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1586 else { // fill multiple multi-channel streams with interleaved data
\r
1587 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1588 Float32 *out, *in;
\r
1590 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1591 UInt32 inChannels = stream_.nUserChannels[0];
\r
1592 if ( stream_.doConvertBuffer[0] ) {
\r
1593 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1594 inChannels = stream_.nDeviceChannels[0];
\r
1597 if ( inInterleaved ) inOffset = 1;
\r
1598 else inOffset = stream_.bufferSize;
\r
1600 channelsLeft = inChannels;
\r
1601 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1603 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1604 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1607 // Account for possible channel offset in first stream
\r
1608 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1609 streamChannels -= stream_.channelOffset[0];
\r
1610 outJump = stream_.channelOffset[0];
\r
1614 // Account for possible unfilled channels at end of the last stream
\r
1615 if ( streamChannels > channelsLeft ) {
\r
1616 outJump = streamChannels - channelsLeft;
\r
1617 streamChannels = channelsLeft;
\r
1620 // Determine input buffer offsets and skips
\r
1621 if ( inInterleaved ) {
\r
1622 inJump = inChannels;
\r
1623 in += inChannels - channelsLeft;
\r
1627 in += (inChannels - channelsLeft) * inOffset;
\r
1630 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1631 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1632 *out++ = in[j*inOffset];
\r
1637 channelsLeft -= streamChannels;
\r
1642 if ( handle->drainCounter ) {
\r
1643 handle->drainCounter++;
\r
1648 AudioDeviceID inputDevice;
\r
1649 inputDevice = handle->id[1];
\r
1650 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1652 if ( handle->nStreams[1] == 1 ) {
\r
1653 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1654 convertBuffer( stream_.userBuffer[1],
\r
1655 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1656 stream_.convertInfo[1] );
\r
1658 else { // copy to user buffer
\r
1659 memcpy( stream_.userBuffer[1],
\r
1660 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1661 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1664 else { // read from multiple streams
\r
1665 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1666 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1668 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1669 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1670 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1671 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1672 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1675 else { // read from multiple multi-channel streams
\r
1676 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1677 Float32 *out, *in;
\r
1679 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1680 UInt32 outChannels = stream_.nUserChannels[1];
\r
1681 if ( stream_.doConvertBuffer[1] ) {
\r
1682 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1683 outChannels = stream_.nDeviceChannels[1];
\r
1686 if ( outInterleaved ) outOffset = 1;
\r
1687 else outOffset = stream_.bufferSize;
\r
1689 channelsLeft = outChannels;
\r
1690 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1692 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1693 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1696 // Account for possible channel offset in first stream
\r
1697 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1698 streamChannels -= stream_.channelOffset[1];
\r
1699 inJump = stream_.channelOffset[1];
\r
1703 // Account for possible unread channels at end of the last stream
\r
1704 if ( streamChannels > channelsLeft ) {
\r
1705 inJump = streamChannels - channelsLeft;
\r
1706 streamChannels = channelsLeft;
\r
1709 // Determine output buffer offsets and skips
\r
1710 if ( outInterleaved ) {
\r
1711 outJump = outChannels;
\r
1712 out += outChannels - channelsLeft;
\r
1716 out += (outChannels - channelsLeft) * outOffset;
\r
1719 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1720 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1721 out[j*outOffset] = *in++;
\r
1726 channelsLeft -= streamChannels;
\r
1730 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1731 convertBuffer( stream_.userBuffer[1],
\r
1732 stream_.deviceBuffer,
\r
1733 stream_.convertInfo[1] );
\r
1739 //MUTEX_UNLOCK( &stream_.mutex );
\r
1741 RtApi::tickStreamTime();
\r
1745 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1749 case kAudioHardwareNotRunningError:
\r
1750 return "kAudioHardwareNotRunningError";
\r
1752 case kAudioHardwareUnspecifiedError:
\r
1753 return "kAudioHardwareUnspecifiedError";
\r
1755 case kAudioHardwareUnknownPropertyError:
\r
1756 return "kAudioHardwareUnknownPropertyError";
\r
1758 case kAudioHardwareBadPropertySizeError:
\r
1759 return "kAudioHardwareBadPropertySizeError";
\r
1761 case kAudioHardwareIllegalOperationError:
\r
1762 return "kAudioHardwareIllegalOperationError";
\r
1764 case kAudioHardwareBadObjectError:
\r
1765 return "kAudioHardwareBadObjectError";
\r
1767 case kAudioHardwareBadDeviceError:
\r
1768 return "kAudioHardwareBadDeviceError";
\r
1770 case kAudioHardwareBadStreamError:
\r
1771 return "kAudioHardwareBadStreamError";
\r
1773 case kAudioHardwareUnsupportedOperationError:
\r
1774 return "kAudioHardwareUnsupportedOperationError";
\r
1776 case kAudioDeviceUnsupportedFormatError:
\r
1777 return "kAudioDeviceUnsupportedFormatError";
\r
1779 case kAudioDevicePermissionsError:
\r
1780 return "kAudioDevicePermissionsError";
\r
1783 return "CoreAudio unknown error";
\r
1787 //******************** End of __MACOSX_CORE__ *********************//
\r
1790 #if defined(__UNIX_JACK__)
\r
1792 // JACK is a low-latency audio server, originally written for the
\r
1793 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1794 // connect a number of different applications to an audio device, as
\r
1795 // well as allowing them to share audio between themselves.
\r
1797 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1798 // have ports connected to the server. The JACK server is typically
\r
1799 // started in a terminal as follows:
\r
1801 // .jackd -d alsa -d hw:0
\r
1803 // or through an interface program such as qjackctl. Many of the
\r
1804 // parameters normally set for a stream are fixed by the JACK server
\r
1805 // and can be specified when the JACK server is started. In
\r
1808 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1810 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1811 // frames, and number of buffers = 4. Once the server is running, it
\r
1812 // is not possible to override these values. If the values are not
\r
1813 // specified in the command-line, the JACK server uses default values.
\r
1815 // The JACK server does not have to be running when an instance of
\r
1816 // RtApiJack is created, though the function getDeviceCount() will
\r
1817 // report 0 devices found until JACK has been started. When no
\r
1818 // devices are available (i.e., the JACK server is not running), a
\r
1819 // stream cannot be opened.
\r
1821 #include <jack/jack.h>
\r
1822 #include <unistd.h>
\r
1825 // A structure to hold various information related to the Jack API
\r
1826 // implementation.
\r
1827 struct JackHandle {
\r
1828 jack_client_t *client;
\r
1829 jack_port_t **ports[2];
\r
1830 std::string deviceName[2];
\r
1832 pthread_cond_t condition;
\r
1833 int drainCounter; // Tracks callback counts when draining
\r
1834 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1837 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1840 static void jackSilentError( const char * ) {};
\r
1842 RtApiJack :: RtApiJack()
\r
1844 // Nothing to do here.
\r
1845 #if !defined(__RTAUDIO_DEBUG__)
\r
1846 // Turn off Jack's internal error reporting.
\r
1847 jack_set_error_function( &jackSilentError );
\r
1851 RtApiJack :: ~RtApiJack()
\r
1853 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1856 unsigned int RtApiJack :: getDeviceCount( void )
\r
1858 // See if we can become a jack client.
\r
1859 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1860 jack_status_t *status = NULL;
\r
1861 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1862 if ( client == 0 ) return 0;
\r
1864 const char **ports;
\r
1865 std::string port, previousPort;
\r
1866 unsigned int nChannels = 0, nDevices = 0;
\r
1867 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1869 // Parse the port names up to the first colon (:).
\r
1870 size_t iColon = 0;
\r
1872 port = (char *) ports[ nChannels ];
\r
1873 iColon = port.find(":");
\r
1874 if ( iColon != std::string::npos ) {
\r
1875 port = port.substr( 0, iColon + 1 );
\r
1876 if ( port != previousPort ) {
\r
1878 previousPort = port;
\r
1881 } while ( ports[++nChannels] );
\r
1885 jack_client_close( client );
\r
1889 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1891 RtAudio::DeviceInfo info;
\r
1892 info.probed = false;
\r
1894 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1895 jack_status_t *status = NULL;
\r
1896 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1897 if ( client == 0 ) {
\r
1898 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1899 error( RtError::WARNING );
\r
1903 const char **ports;
\r
1904 std::string port, previousPort;
\r
1905 unsigned int nPorts = 0, nDevices = 0;
\r
1906 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1908 // Parse the port names up to the first colon (:).
\r
1909 size_t iColon = 0;
\r
1911 port = (char *) ports[ nPorts ];
\r
1912 iColon = port.find(":");
\r
1913 if ( iColon != std::string::npos ) {
\r
1914 port = port.substr( 0, iColon );
\r
1915 if ( port != previousPort ) {
\r
1916 if ( nDevices == device ) info.name = port;
\r
1918 previousPort = port;
\r
1921 } while ( ports[++nPorts] );
\r
1925 if ( device >= nDevices ) {
\r
1926 jack_client_close( client );
\r
1927 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1928 error( RtError::INVALID_USE );
\r
1932 // Get the current jack server sample rate.
\r
1933 info.sampleRates.clear();
\r
1934 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1936 // Count the available ports containing the client name as device
\r
1937 // channels. Jack "input ports" equal RtAudio output channels.
\r
1938 unsigned int nChannels = 0;
\r
1939 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1941 while ( ports[ nChannels ] ) nChannels++;
\r
1943 info.outputChannels = nChannels;
\r
1946 // Jack "output ports" equal RtAudio input channels.
\r
1948 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1950 while ( ports[ nChannels ] ) nChannels++;
\r
1952 info.inputChannels = nChannels;
\r
1955 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1956 jack_client_close(client);
\r
1957 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1958 error( RtError::WARNING );
\r
1962 // If device opens for both playback and capture, we determine the channels.
\r
1963 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1964 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1966 // Jack always uses 32-bit floats.
\r
1967 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1969 // Jack doesn't provide default devices so we'll use the first available one.
\r
1970 if ( device == 0 && info.outputChannels > 0 )
\r
1971 info.isDefaultOutput = true;
\r
1972 if ( device == 0 && info.inputChannels > 0 )
\r
1973 info.isDefaultInput = true;
\r
1975 jack_client_close(client);
\r
1976 info.probed = true;
\r
1980 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1982 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1984 RtApiJack *object = (RtApiJack *) info->object;
\r
1985 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1990 // This function will be called by a spawned thread when the Jack
\r
1991 // server signals that it is shutting down. It is necessary to handle
\r
1992 // it this way because the jackShutdown() function must return before
\r
1993 // the jack_deactivate() function (in closeStream()) will return.
\r
1994 static void *jackCloseStream( void *ptr )
\r
1996 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1997 RtApiJack *object = (RtApiJack *) info->object;
\r
1999 object->closeStream();
\r
2001 pthread_exit( NULL );
\r
2003 static void jackShutdown( void *infoPointer )
\r
2005 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2006 RtApiJack *object = (RtApiJack *) info->object;
\r
2008 // Check current stream state. If stopped, then we'll assume this
\r
2009 // was called as a result of a call to RtApiJack::stopStream (the
\r
2010 // deactivation of a client handle causes this function to be called).
\r
2011 // If not, we'll assume the Jack server is shutting down or some
\r
2012 // other problem occurred and we should close the stream.
\r
2013 if ( object->isStreamRunning() == false ) return;
\r
2015 ThreadHandle threadId;
\r
2016 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2017 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2020 static int jackXrun( void *infoPointer )
\r
2022 JackHandle *handle = (JackHandle *) infoPointer;
\r
2024 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2025 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2030 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2031 unsigned int firstChannel, unsigned int sampleRate,
\r
2032 RtAudioFormat format, unsigned int *bufferSize,
\r
2033 RtAudio::StreamOptions *options )
\r
2035 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2037 // Look for jack server and try to become a client (only do once per stream).
\r
2038 jack_client_t *client = 0;
\r
2039 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2040 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2041 jack_status_t *status = NULL;
\r
2042 if ( options && !options->streamName.empty() )
\r
2043 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2045 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2046 if ( client == 0 ) {
\r
2047 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2048 error( RtError::WARNING );
\r
2053 // The handle must have been created on an earlier pass.
\r
2054 client = handle->client;
\r
2057 const char **ports;
\r
2058 std::string port, previousPort, deviceName;
\r
2059 unsigned int nPorts = 0, nDevices = 0;
\r
2060 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2062 // Parse the port names up to the first colon (:).
\r
2063 size_t iColon = 0;
\r
2065 port = (char *) ports[ nPorts ];
\r
2066 iColon = port.find(":");
\r
2067 if ( iColon != std::string::npos ) {
\r
2068 port = port.substr( 0, iColon );
\r
2069 if ( port != previousPort ) {
\r
2070 if ( nDevices == device ) deviceName = port;
\r
2072 previousPort = port;
\r
2075 } while ( ports[++nPorts] );
\r
2079 if ( device >= nDevices ) {
\r
2080 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2084 // Count the available ports containing the client name as device
\r
2085 // channels. Jack "input ports" equal RtAudio output channels.
\r
2086 unsigned int nChannels = 0;
\r
2087 unsigned long flag = JackPortIsInput;
\r
2088 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2089 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2091 while ( ports[ nChannels ] ) nChannels++;
\r
2095 // Compare the jack ports for specified client to the requested number of channels.
\r
2096 if ( nChannels < (channels + firstChannel) ) {
\r
2097 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2098 errorText_ = errorStream_.str();
\r
2102 // Check the jack server sample rate.
\r
2103 unsigned int jackRate = jack_get_sample_rate( client );
\r
2104 if ( sampleRate != jackRate ) {
\r
2105 jack_client_close( client );
\r
2106 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2107 errorText_ = errorStream_.str();
\r
2110 stream_.sampleRate = jackRate;
\r
2112 // Get the latency of the JACK port.
\r
2113 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2114 if ( ports[ firstChannel ] ) {
\r
2115 // Added by Ge Wang
\r
2116 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2117 // the range (usually the min and max are equal)
\r
2118 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2119 // get the latency range
\r
2120 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2121 // be optimistic, use the min!
\r
2122 stream_.latency[mode] = latrange.min;
\r
2123 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2127 // The jack server always uses 32-bit floating-point data.
\r
2128 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2129 stream_.userFormat = format;
\r
2131 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2132 else stream_.userInterleaved = true;
\r
2134 // Jack always uses non-interleaved buffers.
\r
2135 stream_.deviceInterleaved[mode] = false;
\r
2137 // Jack always provides host byte-ordered data.
\r
2138 stream_.doByteSwap[mode] = false;
\r
2140 // Get the buffer size. The buffer size and number of buffers
\r
2141 // (periods) is set when the jack server is started.
\r
2142 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2143 *bufferSize = stream_.bufferSize;
\r
2145 stream_.nDeviceChannels[mode] = channels;
\r
2146 stream_.nUserChannels[mode] = channels;
\r
2148 // Set flags for buffer conversion.
\r
2149 stream_.doConvertBuffer[mode] = false;
\r
2150 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2151 stream_.doConvertBuffer[mode] = true;
\r
2152 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2153 stream_.nUserChannels[mode] > 1 )
\r
2154 stream_.doConvertBuffer[mode] = true;
\r
2156 // Allocate our JackHandle structure for the stream.
\r
2157 if ( handle == 0 ) {
\r
2159 handle = new JackHandle;
\r
2161 catch ( std::bad_alloc& ) {
\r
2162 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2166 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2167 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2170 stream_.apiHandle = (void *) handle;
\r
2171 handle->client = client;
\r
2173 handle->deviceName[mode] = deviceName;
\r
2175 // Allocate necessary internal buffers.
\r
2176 unsigned long bufferBytes;
\r
2177 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2178 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2179 if ( stream_.userBuffer[mode] == NULL ) {
\r
2180 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2184 if ( stream_.doConvertBuffer[mode] ) {
\r
2186 bool makeBuffer = true;
\r
2187 if ( mode == OUTPUT )
\r
2188 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2189 else { // mode == INPUT
\r
2190 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2191 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2192 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2193 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2197 if ( makeBuffer ) {
\r
2198 bufferBytes *= *bufferSize;
\r
2199 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2200 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2201 if ( stream_.deviceBuffer == NULL ) {
\r
2202 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2208 // Allocate memory for the Jack ports (channels) identifiers.
\r
2209 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2210 if ( handle->ports[mode] == NULL ) {
\r
2211 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2215 stream_.device[mode] = device;
\r
2216 stream_.channelOffset[mode] = firstChannel;
\r
2217 stream_.state = STREAM_STOPPED;
\r
2218 stream_.callbackInfo.object = (void *) this;
\r
2220 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2221 // We had already set up the stream for output.
\r
2222 stream_.mode = DUPLEX;
\r
2224 stream_.mode = mode;
\r
2225 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2226 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2227 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2230 // Register our ports.
\r
2232 if ( mode == OUTPUT ) {
\r
2233 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2234 snprintf( label, 64, "outport %d", i );
\r
2235 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2236 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2240 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2241 snprintf( label, 64, "inport %d", i );
\r
2242 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2243 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2247 // Setup the buffer conversion information structure. We don't use
\r
2248 // buffers to do channel offsets, so we override that parameter
\r
2250 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2256 pthread_cond_destroy( &handle->condition );
\r
2257 jack_client_close( handle->client );
\r
2259 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2260 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2263 stream_.apiHandle = 0;
\r
2266 for ( int i=0; i<2; i++ ) {
\r
2267 if ( stream_.userBuffer[i] ) {
\r
2268 free( stream_.userBuffer[i] );
\r
2269 stream_.userBuffer[i] = 0;
\r
2273 if ( stream_.deviceBuffer ) {
\r
2274 free( stream_.deviceBuffer );
\r
2275 stream_.deviceBuffer = 0;
\r
2281 void RtApiJack :: closeStream( void )
\r
2283 if ( stream_.state == STREAM_CLOSED ) {
\r
2284 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2285 error( RtError::WARNING );
\r
2289 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2292 if ( stream_.state == STREAM_RUNNING )
\r
2293 jack_deactivate( handle->client );
\r
2295 jack_client_close( handle->client );
\r
2299 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2300 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2301 pthread_cond_destroy( &handle->condition );
\r
2303 stream_.apiHandle = 0;
\r
2306 for ( int i=0; i<2; i++ ) {
\r
2307 if ( stream_.userBuffer[i] ) {
\r
2308 free( stream_.userBuffer[i] );
\r
2309 stream_.userBuffer[i] = 0;
\r
2313 if ( stream_.deviceBuffer ) {
\r
2314 free( stream_.deviceBuffer );
\r
2315 stream_.deviceBuffer = 0;
\r
2318 stream_.mode = UNINITIALIZED;
\r
2319 stream_.state = STREAM_CLOSED;
\r
2322 void RtApiJack :: startStream( void )
\r
2325 if ( stream_.state == STREAM_RUNNING ) {
\r
2326 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2327 error( RtError::WARNING );
\r
2331 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2332 int result = jack_activate( handle->client );
\r
2334 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2338 const char **ports;
\r
2340 // Get the list of available ports.
\r
2341 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2343 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2344 if ( ports == NULL) {
\r
2345 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2349 // Now make the port connections. Since RtAudio wasn't designed to
\r
2350 // allow the user to select particular channels of a device, we'll
\r
2351 // just open the first "nChannels" ports with offset.
\r
2352 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2354 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2355 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2358 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2365 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2367 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2368 if ( ports == NULL) {
\r
2369 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2373 // Now make the port connections. See note above.
\r
2374 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2376 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2377 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2380 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2387 handle->drainCounter = 0;
\r
2388 handle->internalDrain = false;
\r
2389 stream_.state = STREAM_RUNNING;
\r
2392 if ( result == 0 ) return;
\r
2393 error( RtError::SYSTEM_ERROR );
\r
2396 void RtApiJack :: stopStream( void )
\r
2399 if ( stream_.state == STREAM_STOPPED ) {
\r
2400 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2401 error( RtError::WARNING );
\r
2405 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2406 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2408 if ( handle->drainCounter == 0 ) {
\r
2409 handle->drainCounter = 2;
\r
2410 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2414 jack_deactivate( handle->client );
\r
2415 stream_.state = STREAM_STOPPED;
\r
2418 void RtApiJack :: abortStream( void )
\r
2421 if ( stream_.state == STREAM_STOPPED ) {
\r
2422 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2423 error( RtError::WARNING );
\r
2427 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2428 handle->drainCounter = 2;
\r
2433 // This function will be called by a spawned thread when the user
\r
2434 // callback function signals that the stream should be stopped or
\r
2435 // aborted. It is necessary to handle it this way because the
\r
2436 // callbackEvent() function must return before the jack_deactivate()
\r
2437 // function will return.
\r
2438 static void *jackStopStream( void *ptr )
\r
2440 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2441 RtApiJack *object = (RtApiJack *) info->object;
\r
2443 object->stopStream();
\r
2444 pthread_exit( NULL );
\r
2447 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2449 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2450 if ( stream_.state == STREAM_CLOSED ) {
\r
2451 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2452 error( RtError::WARNING );
\r
2455 if ( stream_.bufferSize != nframes ) {
\r
2456 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2457 error( RtError::WARNING );
\r
2461 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2462 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2464 // Check if we were draining the stream and signal is finished.
\r
2465 if ( handle->drainCounter > 3 ) {
\r
2466 ThreadHandle threadId;
\r
2468 stream_.state = STREAM_STOPPING;
\r
2469 if ( handle->internalDrain == true )
\r
2470 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2472 pthread_cond_signal( &handle->condition );
\r
2476 // Invoke user callback first, to get fresh output data.
\r
2477 if ( handle->drainCounter == 0 ) {
\r
2478 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2479 double streamTime = getStreamTime();
\r
2480 RtAudioStreamStatus status = 0;
\r
2481 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2482 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2483 handle->xrun[0] = false;
\r
2485 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2486 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2487 handle->xrun[1] = false;
\r
2489 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2490 stream_.bufferSize, streamTime, status, info->userData );
\r
2491 if ( cbReturnValue == 2 ) {
\r
2492 stream_.state = STREAM_STOPPING;
\r
2493 handle->drainCounter = 2;
\r
2495 pthread_create( &id, NULL, jackStopStream, info );
\r
2498 else if ( cbReturnValue == 1 ) {
\r
2499 handle->drainCounter = 1;
\r
2500 handle->internalDrain = true;
\r
2504 jack_default_audio_sample_t *jackbuffer;
\r
2505 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2506 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2508 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2510 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2511 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2512 memset( jackbuffer, 0, bufferBytes );
\r
2516 else if ( stream_.doConvertBuffer[0] ) {
\r
2518 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2520 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2521 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2522 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2525 else { // no buffer conversion
\r
2526 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2527 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2528 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2532 if ( handle->drainCounter ) {
\r
2533 handle->drainCounter++;
\r
2538 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2540 if ( stream_.doConvertBuffer[1] ) {
\r
2541 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2542 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2543 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2545 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2547 else { // no buffer conversion
\r
2548 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2549 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2550 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2556 RtApi::tickStreamTime();
\r
2559 //******************** End of __UNIX_JACK__ *********************//
\r
2562 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2564 // The ASIO API is designed around a callback scheme, so this
\r
2565 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2566 // Jack. The primary constraint with ASIO is that it only allows
\r
2567 // access to a single driver at a time. Thus, it is not possible to
\r
2568 // have more than one simultaneous RtAudio stream.
\r
2570 // This implementation also requires a number of external ASIO files
\r
2571 // and a few global variables. The ASIO callback scheme does not
\r
2572 // allow for the passing of user data, so we must create a global
\r
2573 // pointer to our callbackInfo structure.
\r
2575 // On unix systems, we make use of a pthread condition variable.
\r
2576 // Since there is no equivalent in Windows, I hacked something based
\r
2577 // on information found in
\r
2578 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2580 #include "asiosys.h"
\r
2582 #include "iasiothiscallresolver.h"
\r
2583 #include "asiodrivers.h"
\r
2586 static AsioDrivers drivers;
\r
2587 static ASIOCallbacks asioCallbacks;
\r
2588 static ASIODriverInfo driverInfo;
\r
2589 static CallbackInfo *asioCallbackInfo;
\r
2590 static bool asioXRun;
\r
2592 struct AsioHandle {
\r
2593 int drainCounter; // Tracks callback counts when draining
\r
2594 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2595 ASIOBufferInfo *bufferInfos;
\r
2599 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2602 // Function declarations (definitions at end of section)
\r
2603 static const char* getAsioErrorString( ASIOError result );
\r
2604 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2605 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2607 RtApiAsio :: RtApiAsio()
\r
2609 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2610 // CoInitialize beforehand, but it must be for appartment threading
\r
2611 // (in which case, CoInitilialize will return S_FALSE here).
\r
2612 coInitialized_ = false;
\r
2613 HRESULT hr = CoInitialize( NULL );
\r
2614 if ( FAILED(hr) ) {
\r
2615 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2616 error( RtError::WARNING );
\r
2618 coInitialized_ = true;
\r
2620 drivers.removeCurrentDriver();
\r
2621 driverInfo.asioVersion = 2;
\r
2623 // See note in DirectSound implementation about GetDesktopWindow().
\r
2624 driverInfo.sysRef = GetForegroundWindow();
\r
2627 RtApiAsio :: ~RtApiAsio()
\r
2629 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2630 if ( coInitialized_ ) CoUninitialize();
\r
2633 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2635 return (unsigned int) drivers.asioGetNumDev();
\r
2638 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2640 RtAudio::DeviceInfo info;
\r
2641 info.probed = false;
\r
2644 unsigned int nDevices = getDeviceCount();
\r
2645 if ( nDevices == 0 ) {
\r
2646 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2647 error( RtError::INVALID_USE );
\r
2651 if ( device >= nDevices ) {
\r
2652 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2653 error( RtError::INVALID_USE );
\r
2657 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2658 if ( stream_.state != STREAM_CLOSED ) {
\r
2659 if ( device >= devices_.size() ) {
\r
2660 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2661 error( RtError::WARNING );
\r
2664 return devices_[ device ];
\r
2667 char driverName[32];
\r
2668 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2669 if ( result != ASE_OK ) {
\r
2670 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2671 errorText_ = errorStream_.str();
\r
2672 error( RtError::WARNING );
\r
2676 info.name = driverName;
\r
2678 if ( !drivers.loadDriver( driverName ) ) {
\r
2679 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2680 errorText_ = errorStream_.str();
\r
2681 error( RtError::WARNING );
\r
2685 result = ASIOInit( &driverInfo );
\r
2686 if ( result != ASE_OK ) {
\r
2687 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2688 errorText_ = errorStream_.str();
\r
2689 error( RtError::WARNING );
\r
2693 // Determine the device channel information.
\r
2694 long inputChannels, outputChannels;
\r
2695 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2696 if ( result != ASE_OK ) {
\r
2697 drivers.removeCurrentDriver();
\r
2698 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2699 errorText_ = errorStream_.str();
\r
2700 error( RtError::WARNING );
\r
2704 info.outputChannels = outputChannels;
\r
2705 info.inputChannels = inputChannels;
\r
2706 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2707 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2709 // Determine the supported sample rates.
\r
2710 info.sampleRates.clear();
\r
2711 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2712 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2713 if ( result == ASE_OK )
\r
2714 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2717 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2718 ASIOChannelInfo channelInfo;
\r
2719 channelInfo.channel = 0;
\r
2720 channelInfo.isInput = true;
\r
2721 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2722 result = ASIOGetChannelInfo( &channelInfo );
\r
2723 if ( result != ASE_OK ) {
\r
2724 drivers.removeCurrentDriver();
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtError::WARNING );
\r
2731 info.nativeFormats = 0;
\r
2732 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2733 info.nativeFormats |= RTAUDIO_SINT16;
\r
2734 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2735 info.nativeFormats |= RTAUDIO_SINT32;
\r
2736 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2737 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2738 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2739 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2740 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2741 info.nativeFormats |= RTAUDIO_SINT24;
\r
2743 if ( info.outputChannels > 0 )
\r
2744 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2745 if ( info.inputChannels > 0 )
\r
2746 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2748 info.probed = true;
\r
2749 drivers.removeCurrentDriver();
\r
2753 static void bufferSwitch( long index, ASIOBool processNow )
\r
2755 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2756 object->callbackEvent( index );
\r
2759 void RtApiAsio :: saveDeviceInfo( void )
\r
2763 unsigned int nDevices = getDeviceCount();
\r
2764 devices_.resize( nDevices );
\r
2765 for ( unsigned int i=0; i<nDevices; i++ )
\r
2766 devices_[i] = getDeviceInfo( i );
\r
2769 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2770 unsigned int firstChannel, unsigned int sampleRate,
\r
2771 RtAudioFormat format, unsigned int *bufferSize,
\r
2772 RtAudio::StreamOptions *options )
\r
2774 // For ASIO, a duplex stream MUST use the same driver.
\r
2775 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2776 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2780 char driverName[32];
\r
2781 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2782 if ( result != ASE_OK ) {
\r
2783 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2784 errorText_ = errorStream_.str();
\r
2788 // Only load the driver once for duplex stream.
\r
2789 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2790 // The getDeviceInfo() function will not work when a stream is open
\r
2791 // because ASIO does not allow multiple devices to run at the same
\r
2792 // time. Thus, we'll probe the system before opening a stream and
\r
2793 // save the results for use by getDeviceInfo().
\r
2794 this->saveDeviceInfo();
\r
2796 if ( !drivers.loadDriver( driverName ) ) {
\r
2797 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2798 errorText_ = errorStream_.str();
\r
2802 result = ASIOInit( &driverInfo );
\r
2803 if ( result != ASE_OK ) {
\r
2804 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2810 // Check the device channel count.
\r
2811 long inputChannels, outputChannels;
\r
2812 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2813 if ( result != ASE_OK ) {
\r
2814 drivers.removeCurrentDriver();
\r
2815 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2816 errorText_ = errorStream_.str();
\r
2820 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2821 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2822 drivers.removeCurrentDriver();
\r
2823 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2824 errorText_ = errorStream_.str();
\r
2827 stream_.nDeviceChannels[mode] = channels;
\r
2828 stream_.nUserChannels[mode] = channels;
\r
2829 stream_.channelOffset[mode] = firstChannel;
\r
2831 // Verify the sample rate is supported.
\r
2832 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2840 // Get the current sample rate
\r
2841 ASIOSampleRate currentRate;
\r
2842 result = ASIOGetSampleRate( ¤tRate );
\r
2843 if ( result != ASE_OK ) {
\r
2844 drivers.removeCurrentDriver();
\r
2845 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2846 errorText_ = errorStream_.str();
\r
2850 // Set the sample rate only if necessary
\r
2851 if ( currentRate != sampleRate ) {
\r
2852 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2853 if ( result != ASE_OK ) {
\r
2854 drivers.removeCurrentDriver();
\r
2855 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2856 errorText_ = errorStream_.str();
\r
2861 // Determine the driver data type.
\r
2862 ASIOChannelInfo channelInfo;
\r
2863 channelInfo.channel = 0;
\r
2864 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2865 else channelInfo.isInput = true;
\r
2866 result = ASIOGetChannelInfo( &channelInfo );
\r
2867 if ( result != ASE_OK ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2870 errorText_ = errorStream_.str();
\r
2874 // Assuming WINDOWS host is always little-endian.
\r
2875 stream_.doByteSwap[mode] = false;
\r
2876 stream_.userFormat = format;
\r
2877 stream_.deviceFormat[mode] = 0;
\r
2878 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2879 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2880 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2882 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2883 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2884 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2886 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2887 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2888 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2890 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2891 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2892 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2894 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2895 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2896 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2899 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2902 errorText_ = errorStream_.str();
\r
2906 // Set the buffer size. For a duplex stream, this will end up
\r
2907 // setting the buffer size based on the input constraints, which
\r
2909 long minSize, maxSize, preferSize, granularity;
\r
2910 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2911 if ( result != ASE_OK ) {
\r
2912 drivers.removeCurrentDriver();
\r
2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2914 errorText_ = errorStream_.str();
\r
2918 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2919 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2920 else if ( granularity == -1 ) {
\r
2921 // Make sure bufferSize is a power of two.
\r
2922 int log2_of_min_size = 0;
\r
2923 int log2_of_max_size = 0;
\r
2925 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2926 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2927 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2930 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2931 int min_delta_num = log2_of_min_size;
\r
2933 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2934 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2935 if (current_delta < min_delta) {
\r
2936 min_delta = current_delta;
\r
2937 min_delta_num = i;
\r
2941 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2942 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2943 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2945 else if ( granularity != 0 ) {
\r
2946 // Set to an even multiple of granularity, rounding up.
\r
2947 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2950 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2951 drivers.removeCurrentDriver();
\r
2952 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2956 stream_.bufferSize = *bufferSize;
\r
2957 stream_.nBuffers = 2;
\r
2959 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2960 else stream_.userInterleaved = true;
\r
2962 // ASIO always uses non-interleaved buffers.
\r
2963 stream_.deviceInterleaved[mode] = false;
\r
2965 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2966 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2967 if ( handle == 0 ) {
\r
2969 handle = new AsioHandle;
\r
2971 catch ( std::bad_alloc& ) {
\r
2972 //if ( handle == NULL ) {
\r
2973 drivers.removeCurrentDriver();
\r
2974 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2977 handle->bufferInfos = 0;
\r
2979 // Create a manual-reset event.
\r
2980 handle->condition = CreateEvent( NULL, // no security
\r
2981 TRUE, // manual-reset
\r
2982 FALSE, // non-signaled initially
\r
2983 NULL ); // unnamed
\r
2984 stream_.apiHandle = (void *) handle;
\r
2987 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2988 // and output separately, we'll have to dispose of previously
\r
2989 // created output buffers for a duplex stream.
\r
2990 long inputLatency, outputLatency;
\r
2991 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2992 ASIODisposeBuffers();
\r
2993 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2996 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2997 bool buffersAllocated = false;
\r
2998 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2999 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3000 if ( handle->bufferInfos == NULL ) {
\r
3001 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3002 errorText_ = errorStream_.str();
\r
3006 ASIOBufferInfo *infos;
\r
3007 infos = handle->bufferInfos;
\r
3008 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3009 infos->isInput = ASIOFalse;
\r
3010 infos->channelNum = i + stream_.channelOffset[0];
\r
3011 infos->buffers[0] = infos->buffers[1] = 0;
\r
3013 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3014 infos->isInput = ASIOTrue;
\r
3015 infos->channelNum = i + stream_.channelOffset[1];
\r
3016 infos->buffers[0] = infos->buffers[1] = 0;
\r
3019 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3020 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3021 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3022 asioCallbacks.asioMessage = &asioMessages;
\r
3023 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3024 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3025 if ( result != ASE_OK ) {
\r
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3027 errorText_ = errorStream_.str();
\r
3030 buffersAllocated = true;
\r
3032 // Set flags for buffer conversion.
\r
3033 stream_.doConvertBuffer[mode] = false;
\r
3034 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3035 stream_.doConvertBuffer[mode] = true;
\r
3036 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3037 stream_.nUserChannels[mode] > 1 )
\r
3038 stream_.doConvertBuffer[mode] = true;
\r
3040 // Allocate necessary internal buffers
\r
3041 unsigned long bufferBytes;
\r
3042 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3043 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3044 if ( stream_.userBuffer[mode] == NULL ) {
\r
3045 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3049 if ( stream_.doConvertBuffer[mode] ) {
\r
3051 bool makeBuffer = true;
\r
3052 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3053 if ( mode == INPUT ) {
\r
3054 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3055 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3056 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3060 if ( makeBuffer ) {
\r
3061 bufferBytes *= *bufferSize;
\r
3062 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3063 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3064 if ( stream_.deviceBuffer == NULL ) {
\r
3065 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3071 stream_.sampleRate = sampleRate;
\r
3072 stream_.device[mode] = device;
\r
3073 stream_.state = STREAM_STOPPED;
\r
3074 asioCallbackInfo = &stream_.callbackInfo;
\r
3075 stream_.callbackInfo.object = (void *) this;
\r
3076 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3077 // We had already set up an output stream.
\r
3078 stream_.mode = DUPLEX;
\r
3080 stream_.mode = mode;
\r
3082 // Determine device latencies
\r
3083 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3084 if ( result != ASE_OK ) {
\r
3085 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3086 errorText_ = errorStream_.str();
\r
3087 error( RtError::WARNING); // warn but don't fail
\r
3090 stream_.latency[0] = outputLatency;
\r
3091 stream_.latency[1] = inputLatency;
\r
3094 // Setup the buffer conversion information structure. We don't use
\r
3095 // buffers to do channel offsets, so we override that parameter
\r
3097 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3102 if ( buffersAllocated )
\r
3103 ASIODisposeBuffers();
\r
3104 drivers.removeCurrentDriver();
\r
3107 CloseHandle( handle->condition );
\r
3108 if ( handle->bufferInfos )
\r
3109 free( handle->bufferInfos );
\r
3111 stream_.apiHandle = 0;
\r
3114 for ( int i=0; i<2; i++ ) {
\r
3115 if ( stream_.userBuffer[i] ) {
\r
3116 free( stream_.userBuffer[i] );
\r
3117 stream_.userBuffer[i] = 0;
\r
3121 if ( stream_.deviceBuffer ) {
\r
3122 free( stream_.deviceBuffer );
\r
3123 stream_.deviceBuffer = 0;
\r
3129 void RtApiAsio :: closeStream()
\r
3131 if ( stream_.state == STREAM_CLOSED ) {
\r
3132 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3133 error( RtError::WARNING );
\r
3137 if ( stream_.state == STREAM_RUNNING ) {
\r
3138 stream_.state = STREAM_STOPPED;
\r
3141 ASIODisposeBuffers();
\r
3142 drivers.removeCurrentDriver();
\r
3144 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3146 CloseHandle( handle->condition );
\r
3147 if ( handle->bufferInfos )
\r
3148 free( handle->bufferInfos );
\r
3150 stream_.apiHandle = 0;
\r
3153 for ( int i=0; i<2; i++ ) {
\r
3154 if ( stream_.userBuffer[i] ) {
\r
3155 free( stream_.userBuffer[i] );
\r
3156 stream_.userBuffer[i] = 0;
\r
3160 if ( stream_.deviceBuffer ) {
\r
3161 free( stream_.deviceBuffer );
\r
3162 stream_.deviceBuffer = 0;
\r
3165 stream_.mode = UNINITIALIZED;
\r
3166 stream_.state = STREAM_CLOSED;
\r
3169 bool stopThreadCalled = false;
\r
3171 void RtApiAsio :: startStream()
\r
3174 if ( stream_.state == STREAM_RUNNING ) {
\r
3175 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3176 error( RtError::WARNING );
\r
3180 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3181 ASIOError result = ASIOStart();
\r
3182 if ( result != ASE_OK ) {
\r
3183 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3184 errorText_ = errorStream_.str();
\r
3188 handle->drainCounter = 0;
\r
3189 handle->internalDrain = false;
\r
3190 ResetEvent( handle->condition );
\r
3191 stream_.state = STREAM_RUNNING;
\r
3195 stopThreadCalled = false;
\r
3197 if ( result == ASE_OK ) return;
\r
3198 error( RtError::SYSTEM_ERROR );
\r
3201 void RtApiAsio :: stopStream()
\r
3204 if ( stream_.state == STREAM_STOPPED ) {
\r
3205 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3206 error( RtError::WARNING );
\r
3210 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3211 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3212 if ( handle->drainCounter == 0 ) {
\r
3213 handle->drainCounter = 2;
\r
3214 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3218 stream_.state = STREAM_STOPPED;
\r
3220 ASIOError result = ASIOStop();
\r
3221 if ( result != ASE_OK ) {
\r
3222 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3223 errorText_ = errorStream_.str();
\r
3226 if ( result == ASE_OK ) return;
\r
3227 error( RtError::SYSTEM_ERROR );
\r
3230 void RtApiAsio :: abortStream()
\r
3233 if ( stream_.state == STREAM_STOPPED ) {
\r
3234 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3235 error( RtError::WARNING );
\r
3239 // The following lines were commented-out because some behavior was
\r
3240 // noted where the device buffers need to be zeroed to avoid
\r
3241 // continuing sound, even when the device buffers are completely
\r
3242 // disposed. So now, calling abort is the same as calling stop.
\r
3243 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3244 // handle->drainCounter = 2;
\r
3248 // This function will be called by a spawned thread when the user
\r
3249 // callback function signals that the stream should be stopped or
\r
3250 // aborted. It is necessary to handle it this way because the
\r
3251 // callbackEvent() function must return before the ASIOStop()
\r
3252 // function will return.
\r
3253 static unsigned __stdcall asioStopStream( void *ptr )
\r
3255 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3256 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3258 object->stopStream();
\r
3259 _endthreadex( 0 );
\r
3263 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3265 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3266 if ( stream_.state == STREAM_CLOSED ) {
\r
3267 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3268 error( RtError::WARNING );
\r
3272 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3273 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3275 // Check if we were draining the stream and signal if finished.
\r
3276 if ( handle->drainCounter > 3 ) {
\r
3278 stream_.state = STREAM_STOPPING;
\r
3279 if ( handle->internalDrain == false )
\r
3280 SetEvent( handle->condition );
\r
3281 else { // spawn a thread to stop the stream
\r
3282 unsigned threadId;
\r
3283 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3284 &stream_.callbackInfo, 0, &threadId );
\r
3289 // Invoke user callback to get fresh output data UNLESS we are
\r
3290 // draining stream.
\r
3291 if ( handle->drainCounter == 0 ) {
\r
3292 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3293 double streamTime = getStreamTime();
\r
3294 RtAudioStreamStatus status = 0;
\r
3295 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3296 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3299 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3300 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3303 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3304 stream_.bufferSize, streamTime, status, info->userData );
\r
3305 if ( cbReturnValue == 2 ) {
\r
3306 stream_.state = STREAM_STOPPING;
\r
3307 handle->drainCounter = 2;
\r
3308 unsigned threadId;
\r
3309 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3310 &stream_.callbackInfo, 0, &threadId );
\r
3313 else if ( cbReturnValue == 1 ) {
\r
3314 handle->drainCounter = 1;
\r
3315 handle->internalDrain = true;
\r
3319 unsigned int nChannels, bufferBytes, i, j;
\r
3320 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3323 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3325 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3327 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3328 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3329 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3333 else if ( stream_.doConvertBuffer[0] ) {
\r
3335 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3336 if ( stream_.doByteSwap[0] )
\r
3337 byteSwapBuffer( stream_.deviceBuffer,
\r
3338 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3339 stream_.deviceFormat[0] );
\r
3341 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3342 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3343 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3344 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3350 if ( stream_.doByteSwap[0] )
\r
3351 byteSwapBuffer( stream_.userBuffer[0],
\r
3352 stream_.bufferSize * stream_.nUserChannels[0],
\r
3353 stream_.userFormat );
\r
3355 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3356 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3357 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3358 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3363 if ( handle->drainCounter ) {
\r
3364 handle->drainCounter++;
\r
3369 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3371 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3373 if (stream_.doConvertBuffer[1]) {
\r
3375 // Always interleave ASIO input data.
\r
3376 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3377 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3378 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3379 handle->bufferInfos[i].buffers[bufferIndex],
\r
3383 if ( stream_.doByteSwap[1] )
\r
3384 byteSwapBuffer( stream_.deviceBuffer,
\r
3385 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3386 stream_.deviceFormat[1] );
\r
3387 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3391 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3392 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3393 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3394 handle->bufferInfos[i].buffers[bufferIndex],
\r
3399 if ( stream_.doByteSwap[1] )
\r
3400 byteSwapBuffer( stream_.userBuffer[1],
\r
3401 stream_.bufferSize * stream_.nUserChannels[1],
\r
3402 stream_.userFormat );
\r
3407 // The following call was suggested by Malte Clasen. While the API
\r
3408 // documentation indicates it should not be required, some device
\r
3409 // drivers apparently do not function correctly without it.
\r
3410 ASIOOutputReady();
\r
3412 RtApi::tickStreamTime();
\r
3416 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3418 // The ASIO documentation says that this usually only happens during
\r
3419 // external sync. Audio processing is not stopped by the driver,
\r
3420 // actual sample rate might not have even changed, maybe only the
\r
3421 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3424 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3426 object->stopStream();
\r
3428 catch ( RtError &exception ) {
\r
3429 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3433 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3436 static long asioMessages( long selector, long value, void* message, double* opt )
\r
3440 switch( selector ) {
\r
3441 case kAsioSelectorSupported:
\r
3442 if ( value == kAsioResetRequest
\r
3443 || value == kAsioEngineVersion
\r
3444 || value == kAsioResyncRequest
\r
3445 || value == kAsioLatenciesChanged
\r
3446 // The following three were added for ASIO 2.0, you don't
\r
3447 // necessarily have to support them.
\r
3448 || value == kAsioSupportsTimeInfo
\r
3449 || value == kAsioSupportsTimeCode
\r
3450 || value == kAsioSupportsInputMonitor)
\r
3453 case kAsioResetRequest:
\r
3454 // Defer the task and perform the reset of the driver during the
\r
3455 // next "safe" situation. You cannot reset the driver right now,
\r
3456 // as this code is called from the driver. Reset the driver is
\r
3457 // done by completely destruct is. I.e. ASIOStop(),
\r
3458 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3460 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3463 case kAsioResyncRequest:
\r
3464 // This informs the application that the driver encountered some
\r
3465 // non-fatal data loss. It is used for synchronization purposes
\r
3466 // of different media. Added mainly to work around the Win16Mutex
\r
3467 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3468 // which could lose data because the Mutex was held too long by
\r
3469 // another thread. However a driver can issue it in other
\r
3470 // situations, too.
\r
3471 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3475 case kAsioLatenciesChanged:
\r
3476 // This will inform the host application that the drivers were
\r
3477 // latencies changed. Beware, it this does not mean that the
\r
3478 // buffer sizes have changed! You might need to update internal
\r
3480 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3483 case kAsioEngineVersion:
\r
3484 // Return the supported ASIO version of the host application. If
\r
3485 // a host application does not implement this selector, ASIO 1.0
\r
3486 // is assumed by the driver.
\r
3489 case kAsioSupportsTimeInfo:
\r
3490 // Informs the driver whether the
\r
3491 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3492 // For compatibility with ASIO 1.0 drivers the host application
\r
3493 // should always support the "old" bufferSwitch method, too.
\r
3496 case kAsioSupportsTimeCode:
\r
3497 // Informs the driver whether application is interested in time
\r
3498 // code info. If an application does not need to know about time
\r
3499 // code, the driver has less work to do.
\r
3506 static const char* getAsioErrorString( ASIOError result )
\r
3511 const char*message;
\r
3514 static const Messages m[] =
\r
3516 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3517 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3518 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3519 { ASE_InvalidMode, "Invalid mode." },
\r
3520 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3521 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3522 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3525 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3526 if ( m[i].value == result ) return m[i].message;
\r
3528 return "Unknown error.";
\r
3530 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3534 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3536 // Modified by Robin Davies, October 2005
\r
3537 // - Improvements to DirectX pointer chasing.
\r
3538 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3539 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3540 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3541 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3543 #include <dsound.h>
\r
3544 #include <assert.h>
\r
3545 #include <algorithm>
\r
3547 #if defined(__MINGW32__)
\r
3548 // missing from latest mingw winapi
\r
3549 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3550 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3551 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3552 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3555 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3557 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3558 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3561 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3563 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3564 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3565 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3566 return pointer >= earlierPointer && pointer < laterPointer;
\r
3569 // A structure to hold various information related to the DirectSound
3570 // API implementation.
// NOTE(review): the `struct DsHandle {` header line and several member
// declarations (e.g. id[2], buffer[2], xrun[2], the condition event handle)
// are not visible here — presumably lost in extraction; the constructor at
// line "3583" below initializes them, so they exist in the full file.
3572 unsigned int drainCounter; // Tracks callback counts when draining
3573 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state, indexed [0]=playback, [1]=capture (matching id/buffer use below).
3577 UINT bufferPointer[2];
3578 DWORD dsBufferSize[2];
3579 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// DsHandle default constructor: zero/false-initialize all per-direction state.
3583 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
3586 // Declarations for utility functions, callbacks, and structures
3587 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; fills the
// DsProbeData handed in via lpContext.  (A middle parameter line appears to
// be missing from this extraction.)
3588 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
3589 LPCTSTR description,
3591 LPVOID lpContext );
// Maps an HRESULT-style code to a human-readable message for errorStream_.
3593 static const char* getErrorString( int code );
// Thread entry point created via _beginthreadex in probeDeviceOpen.
3595 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor fragment (struct header not visible here).
3604 : found(false) { validId[0] = false; validId[1] = false; }
// Context record threaded through the device-enumeration callbacks.
3607 struct DsProbeData {
3609 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread if possible.  The matching
// CoUninitialize happens in the destructor only when this call succeeded.
3612 RtApiDs :: RtApiDs()
3614 // Dsound will run both-threaded. If CoInitialize fails, then just
3615 // accept whatever the mainline chose for a threading model.
3616 coInitialized_ = false;
3617 HRESULT hr = CoInitialize( NULL );
// S_FALSE (already initialized) also passes !FAILED, so the balanced
// CoUninitialize in the destructor remains correct.
3618 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: release COM (only if we initialized it) and make sure any
// open stream is torn down before the object goes away.
3621 RtApiDs :: ~RtApiDs()
3623 if ( coInitialized_ ) CoUninitialize(); // balanced call.
3624 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3627 // The DirectSound default output is always the first device.
// NOTE(review): the body of this function is not visible in this extraction;
// per the comment above it presumably returns 0 — confirm against the
// canonical file before relying on this.
3628 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3633 // The DirectSound default input is always the first input device,
3634 // which is the first capture device enumerated.
// NOTE(review): the body of this function is not visible in this extraction;
// per the comment above it presumably returns 0 — confirm against the
// canonical file before relying on this.
3635 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3640 unsigned int RtApiDs :: getDeviceCount( void )
\r
3642 // Set query flag for previously found devices to false, so that we
\r
3643 // can check for any devices that have disappeared.
\r
3644 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3645 dsDevices[i].found = false;
\r
3647 // Query DirectSound devices.
\r
3648 struct DsProbeData probeInfo;
\r
3649 probeInfo.isInput = false;
\r
3650 probeInfo.dsDevices = &dsDevices;
\r
3651 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3652 if ( FAILED( result ) ) {
\r
3653 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3654 errorText_ = errorStream_.str();
\r
3655 error( RtError::WARNING );
\r
3658 // Query DirectSoundCapture devices.
\r
3659 probeInfo.isInput = true;
\r
3660 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3661 if ( FAILED( result ) ) {
\r
3662 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3663 errorText_ = errorStream_.str();
\r
3664 error( RtError::WARNING );
\r
3667 // Clean out any devices that may have disappeared.
\r
3668 std::vector< int > indices;
\r
3669 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3670 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3671 unsigned int nErased = 0;
\r
3672 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3673 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3675 return dsDevices.size();
\r
// Probe one device (by index into dsDevices) and fill an RtAudio::DeviceInfo
// with its channel counts, supported sample rates, and native formats.
// Failures while probing are reported as warnings and leave info.probed false.
// NOTE(review): this extraction is missing several original lines (closing
// braces, `HRESULT result;` / `DSCAPS outCaps;` / `DSCCAPS inCaps;`
// declarations, the `probeInput:` label targeted by the goto below, and the
// final `return info;`).  Only comments were added here; code bytes untouched.
3678 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
3680 RtAudio::DeviceInfo info;
3681 info.probed = false;
3683 if ( dsDevices.size() == 0 ) {
3684 // Force a query of all devices
// (A getDeviceCount() call presumably sits here — line lost in extraction.)
3686 if ( dsDevices.size() == 0 ) {
3687 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
3688 error( RtError::INVALID_USE );
3693 if ( device >= dsDevices.size() ) {
3694 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
3695 error( RtError::INVALID_USE );
// Skip the output probe entirely for capture-only devices.
3700 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
3702 LPDIRECTSOUND output;
3704 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
3705 if ( FAILED( result ) ) {
3706 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
3707 errorText_ = errorStream_.str();
3708 error( RtError::WARNING );
3712 outCaps.dwSize = sizeof( outCaps );
3713 result = output->GetCaps( &outCaps );
3714 if ( FAILED( result ) ) {
3715 output->Release();
3716 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
3717 errorText_ = errorStream_.str();
3718 error( RtError::WARNING );
3722 // Get output channel information.
3723 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
3725 // Get sample rate information.
3726 info.sampleRates.clear();
3727 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
3728 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
3729 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
3730 info.sampleRates.push_back( SAMPLE_RATES[k] );
3733 // Get format information.
3734 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
3735 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
3737 output->Release();
3739 if ( getDefaultOutputDevice() == device )
3740 info.isDefaultOutput = true;
// Output-only device: nothing to probe on the capture side; mark success.
3742 if ( dsDevices[ device ].validId[1] == false ) {
3743 info.name = dsDevices[ device ].name;
3744 info.probed = true;
// (The `probeInput:` label presumably sits here — line lost in extraction.)
3750 LPDIRECTSOUNDCAPTURE input;
3751 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
3752 if ( FAILED( result ) ) {
3753 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
3754 errorText_ = errorStream_.str();
3755 error( RtError::WARNING );
3760 inCaps.dwSize = sizeof( inCaps );
3761 result = input->GetCaps( &inCaps );
3762 if ( FAILED( result ) ) {
3764 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
3765 errorText_ = errorStream_.str();
3766 error( RtError::WARNING );
3770 // Get input channel information.
3771 info.inputChannels = inCaps.dwChannels;
3773 // Get sample rate and format information.
// dwFormats is a WAVE_FORMAT_* bitmask; stereo (S) flags are tested for
// multi-channel devices, mono (M) flags for single-channel ones.
3774 std::vector<unsigned int> rates;
3775 if ( inCaps.dwChannels >= 2 ) {
3776 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3777 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3778 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3779 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3780 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3781 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3782 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3783 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Collect the supported rates for whichever format class was detected.
3785 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
3786 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
3787 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
3788 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
3789 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
3791 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
3792 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
3793 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
3794 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
3795 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
3798 else if ( inCaps.dwChannels == 1 ) {
3799 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3800 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3801 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3802 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3803 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3804 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3805 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3806 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3808 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
3809 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
3810 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
3811 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
3812 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
3814 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
3815 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
3816 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
3817 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
3818 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
3821 else info.inputChannels = 0; // technically, this would be an error
3825 if ( info.inputChannels == 0 ) return info;
3827 // Copy the supported rates to the info structure but avoid duplication.
3829 for ( unsigned int i=0; i<rates.size(); i++ ) {
3831 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
3832 if ( rates[i] == info.sampleRates[j] ) {
3837 if ( found == false ) info.sampleRates.push_back( rates[i] );
3839 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
3841 // If device opens for both playback and capture, we determine the channels.
3842 if ( info.outputChannels > 0 && info.inputChannels > 0 )
3843 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the default capture device (see getDefaultInputDevice above).
3845 if ( device == 0 ) info.isDefaultInput = true;
3847 // Copy name and return.
3848 info.name = dsDevices[ device ].name;
3849 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on `device`:
// validate the request, pick a device format, create and zero the DS
// playback (primary + secondary) or capture buffer, allocate RtAudio's
// user/device conversion buffers, create the shared DsHandle, and start the
// callback thread on first use.  Returns true on success.
// NOTE(review): this extraction is missing many original lines — most
// `return FAILURE;` statements inside the FAILED() branches, closing braces,
// declarations such as `HRESULT result;`, `int nBuffers = 0;`,
// `DSCAPS outCaps;`, `DSCCAPS inCaps;`, `DSBCAPS dsbcaps;`,
// `LPVOID audioPtr; DWORD dataLen;`, the `try {` for the DsHandle
// allocation, the `return SUCCESS;` and the `error:` unwind label.  Only
// comments were added here; code bytes are untouched.
3853 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3854 unsigned int firstChannel, unsigned int sampleRate,
3855 RtAudioFormat format, unsigned int *bufferSize,
3856 RtAudio::StreamOptions *options )
// DirectSound limits us to stereo per device.
3858 if ( channels + firstChannel > 2 ) {
3859 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
3863 unsigned int nDevices = dsDevices.size();
3864 if ( nDevices == 0 ) {
3865 // This should not happen because a check is made before this function is called.
3866 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
3870 if ( device >= nDevices ) {
3871 // This should not happen because a check is made before this function is called.
3872 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Verify the device actually supports the requested direction.
3876 if ( mode == OUTPUT ) {
3877 if ( dsDevices[ device ].validId[0] == false ) {
3878 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
3879 errorText_ = errorStream_.str();
3883 else { // mode == INPUT
3884 if ( dsDevices[ device ].validId[1] == false ) {
3885 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
3886 errorText_ = errorStream_.str();
3891 // According to a note in PortAudio, using GetDesktopWindow()
3892 // instead of GetForegroundWindow() is supposed to avoid problems
3893 // that occur when the application's window is not the foreground
3894 // window. Also, if the application window closes before the
3895 // DirectSound buffer, DirectSound can crash. In the past, I had
3896 // problems when using GetDesktopWindow() but it seems fine now
3897 // (January 2010). I'll leave it commented here.
3898 // HWND hWnd = GetForegroundWindow();
3899 HWND hWnd = GetDesktopWindow();
3901 // Check the numberOfBuffers parameter and limit the lowest value to
3902 // two. This is a judgement call and a value of two is probably too
3903 // low for capture, but it should work for playback.
3905 if ( options ) nBuffers = options->numberOfBuffers;
3906 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
3907 if ( nBuffers < 2 ) nBuffers = 3;
3909 // Check the lower range of the user-specified buffer size and set
3910 // (arbitrarily) to a lower bound of 32.
3911 if ( *bufferSize < 32 ) *bufferSize = 32;
3913 // Create the wave format structure. The data format setting will
3914 // be determined later.
3915 WAVEFORMATEX waveFormat;
3916 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
3917 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
3918 waveFormat.nChannels = channels + firstChannel;
3919 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
3921 // Determine the device buffer size. By default, we'll use the value
3922 // defined above (32K), but we will grow it to make allowances for
3923 // very large software buffer sizes.
3924 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
3925 DWORD dsPointerLeadTime = 0;
// Opaque handles stashed into the DsHandle at the end of this function.
3927 void *ohandle = 0, *bhandle = 0;
3929 if ( mode == OUTPUT ) {
3931 LPDIRECTSOUND output;
3932 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
3933 if ( FAILED( result ) ) {
3934 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
3935 errorText_ = errorStream_.str();
3940 outCaps.dwSize = sizeof( outCaps );
3941 result = output->GetCaps( &outCaps );
3942 if ( FAILED( result ) ) {
3943 output->Release();
3944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
3945 errorText_ = errorStream_.str();
3949 // Check channel information.
3950 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
3951 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
3952 errorText_ = errorStream_.str();
3956 // Check format information. Use 16-bit format unless not
3957 // supported or user requests 8-bit.
3958 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
3959 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
3960 waveFormat.wBitsPerSample = 16;
3961 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3964 waveFormat.wBitsPerSample = 8;
3965 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
3967 stream_.userFormat = format;
3969 // Update wave format structure and buffer information.
3970 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
3971 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
3972 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
3974 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
3975 while ( dsPointerLeadTime * 2U > dsBufferSize )
3976 dsBufferSize *= 2;
3978 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
3979 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
3980 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
3981 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
3982 if ( FAILED( result ) ) {
3983 output->Release();
3984 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
3985 errorText_ = errorStream_.str();
3989 // Even though we will write to the secondary buffer, we need to
3990 // access the primary buffer to set the correct output format
3991 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
3992 // buffer description.
3993 DSBUFFERDESC bufferDescription;
3994 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
3995 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
3996 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
3998 // Obtain the primary buffer
3999 LPDIRECTSOUNDBUFFER buffer;
4000 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
4001 if ( FAILED( result ) ) {
4002 output->Release();
4003 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
4004 errorText_ = errorStream_.str();
4008 // Set the primary DS buffer sound format.
4009 result = buffer->SetFormat( &waveFormat );
4010 if ( FAILED( result ) ) {
4011 output->Release();
4012 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
4013 errorText_ = errorStream_.str();
4017 // Setup the secondary DS buffer description.
4018 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
4019 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
4020 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
4021 DSBCAPS_GLOBALFOCUS |
4022 DSBCAPS_GETCURRENTPOSITION2 |
4023 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
4024 bufferDescription.dwBufferBytes = dsBufferSize;
4025 bufferDescription.lpwfxFormat = &waveFormat;
4027 // Try to create the secondary DS buffer. If that doesn't work,
4028 // try to use software mixing. Otherwise, there's a problem.
4029 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
4030 if ( FAILED( result ) ) {
4031 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
4032 DSBCAPS_GLOBALFOCUS |
4033 DSBCAPS_GETCURRENTPOSITION2 |
4034 DSBCAPS_LOCSOFTWARE ); // Force software mixing
4035 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
4036 if ( FAILED( result ) ) {
4037 output->Release();
4038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
4039 errorText_ = errorStream_.str();
4044 // Get the buffer size ... might be different from what we specified.
4046 dsbcaps.dwSize = sizeof( DSBCAPS );
4047 result = buffer->GetCaps( &dsbcaps );
4048 if ( FAILED( result ) ) {
4049 output->Release();
4050 buffer->Release();
4051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
4052 errorText_ = errorStream_.str();
4056 dsBufferSize = dsbcaps.dwBufferBytes;
4058 // Lock the DS buffer
4061 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
4062 if ( FAILED( result ) ) {
4063 output->Release();
4064 buffer->Release();
4065 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
4066 errorText_ = errorStream_.str();
4070 // Zero the DS buffer
4071 ZeroMemory( audioPtr, dataLen );
4073 // Unlock the DS buffer
4074 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
4075 if ( FAILED( result ) ) {
4076 output->Release();
4077 buffer->Release();
4078 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
4079 errorText_ = errorStream_.str();
4083 ohandle = (void *) output;
4084 bhandle = (void *) buffer;
4087 if ( mode == INPUT ) {
4089 LPDIRECTSOUNDCAPTURE input;
4090 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
4091 if ( FAILED( result ) ) {
4092 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
4093 errorText_ = errorStream_.str();
4098 inCaps.dwSize = sizeof( inCaps );
4099 result = input->GetCaps( &inCaps );
4100 if ( FAILED( result ) ) {
4102 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
4103 errorText_ = errorStream_.str();
4107 // Check channel information.
4108 if ( inCaps.dwChannels < channels + firstChannel ) {
4109 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
4113 // Check format information. Use 16-bit format unless user
4114 // requests 8-bit.
4115 DWORD deviceFormats;
4116 if ( channels + firstChannel == 2 ) {
4117 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
4118 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
4119 waveFormat.wBitsPerSample = 8;
4120 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
4122 else { // assume 16-bit is supported
4123 waveFormat.wBitsPerSample = 16;
4124 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
4127 else { // channel == 1
4128 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
4129 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
4130 waveFormat.wBitsPerSample = 8;
4131 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
4133 else { // assume 16-bit is supported
4134 waveFormat.wBitsPerSample = 16;
4135 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
4138 stream_.userFormat = format;
4140 // Update wave format structure and buffer information.
4141 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
4142 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
4143 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
4145 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
4146 while ( dsPointerLeadTime * 2U > dsBufferSize )
4147 dsBufferSize *= 2;
4149 // Setup the secondary DS buffer description.
4150 DSCBUFFERDESC bufferDescription;
4151 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
4152 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
4153 bufferDescription.dwFlags = 0;
4154 bufferDescription.dwReserved = 0;
4155 bufferDescription.dwBufferBytes = dsBufferSize;
4156 bufferDescription.lpwfxFormat = &waveFormat;
4158 // Create the capture buffer.
4159 LPDIRECTSOUNDCAPTUREBUFFER buffer;
4160 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
4161 if ( FAILED( result ) ) {
4163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
4164 errorText_ = errorStream_.str();
4168 // Get the buffer size ... might be different from what we specified.
4169 DSCBCAPS dscbcaps;
4170 dscbcaps.dwSize = sizeof( DSCBCAPS );
4171 result = buffer->GetCaps( &dscbcaps );
4172 if ( FAILED( result ) ) {
4174 buffer->Release();
4175 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
4176 errorText_ = errorStream_.str();
4180 dsBufferSize = dscbcaps.dwBufferBytes;
4182 // NOTE: We could have a problem here if this is a duplex stream
4183 // and the play and capture hardware buffer sizes are different
4184 // (I'm actually not sure if that is a problem or not).
4185 // Currently, we are not verifying that.
4187 // Lock the capture buffer
4190 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
4191 if ( FAILED( result ) ) {
4193 buffer->Release();
4194 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
4195 errorText_ = errorStream_.str();
4199 // Zero the buffer
4200 ZeroMemory( audioPtr, dataLen );
4202 // Unlock the buffer
4203 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
4204 if ( FAILED( result ) ) {
4206 buffer->Release();
4207 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
4208 errorText_ = errorStream_.str();
4212 ohandle = (void *) input;
4213 bhandle = (void *) buffer;
4216 // Set various stream parameters
4217 DsHandle *handle = 0;
4218 stream_.nDeviceChannels[mode] = channels + firstChannel;
4219 stream_.nUserChannels[mode] = channels;
4220 stream_.bufferSize = *bufferSize;
4221 stream_.channelOffset[mode] = firstChannel;
4222 stream_.deviceInterleaved[mode] = true;
4223 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
4224 else stream_.userInterleaved = true;
4226 // Set flag for buffer conversion
4227 stream_.doConvertBuffer[mode] = false;
4228 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
4229 stream_.doConvertBuffer[mode] = true;
4230 if (stream_.userFormat != stream_.deviceFormat[mode])
4231 stream_.doConvertBuffer[mode] = true;
4232 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4233 stream_.nUserChannels[mode] > 1 )
4234 stream_.doConvertBuffer[mode] = true;
4236 // Allocate necessary internal buffers
4237 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
4238 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
4239 if ( stream_.userBuffer[mode] == NULL ) {
4240 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
4244 if ( stream_.doConvertBuffer[mode] ) {
4246 bool makeBuffer = true;
4247 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
4248 if ( mode == INPUT ) {
// In duplex mode the device buffer can be shared if the existing output
// buffer is already large enough.
4249 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
4250 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
4251 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
4255 if ( makeBuffer ) {
4256 bufferBytes *= *bufferSize;
4257 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
4258 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
4259 if ( stream_.deviceBuffer == NULL ) {
4260 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
4266 // Allocate our DsHandle structures for the stream.
4267 if ( stream_.apiHandle == 0 ) {
4269 handle = new DsHandle;
4271 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste slip inherited from the ASIO backend.
4272 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
4276 // Create a manual-reset event.
4277 handle->condition = CreateEvent( NULL, // no security
4278 TRUE, // manual-reset
4279 FALSE, // non-signaled initially
4280 NULL ); // unnamed
4281 stream_.apiHandle = (void *) handle;
4284 handle = (DsHandle *) stream_.apiHandle;
4285 handle->id[mode] = ohandle;
4286 handle->buffer[mode] = bhandle;
4287 handle->dsBufferSize[mode] = dsBufferSize;
4288 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
4290 stream_.device[mode] = device;
4291 stream_.state = STREAM_STOPPED;
4292 if ( stream_.mode == OUTPUT && mode == INPUT )
4293 // We had already set up an output stream.
4294 stream_.mode = DUPLEX;
4296 stream_.mode = mode;
4297 stream_.nBuffers = nBuffers;
4298 stream_.sampleRate = sampleRate;
4300 // Setup the buffer conversion information structure.
4301 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
4303 // Setup the callback thread.
4304 if ( stream_.callbackInfo.isRunning == false ) {
4305 unsigned threadId;
4306 stream_.callbackInfo.isRunning = true;
4307 stream_.callbackInfo.object = (void *) this;
4308 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
4309 &stream_.callbackInfo, 0, &threadId );
4310 if ( stream_.callbackInfo.thread == 0 ) {
4311 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
4315 // Boost DS thread priority
4316 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error-unwind path (reached via a label lost in extraction): release any
// DirectSound objects and buffers acquired above, then mark the stream closed.
4322 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
4323 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
4324 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
4325 if ( buffer ) buffer->Release();
4326 object->Release();
4328 if ( handle->buffer[1] ) {
4329 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
4330 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4331 if ( buffer ) buffer->Release();
4332 object->Release();
4334 CloseHandle( handle->condition );
4336 stream_.apiHandle = 0;
4339 for ( int i=0; i<2; i++ ) {
4340 if ( stream_.userBuffer[i] ) {
4341 free( stream_.userBuffer[i] );
4342 stream_.userBuffer[i] = 0;
4346 if ( stream_.deviceBuffer ) {
4347 free( stream_.deviceBuffer );
4348 stream_.deviceBuffer = 0;
4351 stream_.state = STREAM_CLOSED;
\r
// Close an open stream: stop the callback thread, release the DirectSound
// playback/capture objects and buffers, free the internal conversion
// buffers, and reset the stream to the CLOSED state.
// NOTE(review): this extraction is missing some lines (a `return;` after the
// warning, `if ( handle ) {` guards, buffer->Stop() calls, closing braces).
// Only comments were added; code bytes are untouched.
4355 void RtApiDs :: closeStream()
4357 if ( stream_.state == STREAM_CLOSED ) {
4358 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
4359 error( RtError::WARNING );
4363 // Stop the callback thread.
// Clearing isRunning makes callbackHandler's loop exit; then join and reap it.
4364 stream_.callbackInfo.isRunning = false;
4365 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
4366 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
4368 DsHandle *handle = (DsHandle *) stream_.apiHandle;
4370 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
4371 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
4372 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
4375 buffer->Release();
4377 object->Release();
4379 if ( handle->buffer[1] ) {
4380 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
4381 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4384 buffer->Release();
4386 object->Release();
4388 CloseHandle( handle->condition );
4390 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
4393 for ( int i=0; i<2; i++ ) {
4394 if ( stream_.userBuffer[i] ) {
4395 free( stream_.userBuffer[i] );
4396 stream_.userBuffer[i] = 0;
4400 if ( stream_.deviceBuffer ) {
4401 free( stream_.deviceBuffer );
4402 stream_.deviceBuffer = 0;
4405 stream_.mode = UNINITIALIZED;
4406 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: start the DS playback and/or capture buffers in
// looping mode, reset the drain state, and mark the stream RUNNING.  Any
// DirectSound failure falls through to a SYSTEM_ERROR at the end.
// NOTE(review): this extraction is missing some lines (a `return;` after the
// warning, closing braces, the `goto unlock;` error hops and `unlock:`
// label).  Only comments were added; code bytes are untouched.
4409 void RtApiDs :: startStream()
4412 if ( stream_.state == STREAM_RUNNING ) {
4413 errorText_ = "RtApiDs::startStream(): the stream is already running!";
4414 error( RtError::WARNING );
4418 DsHandle *handle = (DsHandle *) stream_.apiHandle;
4420 // Increase scheduler frequency on lesser windows (a side-effect of
4421 // increasing timer accuracy). On greater windows (Win2K or later),
4422 // this is already in effect.
4423 timeBeginPeriod( 1 );
4425 buffersRolling = false;
4426 duplexPrerollBytes = 0;
4428 if ( stream_.mode == DUPLEX ) {
4429 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
4430 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
4433 HRESULT result = 0;
4434 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
4436 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Looping playback: the callback thread keeps refilling the circular buffer.
4437 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
4438 if ( FAILED( result ) ) {
4439 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
4440 errorText_ = errorStream_.str();
4445 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
4447 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4448 result = buffer->Start( DSCBSTART_LOOPING );
4449 if ( FAILED( result ) ) {
4450 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
4451 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-synchronization event, then go live.
4456 handle->drainCounter = 0;
4457 handle->internalDrain = false;
4458 ResetEvent( handle->condition );
4459 stream_.state = STREAM_RUNNING;
4462 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4465 void RtApiDs :: stopStream()
\r
4468 if ( stream_.state == STREAM_STOPPED ) {
\r
4469 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4470 error( RtError::WARNING );
\r
4474 HRESULT result = 0;
\r
4477 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4479 if ( handle->drainCounter == 0 ) {
\r
4480 handle->drainCounter = 2;
\r
4481 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4484 stream_.state = STREAM_STOPPED;
\r
4486 // Stop the buffer and clear memory
\r
4487 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4488 result = buffer->Stop();
\r
4489 if ( FAILED( result ) ) {
\r
4490 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4491 errorText_ = errorStream_.str();
\r
4495 // Lock the buffer and clear it so that if we start to play again,
\r
4496 // we won't have old data playing.
\r
4497 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4498 if ( FAILED( result ) ) {
\r
4499 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4500 errorText_ = errorStream_.str();
\r
4504 // Zero the DS buffer
\r
4505 ZeroMemory( audioPtr, dataLen );
\r
4507 // Unlock the DS buffer
\r
4508 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4509 if ( FAILED( result ) ) {
\r
4510 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4511 errorText_ = errorStream_.str();
\r
4515 // If we start playing again, we must begin at beginning of buffer.
\r
4516 handle->bufferPointer[0] = 0;
\r
4519 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4520 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4524 stream_.state = STREAM_STOPPED;
\r
4526 result = buffer->Stop();
\r
4527 if ( FAILED( result ) ) {
\r
4528 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4529 errorText_ = errorStream_.str();
\r
4533 // Lock the buffer and clear it so that if we start to play again,
\r
4534 // we won't have old data playing.
\r
4535 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4536 if ( FAILED( result ) ) {
\r
4537 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4538 errorText_ = errorStream_.str();
\r
4542 // Zero the DS buffer
\r
4543 ZeroMemory( audioPtr, dataLen );
\r
4545 // Unlock the DS buffer
\r
4546 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4547 if ( FAILED( result ) ) {
\r
4548 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4549 errorText_ = errorStream_.str();
\r
4553 // If we start recording again, we must begin at beginning of buffer.
\r
4554 handle->bufferPointer[1] = 0;
\r
4558 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4559 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4562 void RtApiDs :: abortStream()
\r
4565 if ( stream_.state == STREAM_STOPPED ) {
\r
4566 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4567 error( RtError::WARNING );
\r
4571 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4572 handle->drainCounter = 2;
\r
4577 void RtApiDs :: callbackEvent()
\r
4579 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4580 Sleep( 50 ); // sleep 50 milliseconds
\r
4584 if ( stream_.state == STREAM_CLOSED ) {
\r
4585 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4586 error( RtError::WARNING );
\r
4590 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4591 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4593 // Check if we were draining the stream and signal is finished.
\r
4594 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4596 stream_.state = STREAM_STOPPING;
\r
4597 if ( handle->internalDrain == false )
\r
4598 SetEvent( handle->condition );
\r
4604 // Invoke user callback to get fresh output data UNLESS we are
\r
4605 // draining stream.
\r
4606 if ( handle->drainCounter == 0 ) {
\r
4607 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4608 double streamTime = getStreamTime();
\r
4609 RtAudioStreamStatus status = 0;
\r
4610 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4611 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4612 handle->xrun[0] = false;
\r
4614 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4615 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4616 handle->xrun[1] = false;
\r
4618 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4619 stream_.bufferSize, streamTime, status, info->userData );
\r
4620 if ( cbReturnValue == 2 ) {
\r
4621 stream_.state = STREAM_STOPPING;
\r
4622 handle->drainCounter = 2;
\r
4626 else if ( cbReturnValue == 1 ) {
\r
4627 handle->drainCounter = 1;
\r
4628 handle->internalDrain = true;
\r
4633 DWORD currentWritePointer, safeWritePointer;
\r
4634 DWORD currentReadPointer, safeReadPointer;
\r
4635 UINT nextWritePointer;
\r
4637 LPVOID buffer1 = NULL;
\r
4638 LPVOID buffer2 = NULL;
\r
4639 DWORD bufferSize1 = 0;
\r
4640 DWORD bufferSize2 = 0;
\r
4645 if ( buffersRolling == false ) {
\r
4646 if ( stream_.mode == DUPLEX ) {
\r
4647 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4649 // It takes a while for the devices to get rolling. As a result,
\r
4650 // there's no guarantee that the capture and write device pointers
\r
4651 // will move in lockstep. Wait here for both devices to start
\r
4652 // rolling, and then set our buffer pointers accordingly.
\r
4653 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4654 // bytes later than the write buffer.
\r
4656 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4657 // take place between the two GetCurrentPosition calls... but I'm
\r
4658 // really not sure how to solve the problem. Temporarily boost to
\r
4659 // Realtime priority, maybe; but I'm not sure what priority the
\r
4660 // DirectSound service threads run at. We *should* be roughly
\r
4661 // within a ms or so of correct.
\r
4663 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4664 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4666 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4668 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4669 if ( FAILED( result ) ) {
\r
4670 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4671 errorText_ = errorStream_.str();
\r
4672 error( RtError::SYSTEM_ERROR );
\r
4675 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4676 if ( FAILED( result ) ) {
\r
4677 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4678 errorText_ = errorStream_.str();
\r
4679 error( RtError::SYSTEM_ERROR );
\r
4683 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4684 if ( FAILED( result ) ) {
\r
4685 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4686 errorText_ = errorStream_.str();
\r
4687 error( RtError::SYSTEM_ERROR );
\r
4690 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4691 if ( FAILED( result ) ) {
\r
4692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4693 errorText_ = errorStream_.str();
\r
4694 error( RtError::SYSTEM_ERROR );
\r
4697 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4701 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4703 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4704 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4705 handle->bufferPointer[1] = safeReadPointer;
\r
4707 else if ( stream_.mode == OUTPUT ) {
\r
4709 // Set the proper nextWritePosition after initial startup.
\r
4710 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4711 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4712 if ( FAILED( result ) ) {
\r
4713 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4714 errorText_ = errorStream_.str();
\r
4715 error( RtError::SYSTEM_ERROR );
\r
4718 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4719 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4722 buffersRolling = true;
\r
4725 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4727 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4729 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4730 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4731 bufferBytes *= formatBytes( stream_.userFormat );
\r
4732 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4735 // Setup parameters and do buffer conversion if necessary.
\r
4736 if ( stream_.doConvertBuffer[0] ) {
\r
4737 buffer = stream_.deviceBuffer;
\r
4738 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4739 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4740 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4743 buffer = stream_.userBuffer[0];
\r
4744 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4745 bufferBytes *= formatBytes( stream_.userFormat );
\r
4748 // No byte swapping necessary in DirectSound implementation.
\r
4750 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4751 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4753 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4754 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4756 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4757 nextWritePointer = handle->bufferPointer[0];
\r
4759 DWORD endWrite, leadPointer;
\r
4761 // Find out where the read and "safe write" pointers are.
\r
4762 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4763 if ( FAILED( result ) ) {
\r
4764 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4765 errorText_ = errorStream_.str();
\r
4766 error( RtError::SYSTEM_ERROR );
\r
4770 // We will copy our output buffer into the region between
\r
4771 // safeWritePointer and leadPointer. If leadPointer is not
\r
4772 // beyond the next endWrite position, wait until it is.
\r
4773 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4774 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4775 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4776 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4777 endWrite = nextWritePointer + bufferBytes;
\r
4779 // Check whether the entire write region is behind the play pointer.
\r
4780 if ( leadPointer >= endWrite ) break;
\r
4782 // If we are here, then we must wait until the leadPointer advances
\r
4783 // beyond the end of our next write region. We use the
\r
4784 // Sleep() function to suspend operation until that happens.
\r
4785 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4786 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4787 if ( millis < 1.0 ) millis = 1.0;
\r
4788 Sleep( (DWORD) millis );
\r
4791 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4792 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4793 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4794 handle->xrun[0] = true;
\r
4795 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4796 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4797 handle->bufferPointer[0] = nextWritePointer;
\r
4798 endWrite = nextWritePointer + bufferBytes;
\r
4801 // Lock free space in the buffer
\r
4802 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4803 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4804 if ( FAILED( result ) ) {
\r
4805 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4806 errorText_ = errorStream_.str();
\r
4807 error( RtError::SYSTEM_ERROR );
\r
4811 // Copy our buffer into the DS buffer
\r
4812 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4813 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4815 // Update our buffer offset and unlock sound buffer
\r
4816 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4817 if ( FAILED( result ) ) {
\r
4818 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4819 errorText_ = errorStream_.str();
\r
4820 error( RtError::SYSTEM_ERROR );
\r
4823 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4824 handle->bufferPointer[0] = nextWritePointer;
\r
4826 if ( handle->drainCounter ) {
\r
4827 handle->drainCounter++;
\r
4832 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4834 // Setup parameters.
\r
4835 if ( stream_.doConvertBuffer[1] ) {
\r
4836 buffer = stream_.deviceBuffer;
\r
4837 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4838 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4841 buffer = stream_.userBuffer[1];
\r
4842 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4843 bufferBytes *= formatBytes( stream_.userFormat );
\r
4846 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4847 long nextReadPointer = handle->bufferPointer[1];
\r
4848 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4850 // Find out where the write and "safe read" pointers are.
\r
4851 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4852 if ( FAILED( result ) ) {
\r
4853 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4854 errorText_ = errorStream_.str();
\r
4855 error( RtError::SYSTEM_ERROR );
\r
4859 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4860 DWORD endRead = nextReadPointer + bufferBytes;
\r
4862 // Handling depends on whether we are INPUT or DUPLEX.
\r
4863 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4864 // then a wait here will drag the write pointers into the forbidden zone.
\r
4866 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4867 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4868 // practical way to sync up the read and write pointers reliably, given the
\r
4869 // the very complex relationship between phase and increment of the read and write
\r
4872 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4873 // provide a pre-roll period of 0.5 seconds in which we return
\r
4874 // zeros from the read buffer while the pointers sync up.
\r
4876 if ( stream_.mode == DUPLEX ) {
\r
4877 if ( safeReadPointer < endRead ) {
\r
4878 if ( duplexPrerollBytes <= 0 ) {
\r
4879 // Pre-roll time over. Be more agressive.
\r
4880 int adjustment = endRead-safeReadPointer;
\r
4882 handle->xrun[1] = true;
\r
4884 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4885 // and perform fine adjustments later.
\r
4886 // - small adjustments: back off by twice as much.
\r
4887 if ( adjustment >= 2*bufferBytes )
\r
4888 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4890 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4892 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4896 // In pre=roll time. Just do it.
\r
4897 nextReadPointer = safeReadPointer - bufferBytes;
\r
4898 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4900 endRead = nextReadPointer + bufferBytes;
\r
4903 else { // mode == INPUT
\r
4904 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4905 // See comments for playback.
\r
4906 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4907 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4908 if ( millis < 1.0 ) millis = 1.0;
\r
4909 Sleep( (DWORD) millis );
\r
4911 // Wake up and find out where we are now.
\r
4912 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4913 if ( FAILED( result ) ) {
\r
4914 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4915 errorText_ = errorStream_.str();
\r
4916 error( RtError::SYSTEM_ERROR );
\r
4920 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4924 // Lock free space in the buffer
\r
4925 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4926 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4927 if ( FAILED( result ) ) {
\r
4928 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4929 errorText_ = errorStream_.str();
\r
4930 error( RtError::SYSTEM_ERROR );
\r
4934 if ( duplexPrerollBytes <= 0 ) {
\r
4935 // Copy our buffer into the DS buffer
\r
4936 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4937 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4940 memset( buffer, 0, bufferSize1 );
\r
4941 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4942 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4945 // Update our buffer offset and unlock sound buffer
\r
4946 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4947 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4948 if ( FAILED( result ) ) {
\r
4949 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4950 errorText_ = errorStream_.str();
\r
4951 error( RtError::SYSTEM_ERROR );
\r
4954 handle->bufferPointer[1] = nextReadPointer;
\r
4956 // No byte swapping necessary in DirectSound implementation.
\r
4958 // If necessary, convert 8-bit data from unsigned to signed.
\r
4959 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4960 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4962 // Do buffer conversion if necessary.
\r
4963 if ( stream_.doConvertBuffer[1] )
\r
4964 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4968 RtApi::tickStreamTime();
\r
// Definitions for utility functions and callbacks
// specific to the DirectSound implementation.
\r
4974 static unsigned __stdcall callbackHandler( void *ptr )
\r
4976 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4977 RtApiDs *object = (RtApiDs *) info->object;
\r
4978 bool* isRunning = &info->isRunning;
\r
4980 while ( *isRunning == true ) {
\r
4981 object->callbackEvent();
\r
4984 _endthreadex( 0 );
\r
#include "tchar.h"
\r
4990 static std::string convertTChar( LPCTSTR name )
\r
4992 #if defined( UNICODE ) || defined( _UNICODE )
\r
4993 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4994 std::string s( length-1, '\0' );
\r
4995 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
4997 std::string s( name );
\r
5003 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5004 LPCTSTR description,
\r
5006 LPVOID lpContext )
\r
5008 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5009 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5012 bool validDevice = false;
\r
5013 if ( probeInfo.isInput == true ) {
\r
5015 LPDIRECTSOUNDCAPTURE object;
\r
5017 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5018 if ( hr != DS_OK ) return TRUE;
\r
5020 caps.dwSize = sizeof(caps);
\r
5021 hr = object->GetCaps( &caps );
\r
5022 if ( hr == DS_OK ) {
\r
5023 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5024 validDevice = true;
\r
5026 object->Release();
\r
5030 LPDIRECTSOUND object;
\r
5031 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5032 if ( hr != DS_OK ) return TRUE;
\r
5034 caps.dwSize = sizeof(caps);
\r
5035 hr = object->GetCaps( &caps );
\r
5036 if ( hr == DS_OK ) {
\r
5037 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5038 validDevice = true;
\r
5040 object->Release();
\r
5043 // If good device, then save its name and guid.
\r
5044 std::string name = convertTChar( description );
\r
5045 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5046 if ( lpguid == NULL )
\r
5047 name = "Default Device";
\r
5048 if ( validDevice ) {
\r
5049 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5050 if ( dsDevices[i].name == name ) {
\r
5051 dsDevices[i].found = true;
\r
5052 if ( probeInfo.isInput ) {
\r
5053 dsDevices[i].id[1] = lpguid;
\r
5054 dsDevices[i].validId[1] = true;
\r
5057 dsDevices[i].id[0] = lpguid;
\r
5058 dsDevices[i].validId[0] = true;
\r
5065 device.name = name;
\r
5066 device.found = true;
\r
5067 if ( probeInfo.isInput ) {
\r
5068 device.id[1] = lpguid;
\r
5069 device.validId[1] = true;
\r
5072 device.id[0] = lpguid;
\r
5073 device.validId[0] = true;
\r
5075 dsDevices.push_back( device );
\r
5081 static const char* getErrorString( int code )
\r
5085 case DSERR_ALLOCATED:
\r
5086 return "Already allocated";
\r
5088 case DSERR_CONTROLUNAVAIL:
\r
5089 return "Control unavailable";
\r
5091 case DSERR_INVALIDPARAM:
\r
5092 return "Invalid parameter";
\r
5094 case DSERR_INVALIDCALL:
\r
5095 return "Invalid call";
\r
5097 case DSERR_GENERIC:
\r
5098 return "Generic error";
\r
5100 case DSERR_PRIOLEVELNEEDED:
\r
5101 return "Priority level needed";
\r
5103 case DSERR_OUTOFMEMORY:
\r
5104 return "Out of memory";
\r
5106 case DSERR_BADFORMAT:
\r
5107 return "The sample rate or the channel format is not supported";
\r
5109 case DSERR_UNSUPPORTED:
\r
5110 return "Not supported";
\r
5112 case DSERR_NODRIVER:
\r
5113 return "No driver";
\r
5115 case DSERR_ALREADYINITIALIZED:
\r
5116 return "Already initialized";
\r
5118 case DSERR_NOAGGREGATION:
\r
5119 return "No aggregation";
\r
5121 case DSERR_BUFFERLOST:
\r
5122 return "Buffer lost";
\r
5124 case DSERR_OTHERAPPHASPRIO:
\r
5125 return "Another application already has priority";
\r
5127 case DSERR_UNINITIALIZED:
\r
5128 return "Uninitialized";
\r
5131 return "DirectSound unknown error";
\r
//******************** End of __WINDOWS_DS__ *********************//
#endif

#if defined(__LINUX_ALSA__)

#include <alsa/asoundlib.h>
#include <unistd.h>

// A structure to hold various information related to the ALSA API
// implementation.
\r
5145 struct AlsaHandle {
\r
5146 snd_pcm_t *handles[2];
\r
5147 bool synchronized;
\r
5149 pthread_cond_t runnable_cv;
\r
5153 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
static void *alsaCallbackHandler( void * ptr );
\r
5158 RtApiAlsa :: RtApiAlsa()
\r
5160 // Nothing to do here.
\r
5163 RtApiAlsa :: ~RtApiAlsa()
\r
5165 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5168 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5170 unsigned nDevices = 0;
\r
5171 int result, subdevice, card;
\r
5173 snd_ctl_t *handle;
\r
5175 // Count cards and devices
\r
5177 snd_card_next( &card );
\r
5178 while ( card >= 0 ) {
\r
5179 sprintf( name, "hw:%d", card );
\r
5180 result = snd_ctl_open( &handle, name, 0 );
\r
5181 if ( result < 0 ) {
\r
5182 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5183 errorText_ = errorStream_.str();
\r
5184 error( RtError::WARNING );
\r
5189 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5190 if ( result < 0 ) {
\r
5191 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5192 errorText_ = errorStream_.str();
\r
5193 error( RtError::WARNING );
\r
5196 if ( subdevice < 0 )
\r
5201 snd_ctl_close( handle );
\r
5202 snd_card_next( &card );
\r
5205 result = snd_ctl_open( &handle, "default", 0 );
\r
5206 if (result == 0) {
\r
5208 snd_ctl_close( handle );
\r
5214 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5216 RtAudio::DeviceInfo info;
\r
5217 info.probed = false;
\r
5219 unsigned nDevices = 0;
\r
5220 int result, subdevice, card;
\r
5222 snd_ctl_t *chandle;
\r
5224 // Count cards and devices
\r
5226 snd_card_next( &card );
\r
5227 while ( card >= 0 ) {
\r
5228 sprintf( name, "hw:%d", card );
\r
5229 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5230 if ( result < 0 ) {
\r
5231 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5232 errorText_ = errorStream_.str();
\r
5233 error( RtError::WARNING );
\r
5238 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5239 if ( result < 0 ) {
\r
5240 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5241 errorText_ = errorStream_.str();
\r
5242 error( RtError::WARNING );
\r
5245 if ( subdevice < 0 ) break;
\r
5246 if ( nDevices == device ) {
\r
5247 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5253 snd_ctl_close( chandle );
\r
5254 snd_card_next( &card );
\r
5257 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5258 if ( result == 0 ) {
\r
5259 if ( nDevices == device ) {
\r
5260 strcpy( name, "default" );
\r
5266 if ( nDevices == 0 ) {
\r
5267 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5268 error( RtError::INVALID_USE );
\r
5272 if ( device >= nDevices ) {
\r
5273 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5274 error( RtError::INVALID_USE );
\r
5280 // If a stream is already open, we cannot probe the stream devices.
\r
5281 // Thus, use the saved results.
\r
5282 if ( stream_.state != STREAM_CLOSED &&
\r
5283 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5284 snd_ctl_close( chandle );
\r
5285 if ( device >= devices_.size() ) {
\r
5286 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5287 error( RtError::WARNING );
\r
5290 return devices_[ device ];
\r
5293 int openMode = SND_PCM_ASYNC;
\r
5294 snd_pcm_stream_t stream;
\r
5295 snd_pcm_info_t *pcminfo;
\r
5296 snd_pcm_info_alloca( &pcminfo );
\r
5297 snd_pcm_t *phandle;
\r
5298 snd_pcm_hw_params_t *params;
\r
5299 snd_pcm_hw_params_alloca( ¶ms );
\r
5301 // First try for playback unless default device (which has subdev -1)
\r
5302 stream = SND_PCM_STREAM_PLAYBACK;
\r
5303 snd_pcm_info_set_stream( pcminfo, stream );
\r
5304 if ( subdevice != -1 ) {
\r
5305 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5306 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5308 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5309 if ( result < 0 ) {
\r
5310 // Device probably doesn't support playback.
\r
5311 goto captureProbe;
\r
5315 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5316 if ( result < 0 ) {
\r
5317 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5318 errorText_ = errorStream_.str();
\r
5319 error( RtError::WARNING );
\r
5320 goto captureProbe;
\r
5323 // The device is open ... fill the parameter structure.
\r
5324 result = snd_pcm_hw_params_any( phandle, params );
\r
5325 if ( result < 0 ) {
\r
5326 snd_pcm_close( phandle );
\r
5327 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5328 errorText_ = errorStream_.str();
\r
5329 error( RtError::WARNING );
\r
5330 goto captureProbe;
\r
5333 // Get output channel information.
\r
5334 unsigned int value;
\r
5335 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5336 if ( result < 0 ) {
\r
5337 snd_pcm_close( phandle );
\r
5338 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5339 errorText_ = errorStream_.str();
\r
5340 error( RtError::WARNING );
\r
5341 goto captureProbe;
\r
5343 info.outputChannels = value;
\r
5344 snd_pcm_close( phandle );
\r
5347 stream = SND_PCM_STREAM_CAPTURE;
\r
5348 snd_pcm_info_set_stream( pcminfo, stream );
\r
5350 // Now try for capture unless default device (with subdev = -1)
\r
5351 if ( subdevice != -1 ) {
\r
5352 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5353 snd_ctl_close( chandle );
\r
5354 if ( result < 0 ) {
\r
5355 // Device probably doesn't support capture.
\r
5356 if ( info.outputChannels == 0 ) return info;
\r
5357 goto probeParameters;
\r
5361 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5362 if ( result < 0 ) {
\r
5363 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5364 errorText_ = errorStream_.str();
\r
5365 error( RtError::WARNING );
\r
5366 if ( info.outputChannels == 0 ) return info;
\r
5367 goto probeParameters;
\r
5370 // The device is open ... fill the parameter structure.
\r
5371 result = snd_pcm_hw_params_any( phandle, params );
\r
5372 if ( result < 0 ) {
\r
5373 snd_pcm_close( phandle );
\r
5374 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5375 errorText_ = errorStream_.str();
\r
5376 error( RtError::WARNING );
\r
5377 if ( info.outputChannels == 0 ) return info;
\r
5378 goto probeParameters;
\r
5381 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5382 if ( result < 0 ) {
\r
5383 snd_pcm_close( phandle );
\r
5384 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5385 errorText_ = errorStream_.str();
\r
5386 error( RtError::WARNING );
\r
5387 if ( info.outputChannels == 0 ) return info;
\r
5388 goto probeParameters;
\r
5390 info.inputChannels = value;
\r
5391 snd_pcm_close( phandle );
\r
5393 // If device opens for both playback and capture, we determine the channels.
\r
5394 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5395 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5397 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5398 if ( device == 0 && info.outputChannels > 0 )
\r
5399 info.isDefaultOutput = true;
\r
5400 if ( device == 0 && info.inputChannels > 0 )
\r
5401 info.isDefaultInput = true;
\r
5404 // At this point, we just need to figure out the supported data
\r
5405 // formats and sample rates. We'll proceed by opening the device in
\r
5406 // the direction with the maximum number of channels, or playback if
\r
5407 // they are equal. This might limit our sample rate options, but so
\r
5410 if ( info.outputChannels >= info.inputChannels )
\r
5411 stream = SND_PCM_STREAM_PLAYBACK;
\r
5413 stream = SND_PCM_STREAM_CAPTURE;
\r
5414 snd_pcm_info_set_stream( pcminfo, stream );
\r
5416 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5417 if ( result < 0 ) {
\r
5418 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5419 errorText_ = errorStream_.str();
\r
5420 error( RtError::WARNING );
\r
5424 // The device is open ... fill the parameter structure.
\r
5425 result = snd_pcm_hw_params_any( phandle, params );
\r
5426 if ( result < 0 ) {
\r
5427 snd_pcm_close( phandle );
\r
5428 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5429 errorText_ = errorStream_.str();
\r
5430 error( RtError::WARNING );
\r
5434 // Test our discrete set of sample rate values.
\r
5435 info.sampleRates.clear();
\r
5436 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5437 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5438 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5440 if ( info.sampleRates.size() == 0 ) {
\r
5441 snd_pcm_close( phandle );
\r
5442 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5443 errorText_ = errorStream_.str();
\r
5444 error( RtError::WARNING );
\r
5448 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5449 snd_pcm_format_t format;
\r
5450 info.nativeFormats = 0;
\r
5451 format = SND_PCM_FORMAT_S8;
\r
5452 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5453 info.nativeFormats |= RTAUDIO_SINT8;
\r
5454 format = SND_PCM_FORMAT_S16;
\r
5455 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5456 info.nativeFormats |= RTAUDIO_SINT16;
\r
5457 format = SND_PCM_FORMAT_S24;
\r
5458 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5459 info.nativeFormats |= RTAUDIO_SINT24;
\r
5460 format = SND_PCM_FORMAT_S32;
\r
5461 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5462 info.nativeFormats |= RTAUDIO_SINT32;
\r
5463 format = SND_PCM_FORMAT_FLOAT;
\r
5464 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5465 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5466 format = SND_PCM_FORMAT_FLOAT64;
\r
5467 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5468 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5470 // Check that we have at least one supported format
\r
5471 if ( info.nativeFormats == 0 ) {
\r
5472 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5473 errorText_ = errorStream_.str();
\r
5474 error( RtError::WARNING );
\r
5478 // Get the device name
\r
5480 result = snd_card_get_name( card, &cardname );
\r
5481 if ( result >= 0 )
\r
5482 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5485 // That's all ... close the device and return
\r
5486 snd_pcm_close( phandle );
\r
5487 info.probed = true;
\r
5491 void RtApiAlsa :: saveDeviceInfo( void )
\r
5495 unsigned int nDevices = getDeviceCount();
\r
5496 devices_.resize( nDevices );
\r
5497 for ( unsigned int i=0; i<nDevices; i++ )
\r
5498 devices_[i] = getDeviceInfo( i );
\r
5501 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5502 unsigned int firstChannel, unsigned int sampleRate,
\r
5503 RtAudioFormat format, unsigned int *bufferSize,
\r
5504 RtAudio::StreamOptions *options )
\r
5507 #if defined(__RTAUDIO_DEBUG__)
\r
5508 snd_output_t *out;
\r
5509 snd_output_stdio_attach(&out, stderr, 0);
\r
5512 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5514 unsigned nDevices = 0;
\r
5515 int result, subdevice, card;
\r
5517 snd_ctl_t *chandle;
\r
5519 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5520 snprintf(name, sizeof(name), "%s", "default");
\r
5522 // Count cards and devices
\r
5524 snd_card_next( &card );
\r
5525 while ( card >= 0 ) {
\r
5526 sprintf( name, "hw:%d", card );
\r
5527 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5528 if ( result < 0 ) {
\r
5529 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5530 errorText_ = errorStream_.str();
\r
5535 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5536 if ( result < 0 ) break;
\r
5537 if ( subdevice < 0 ) break;
\r
5538 if ( nDevices == device ) {
\r
5539 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5540 snd_ctl_close( chandle );
\r
5545 snd_ctl_close( chandle );
\r
5546 snd_card_next( &card );
\r
5549 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5550 if ( result == 0 ) {
\r
5551 if ( nDevices == device ) {
\r
5552 strcpy( name, "default" );
\r
5558 if ( nDevices == 0 ) {
\r
5559 // This should not happen because a check is made before this function is called.
\r
5560 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5564 if ( device >= nDevices ) {
\r
5565 // This should not happen because a check is made before this function is called.
\r
5566 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5573 // The getDeviceInfo() function will not work for a device that is
\r
5574 // already open. Thus, we'll probe the system before opening a
\r
5575 // stream and save the results for use by getDeviceInfo().
\r
5576 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5577 this->saveDeviceInfo();
\r
5579 snd_pcm_stream_t stream;
\r
5580 if ( mode == OUTPUT )
\r
5581 stream = SND_PCM_STREAM_PLAYBACK;
\r
5583 stream = SND_PCM_STREAM_CAPTURE;
\r
5585 snd_pcm_t *phandle;
\r
5586 int openMode = SND_PCM_ASYNC;
\r
5587 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5588 if ( result < 0 ) {
\r
5589 if ( mode == OUTPUT )
\r
5590 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5592 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5593 errorText_ = errorStream_.str();
\r
5597 // Fill the parameter structure.
\r
5598 snd_pcm_hw_params_t *hw_params;
\r
5599 snd_pcm_hw_params_alloca( &hw_params );
\r
5600 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5601 if ( result < 0 ) {
\r
5602 snd_pcm_close( phandle );
\r
5603 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5604 errorText_ = errorStream_.str();
\r
5608 #if defined(__RTAUDIO_DEBUG__)
\r
5609 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5610 snd_pcm_hw_params_dump( hw_params, out );
\r
5613 // Set access ... check user preference.
\r
5614 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5615 stream_.userInterleaved = false;
\r
5616 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5617 if ( result < 0 ) {
\r
5618 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5619 stream_.deviceInterleaved[mode] = true;
\r
5622 stream_.deviceInterleaved[mode] = false;
\r
5625 stream_.userInterleaved = true;
\r
5626 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5627 if ( result < 0 ) {
\r
5628 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5629 stream_.deviceInterleaved[mode] = false;
\r
5632 stream_.deviceInterleaved[mode] = true;
\r
5635 if ( result < 0 ) {
\r
5636 snd_pcm_close( phandle );
\r
5637 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5638 errorText_ = errorStream_.str();
\r
5642 // Determine how to set the device format.
\r
5643 stream_.userFormat = format;
\r
5644 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5646 if ( format == RTAUDIO_SINT8 )
\r
5647 deviceFormat = SND_PCM_FORMAT_S8;
\r
5648 else if ( format == RTAUDIO_SINT16 )
\r
5649 deviceFormat = SND_PCM_FORMAT_S16;
\r
5650 else if ( format == RTAUDIO_SINT24 )
\r
5651 deviceFormat = SND_PCM_FORMAT_S24;
\r
5652 else if ( format == RTAUDIO_SINT32 )
\r
5653 deviceFormat = SND_PCM_FORMAT_S32;
\r
5654 else if ( format == RTAUDIO_FLOAT32 )
\r
5655 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5656 else if ( format == RTAUDIO_FLOAT64 )
\r
5657 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5659 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5660 stream_.deviceFormat[mode] = format;
\r
5664 // The user requested format is not natively supported by the device.
\r
5665 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5666 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5667 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5671 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5672 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5673 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5677 deviceFormat = SND_PCM_FORMAT_S32;
\r
5678 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5679 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5683 deviceFormat = SND_PCM_FORMAT_S24;
\r
5684 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5685 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5689 deviceFormat = SND_PCM_FORMAT_S16;
\r
5690 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5691 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5695 deviceFormat = SND_PCM_FORMAT_S8;
\r
5696 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5697 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5701 // If we get here, no supported format was found.
\r
5702 snd_pcm_close( phandle );
\r
5703 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5704 errorText_ = errorStream_.str();
\r
5708 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5709 if ( result < 0 ) {
\r
5710 snd_pcm_close( phandle );
\r
5711 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5712 errorText_ = errorStream_.str();
\r
5716 // Determine whether byte-swaping is necessary.
\r
5717 stream_.doByteSwap[mode] = false;
\r
5718 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5719 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5720 if ( result == 0 )
\r
5721 stream_.doByteSwap[mode] = true;
\r
5722 else if (result < 0) {
\r
5723 snd_pcm_close( phandle );
\r
5724 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5725 errorText_ = errorStream_.str();
\r
5730 // Set the sample rate.
\r
5731 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5732 if ( result < 0 ) {
\r
5733 snd_pcm_close( phandle );
\r
5734 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5735 errorText_ = errorStream_.str();
\r
5739 // Determine the number of channels for this device. We support a possible
\r
5740 // minimum device channel number > than the value requested by the user.
\r
5741 stream_.nUserChannels[mode] = channels;
\r
5742 unsigned int value;
\r
5743 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5744 unsigned int deviceChannels = value;
\r
5745 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5746 snd_pcm_close( phandle );
\r
5747 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5748 errorText_ = errorStream_.str();
\r
5752 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5753 if ( result < 0 ) {
\r
5754 snd_pcm_close( phandle );
\r
5755 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5756 errorText_ = errorStream_.str();
\r
5759 deviceChannels = value;
\r
5760 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5761 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5763 // Set the device channels.
\r
5764 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5765 if ( result < 0 ) {
\r
5766 snd_pcm_close( phandle );
\r
5767 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5768 errorText_ = errorStream_.str();
\r
5772 // Set the buffer (or period) size.
\r
5774 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5775 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5776 if ( result < 0 ) {
\r
5777 snd_pcm_close( phandle );
\r
5778 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5779 errorText_ = errorStream_.str();
\r
5782 *bufferSize = periodSize;
\r
5784 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5785 unsigned int periods = 0;
\r
5786 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5787 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5788 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5789 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5790 if ( result < 0 ) {
\r
5791 snd_pcm_close( phandle );
\r
5792 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5793 errorText_ = errorStream_.str();
\r
5797 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5798 // MUST be the same in both directions!
\r
5799 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5800 snd_pcm_close( phandle );
\r
5801 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5802 errorText_ = errorStream_.str();
\r
5806 stream_.bufferSize = *bufferSize;
\r
5808 // Install the hardware configuration
\r
5809 result = snd_pcm_hw_params( phandle, hw_params );
\r
5810 if ( result < 0 ) {
\r
5811 snd_pcm_close( phandle );
\r
5812 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5813 errorText_ = errorStream_.str();
\r
5817 #if defined(__RTAUDIO_DEBUG__)
\r
5818 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5819 snd_pcm_hw_params_dump( hw_params, out );
\r
5822 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5823 snd_pcm_sw_params_t *sw_params = NULL;
\r
5824 snd_pcm_sw_params_alloca( &sw_params );
\r
5825 snd_pcm_sw_params_current( phandle, sw_params );
\r
5826 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5827 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5828 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5830 // The following two settings were suggested by Theo Veenker
\r
5831 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5832 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5834 // here are two options for a fix
\r
5835 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5836 snd_pcm_uframes_t val;
\r
5837 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5838 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5840 result = snd_pcm_sw_params( phandle, sw_params );
\r
5841 if ( result < 0 ) {
\r
5842 snd_pcm_close( phandle );
\r
5843 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5844 errorText_ = errorStream_.str();
\r
5848 #if defined(__RTAUDIO_DEBUG__)
\r
5849 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5850 snd_pcm_sw_params_dump( sw_params, out );
\r
5853 // Set flags for buffer conversion
\r
5854 stream_.doConvertBuffer[mode] = false;
\r
5855 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5856 stream_.doConvertBuffer[mode] = true;
\r
5857 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5858 stream_.doConvertBuffer[mode] = true;
\r
5859 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5860 stream_.nUserChannels[mode] > 1 )
\r
5861 stream_.doConvertBuffer[mode] = true;
\r
5863 // Allocate the ApiHandle if necessary and then save.
\r
5864 AlsaHandle *apiInfo = 0;
\r
5865 if ( stream_.apiHandle == 0 ) {
\r
5867 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5869 catch ( std::bad_alloc& ) {
\r
5870 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5874 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5875 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5879 stream_.apiHandle = (void *) apiInfo;
\r
5880 apiInfo->handles[0] = 0;
\r
5881 apiInfo->handles[1] = 0;
\r
5884 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5886 apiInfo->handles[mode] = phandle;
\r
5889 // Allocate necessary internal buffers.
\r
5890 unsigned long bufferBytes;
\r
5891 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5892 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5893 if ( stream_.userBuffer[mode] == NULL ) {
\r
5894 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5898 if ( stream_.doConvertBuffer[mode] ) {
\r
5900 bool makeBuffer = true;
\r
5901 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5902 if ( mode == INPUT ) {
\r
5903 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5904 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5905 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5909 if ( makeBuffer ) {
\r
5910 bufferBytes *= *bufferSize;
\r
5911 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5912 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5913 if ( stream_.deviceBuffer == NULL ) {
\r
5914 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5920 stream_.sampleRate = sampleRate;
\r
5921 stream_.nBuffers = periods;
\r
5922 stream_.device[mode] = device;
\r
5923 stream_.state = STREAM_STOPPED;
\r
5925 // Setup the buffer conversion information structure.
\r
5926 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5928 // Setup thread if necessary.
\r
5929 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5930 // We had already set up an output stream.
\r
5931 stream_.mode = DUPLEX;
\r
5932 // Link the streams if possible.
\r
5933 apiInfo->synchronized = false;
\r
5934 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5935 apiInfo->synchronized = true;
\r
5937 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5938 error( RtError::WARNING );
\r
5942 stream_.mode = mode;
\r
5944 // Setup callback thread.
\r
5945 stream_.callbackInfo.object = (void *) this;
\r
5947 // Set the thread attributes for joinable and realtime scheduling
\r
5948 // priority (optional). The higher priority will only take affect
\r
5949 // if the program is run as root or suid. Note, under Linux
\r
5950 // processes with CAP_SYS_NICE privilege, a user can change
\r
5951 // scheduling policy and priority (thus need not be root). See
\r
5952 // POSIX "capabilities".
\r
5953 pthread_attr_t attr;
\r
5954 pthread_attr_init( &attr );
\r
5955 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5957 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5958 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5959 // We previously attempted to increase the audio callback priority
\r
5960 // to SCHED_RR here via the attributes. However, while no errors
\r
5961 // were reported in doing so, it did not work. So, now this is
\r
5962 // done in the alsaCallbackHandler function.
\r
5963 stream_.callbackInfo.doRealtime = true;
\r
5964 int priority = options->priority;
\r
5965 int min = sched_get_priority_min( SCHED_RR );
\r
5966 int max = sched_get_priority_max( SCHED_RR );
\r
5967 if ( priority < min ) priority = min;
\r
5968 else if ( priority > max ) priority = max;
\r
5969 stream_.callbackInfo.priority = priority;
\r
5973 stream_.callbackInfo.isRunning = true;
\r
5974 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5975 pthread_attr_destroy( &attr );
\r
5977 stream_.callbackInfo.isRunning = false;
\r
5978 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5987 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5988 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5989 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5991 stream_.apiHandle = 0;
\r
5994 if ( phandle) snd_pcm_close( phandle );
\r
5996 for ( int i=0; i<2; i++ ) {
\r
5997 if ( stream_.userBuffer[i] ) {
\r
5998 free( stream_.userBuffer[i] );
\r
5999 stream_.userBuffer[i] = 0;
\r
6003 if ( stream_.deviceBuffer ) {
\r
6004 free( stream_.deviceBuffer );
\r
6005 stream_.deviceBuffer = 0;
\r
6008 stream_.state = STREAM_CLOSED;
\r
6012 void RtApiAlsa :: closeStream()
\r
6014 if ( stream_.state == STREAM_CLOSED ) {
\r
6015 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6016 error( RtError::WARNING );
\r
6020 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6021 stream_.callbackInfo.isRunning = false;
\r
6022 MUTEX_LOCK( &stream_.mutex );
\r
6023 if ( stream_.state == STREAM_STOPPED ) {
\r
6024 apiInfo->runnable = true;
\r
6025 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6027 MUTEX_UNLOCK( &stream_.mutex );
\r
6028 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6030 if ( stream_.state == STREAM_RUNNING ) {
\r
6031 stream_.state = STREAM_STOPPED;
\r
6032 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6033 snd_pcm_drop( apiInfo->handles[0] );
\r
6034 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6035 snd_pcm_drop( apiInfo->handles[1] );
\r
6039 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6040 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6041 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6043 stream_.apiHandle = 0;
\r
6046 for ( int i=0; i<2; i++ ) {
\r
6047 if ( stream_.userBuffer[i] ) {
\r
6048 free( stream_.userBuffer[i] );
\r
6049 stream_.userBuffer[i] = 0;
\r
6053 if ( stream_.deviceBuffer ) {
\r
6054 free( stream_.deviceBuffer );
\r
6055 stream_.deviceBuffer = 0;
\r
6058 stream_.mode = UNINITIALIZED;
\r
6059 stream_.state = STREAM_CLOSED;
\r
6062 void RtApiAlsa :: startStream()
\r
6064 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6067 if ( stream_.state == STREAM_RUNNING ) {
\r
6068 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6069 error( RtError::WARNING );
\r
6073 MUTEX_LOCK( &stream_.mutex );
\r
6076 snd_pcm_state_t state;
\r
6077 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6078 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6079 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6080 state = snd_pcm_state( handle[0] );
\r
6081 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6082 result = snd_pcm_prepare( handle[0] );
\r
6083 if ( result < 0 ) {
\r
6084 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6085 errorText_ = errorStream_.str();
\r
6091 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6092 state = snd_pcm_state( handle[1] );
\r
6093 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6094 result = snd_pcm_prepare( handle[1] );
\r
6095 if ( result < 0 ) {
\r
6096 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6097 errorText_ = errorStream_.str();
\r
6103 stream_.state = STREAM_RUNNING;
\r
6106 apiInfo->runnable = true;
\r
6107 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6108 MUTEX_UNLOCK( &stream_.mutex );
\r
6110 if ( result >= 0 ) return;
\r
6111 error( RtError::SYSTEM_ERROR );
\r
6114 void RtApiAlsa :: stopStream()
\r
6117 if ( stream_.state == STREAM_STOPPED ) {
\r
6118 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6119 error( RtError::WARNING );
\r
6123 stream_.state = STREAM_STOPPED;
\r
6124 MUTEX_LOCK( &stream_.mutex );
\r
6127 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6128 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6129 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6130 if ( apiInfo->synchronized )
\r
6131 result = snd_pcm_drop( handle[0] );
\r
6133 result = snd_pcm_drain( handle[0] );
\r
6134 if ( result < 0 ) {
\r
6135 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6136 errorText_ = errorStream_.str();
\r
6141 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6142 result = snd_pcm_drop( handle[1] );
\r
6143 if ( result < 0 ) {
\r
6144 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6145 errorText_ = errorStream_.str();
\r
6151 MUTEX_UNLOCK( &stream_.mutex );
\r
6153 if ( result >= 0 ) return;
\r
6154 error( RtError::SYSTEM_ERROR );
\r
6157 void RtApiAlsa :: abortStream()
\r
6160 if ( stream_.state == STREAM_STOPPED ) {
\r
6161 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6162 error( RtError::WARNING );
\r
6166 stream_.state = STREAM_STOPPED;
\r
6167 MUTEX_LOCK( &stream_.mutex );
\r
6170 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6171 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6172 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6173 result = snd_pcm_drop( handle[0] );
\r
6174 if ( result < 0 ) {
\r
6175 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6176 errorText_ = errorStream_.str();
\r
6181 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6182 result = snd_pcm_drop( handle[1] );
\r
6183 if ( result < 0 ) {
\r
6184 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6185 errorText_ = errorStream_.str();
\r
6191 MUTEX_UNLOCK( &stream_.mutex );
\r
6193 if ( result >= 0 ) return;
\r
6194 error( RtError::SYSTEM_ERROR );
\r
6197 void RtApiAlsa :: callbackEvent()
\r
6199 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6200 if ( stream_.state == STREAM_STOPPED ) {
\r
6201 MUTEX_LOCK( &stream_.mutex );
\r
6202 while ( !apiInfo->runnable )
\r
6203 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6205 if ( stream_.state != STREAM_RUNNING ) {
\r
6206 MUTEX_UNLOCK( &stream_.mutex );
\r
6209 MUTEX_UNLOCK( &stream_.mutex );
\r
6212 if ( stream_.state == STREAM_CLOSED ) {
\r
6213 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6214 error( RtError::WARNING );
\r
6218 int doStopStream = 0;
\r
6219 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6220 double streamTime = getStreamTime();
\r
6221 RtAudioStreamStatus status = 0;
\r
6222 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6223 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6224 apiInfo->xrun[0] = false;
\r
6226 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6227 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6228 apiInfo->xrun[1] = false;
\r
6230 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6231 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6233 if ( doStopStream == 2 ) {
\r
6238 MUTEX_LOCK( &stream_.mutex );
\r
6240 // The state might change while waiting on a mutex.
\r
6241 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6246 snd_pcm_t **handle;
\r
6247 snd_pcm_sframes_t frames;
\r
6248 RtAudioFormat format;
\r
6249 handle = (snd_pcm_t **) apiInfo->handles;
\r
6251 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6253 // Setup parameters.
\r
6254 if ( stream_.doConvertBuffer[1] ) {
\r
6255 buffer = stream_.deviceBuffer;
\r
6256 channels = stream_.nDeviceChannels[1];
\r
6257 format = stream_.deviceFormat[1];
\r
6260 buffer = stream_.userBuffer[1];
\r
6261 channels = stream_.nUserChannels[1];
\r
6262 format = stream_.userFormat;
\r
6265 // Read samples from device in interleaved/non-interleaved format.
\r
6266 if ( stream_.deviceInterleaved[1] )
\r
6267 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6269 void *bufs[channels];
\r
6270 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6271 for ( int i=0; i<channels; i++ )
\r
6272 bufs[i] = (void *) (buffer + (i * offset));
\r
6273 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6276 if ( result < (int) stream_.bufferSize ) {
\r
6277 // Either an error or overrun occured.
\r
6278 if ( result == -EPIPE ) {
\r
6279 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6280 if ( state == SND_PCM_STATE_XRUN ) {
\r
6281 apiInfo->xrun[1] = true;
\r
6282 result = snd_pcm_prepare( handle[1] );
\r
6283 if ( result < 0 ) {
\r
6284 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6285 errorText_ = errorStream_.str();
\r
6289 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6290 errorText_ = errorStream_.str();
\r
6294 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6295 errorText_ = errorStream_.str();
\r
6297 error( RtError::WARNING );
\r
6301 // Do byte swapping if necessary.
\r
6302 if ( stream_.doByteSwap[1] )
\r
6303 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6305 // Do buffer conversion if necessary.
\r
6306 if ( stream_.doConvertBuffer[1] )
\r
6307 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6309 // Check stream latency
\r
6310 result = snd_pcm_delay( handle[1], &frames );
\r
6311 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6316 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6318 // Setup parameters and do buffer conversion if necessary.
\r
6319 if ( stream_.doConvertBuffer[0] ) {
\r
6320 buffer = stream_.deviceBuffer;
\r
6321 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6322 channels = stream_.nDeviceChannels[0];
\r
6323 format = stream_.deviceFormat[0];
\r
6326 buffer = stream_.userBuffer[0];
\r
6327 channels = stream_.nUserChannels[0];
\r
6328 format = stream_.userFormat;
\r
6331 // Do byte swapping if necessary.
\r
6332 if ( stream_.doByteSwap[0] )
\r
6333 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6335 // Write samples to device in interleaved/non-interleaved format.
\r
6336 if ( stream_.deviceInterleaved[0] )
\r
6337 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6339 void *bufs[channels];
\r
6340 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6341 for ( int i=0; i<channels; i++ )
\r
6342 bufs[i] = (void *) (buffer + (i * offset));
\r
6343 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6346 if ( result < (int) stream_.bufferSize ) {
\r
6347 // Either an error or underrun occured.
\r
6348 if ( result == -EPIPE ) {
\r
6349 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6350 if ( state == SND_PCM_STATE_XRUN ) {
\r
6351 apiInfo->xrun[0] = true;
\r
6352 result = snd_pcm_prepare( handle[0] );
\r
6353 if ( result < 0 ) {
\r
6354 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6355 errorText_ = errorStream_.str();
\r
6359 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6360 errorText_ = errorStream_.str();
\r
6364 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6365 errorText_ = errorStream_.str();
\r
6367 error( RtError::WARNING );
\r
6371 // Check stream latency
\r
6372 result = snd_pcm_delay( handle[0], &frames );
\r
6373 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6377 MUTEX_UNLOCK( &stream_.mutex );
\r
6379 RtApi::tickStreamTime();
\r
6380 if ( doStopStream == 1 ) this->stopStream();
\r
6383 static void *alsaCallbackHandler( void *ptr )
\r
6385 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6386 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6387 bool *isRunning = &info->isRunning;
\r
6389 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6390 if ( &info->doRealtime ) {
\r
6391 pthread_t tID = pthread_self(); // ID of this thread
\r
6392 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6393 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6397 while ( *isRunning == true ) {
\r
6398 pthread_testcancel();
\r
6399 object->callbackEvent();
\r
6402 pthread_exit( NULL );
\r
6405 //******************** End of __LINUX_ALSA__ *********************//
\r
6408 #if defined(__LINUX_PULSE__)
\r
6410 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6411 // and Tristan Matthews.
\r
6413 #include <pulse/error.h>
\r
6414 #include <pulse/simple.h>
\r
6417 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
6418 44100, 48000, 96000, 0};
\r
6420 struct rtaudio_pa_format_mapping_t {
\r
6421 RtAudioFormat rtaudio_format;
\r
6422 pa_sample_format_t pa_format;
\r
6425 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6426 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6427 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6428 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6429 {0, PA_SAMPLE_INVALID}};
\r
6431 struct PulseAudioHandle {
\r
6432 pa_simple *s_play;
\r
6435 pthread_cond_t runnable_cv;
\r
6437 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6440 RtApiPulse::~RtApiPulse()
\r
6442 if ( stream_.state != STREAM_CLOSED )
\r
6446 unsigned int RtApiPulse::getDeviceCount( void )
\r
6451 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6453 RtAudio::DeviceInfo info;
\r
6454 info.probed = true;
\r
6455 info.name = "PulseAudio";
\r
6456 info.outputChannels = 2;
\r
6457 info.inputChannels = 2;
\r
6458 info.duplexChannels = 2;
\r
6459 info.isDefaultOutput = true;
\r
6460 info.isDefaultInput = true;
\r
6462 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6463 info.sampleRates.push_back( *sr );
\r
6465 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6470 static void *pulseaudio_callback( void * user )
\r
6472 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6473 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6474 volatile bool *isRunning = &cbi->isRunning;
\r
6476 while ( *isRunning ) {
\r
6477 pthread_testcancel();
\r
6478 context->callbackEvent();
\r
6481 pthread_exit( NULL );
\r
6484 void RtApiPulse::closeStream( void )
\r
6486 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6488 stream_.callbackInfo.isRunning = false;
\r
6490 MUTEX_LOCK( &stream_.mutex );
\r
6491 if ( stream_.state == STREAM_STOPPED ) {
\r
6492 pah->runnable = true;
\r
6493 pthread_cond_signal( &pah->runnable_cv );
\r
6495 MUTEX_UNLOCK( &stream_.mutex );
\r
6497 pthread_join( pah->thread, 0 );
\r
6498 if ( pah->s_play ) {
\r
6499 pa_simple_flush( pah->s_play, NULL );
\r
6500 pa_simple_free( pah->s_play );
\r
6503 pa_simple_free( pah->s_rec );
\r
6505 pthread_cond_destroy( &pah->runnable_cv );
\r
6507 stream_.apiHandle = 0;
\r
6510 if ( stream_.userBuffer[0] ) {
\r
6511 free( stream_.userBuffer[0] );
\r
6512 stream_.userBuffer[0] = 0;
\r
6514 if ( stream_.userBuffer[1] ) {
\r
6515 free( stream_.userBuffer[1] );
\r
6516 stream_.userBuffer[1] = 0;
\r
6519 stream_.state = STREAM_CLOSED;
\r
6520 stream_.mode = UNINITIALIZED;
\r
6523 void RtApiPulse::callbackEvent( void )
\r
6525 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6527 if ( stream_.state == STREAM_STOPPED ) {
\r
6528 MUTEX_LOCK( &stream_.mutex );
\r
6529 while ( !pah->runnable )
\r
6530 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6532 if ( stream_.state != STREAM_RUNNING ) {
\r
6533 MUTEX_UNLOCK( &stream_.mutex );
\r
6536 MUTEX_UNLOCK( &stream_.mutex );
\r
6539 if ( stream_.state == STREAM_CLOSED ) {
\r
6540 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6541 "this shouldn't happen!";
\r
6542 error( RtError::WARNING );
\r
6546 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6547 double streamTime = getStreamTime();
\r
6548 RtAudioStreamStatus status = 0;
\r
6549 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6550 stream_.bufferSize, streamTime, status,
\r
6551 stream_.callbackInfo.userData );
\r
6553 if ( doStopStream == 2 ) {
\r
6558 MUTEX_LOCK( &stream_.mutex );
\r
6559 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6560 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6562 if ( stream_.state != STREAM_RUNNING )
\r
6567 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6568 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6569 convertBuffer( stream_.deviceBuffer,
\r
6570 stream_.userBuffer[OUTPUT],
\r
6571 stream_.convertInfo[OUTPUT] );
\r
6572 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6573 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6575 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6576 formatBytes( stream_.userFormat );
\r
6578 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6579 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6580 pa_strerror( pa_error ) << ".";
\r
6581 errorText_ = errorStream_.str();
\r
6582 error( RtError::WARNING );
\r
6586 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6587 if ( stream_.doConvertBuffer[INPUT] )
\r
6588 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6589 formatBytes( stream_.deviceFormat[INPUT] );
\r
6591 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6592 formatBytes( stream_.userFormat );
\r
6594 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6595 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6596 pa_strerror( pa_error ) << ".";
\r
6597 errorText_ = errorStream_.str();
\r
6598 error( RtError::WARNING );
\r
6600 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6601 convertBuffer( stream_.userBuffer[INPUT],
\r
6602 stream_.deviceBuffer,
\r
6603 stream_.convertInfo[INPUT] );
\r
6608 MUTEX_UNLOCK( &stream_.mutex );
\r
6609 RtApi::tickStreamTime();
\r
6611 if ( doStopStream == 1 )
\r
6615 void RtApiPulse::startStream( void )
\r
6617 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6619 if ( stream_.state == STREAM_CLOSED ) {
\r
6620 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6621 error( RtError::INVALID_USE );
\r
6624 if ( stream_.state == STREAM_RUNNING ) {
\r
6625 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6626 error( RtError::WARNING );
\r
6630 MUTEX_LOCK( &stream_.mutex );
\r
6632 stream_.state = STREAM_RUNNING;
\r
6634 pah->runnable = true;
\r
6635 pthread_cond_signal( &pah->runnable_cv );
\r
6636 MUTEX_UNLOCK( &stream_.mutex );
\r
6639 void RtApiPulse::stopStream( void )
\r
6641 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6643 if ( stream_.state == STREAM_CLOSED ) {
\r
6644 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6645 error( RtError::INVALID_USE );
\r
6648 if ( stream_.state == STREAM_STOPPED ) {
\r
6649 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6650 error( RtError::WARNING );
\r
6654 stream_.state = STREAM_STOPPED;
\r
6655 MUTEX_LOCK( &stream_.mutex );
\r
6657 if ( pah && pah->s_play ) {
\r
6659 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6660 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6661 pa_strerror( pa_error ) << ".";
\r
6662 errorText_ = errorStream_.str();
\r
6663 MUTEX_UNLOCK( &stream_.mutex );
\r
6664 error( RtError::SYSTEM_ERROR );
\r
6669 stream_.state = STREAM_STOPPED;
\r
6670 MUTEX_UNLOCK( &stream_.mutex );
\r
6673 void RtApiPulse::abortStream( void )
\r
6675 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6677 if ( stream_.state == STREAM_CLOSED ) {
\r
6678 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6679 error( RtError::INVALID_USE );
\r
6682 if ( stream_.state == STREAM_STOPPED ) {
\r
6683 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6684 error( RtError::WARNING );
\r
6688 stream_.state = STREAM_STOPPED;
\r
6689 MUTEX_LOCK( &stream_.mutex );
\r
6691 if ( pah && pah->s_play ) {
\r
6693 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6694 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6695 pa_strerror( pa_error ) << ".";
\r
6696 errorText_ = errorStream_.str();
\r
6697 MUTEX_UNLOCK( &stream_.mutex );
\r
6698 error( RtError::SYSTEM_ERROR );
\r
6703 stream_.state = STREAM_STOPPED;
\r
6704 MUTEX_UNLOCK( &stream_.mutex );
\r
6707 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6708 unsigned int channels, unsigned int firstChannel,
\r
6709 unsigned int sampleRate, RtAudioFormat format,
\r
6710 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6712 PulseAudioHandle *pah = 0;
\r
6713 unsigned long bufferBytes = 0;
\r
6714 pa_sample_spec ss;
\r
6716 if ( device != 0 ) return false;
\r
6717 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6718 if ( channels != 1 && channels != 2 ) {
\r
6719 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6722 ss.channels = channels;
\r
6724 if ( firstChannel != 0 ) return false;
\r
6726 bool sr_found = false;
\r
6727 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6728 if ( sampleRate == *sr ) {
\r
6730 stream_.sampleRate = sampleRate;
\r
6731 ss.rate = sampleRate;
\r
6735 if ( !sr_found ) {
\r
6736 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6740 bool sf_found = 0;
\r
6741 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6742 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6743 if ( format == sf->rtaudio_format ) {
\r
6745 stream_.userFormat = sf->rtaudio_format;
\r
6746 ss.format = sf->pa_format;
\r
6750 if ( !sf_found ) {
\r
6751 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6755 // Set interleaving parameters.
\r
6756 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6757 else stream_.userInterleaved = true;
\r
6758 stream_.deviceInterleaved[mode] = true;
\r
6759 stream_.nBuffers = 1;
\r
6760 stream_.doByteSwap[mode] = false;
\r
6761 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6762 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6763 stream_.nUserChannels[mode] = channels;
\r
6764 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6765 stream_.channelOffset[mode] = 0;
\r
6767 // Allocate necessary internal buffers.
\r
6768 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6769 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6770 if ( stream_.userBuffer[mode] == NULL ) {
\r
6771 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6774 stream_.bufferSize = *bufferSize;
\r
6776 if ( stream_.doConvertBuffer[mode] ) {
\r
6778 bool makeBuffer = true;
\r
6779 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6780 if ( mode == INPUT ) {
\r
6781 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6782 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6783 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6787 if ( makeBuffer ) {
\r
6788 bufferBytes *= *bufferSize;
\r
6789 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6790 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6791 if ( stream_.deviceBuffer == NULL ) {
\r
6792 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6798 stream_.device[mode] = device;
\r
6800 // Setup the buffer conversion information structure.
\r
6801 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6803 if ( !stream_.apiHandle ) {
\r
6804 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6806 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6810 stream_.apiHandle = pah;
\r
6811 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6812 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6816 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6821 pah->s_rec = pa_simple_new( NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error );
\r
6822 if ( !pah->s_rec ) {
\r
6823 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6828 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6829 if ( !pah->s_play ) {
\r
6830 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6838 if ( stream_.mode == UNINITIALIZED )
\r
6839 stream_.mode = mode;
\r
6840 else if ( stream_.mode == mode )
\r
6843 stream_.mode = DUPLEX;
\r
6845 if ( !stream_.callbackInfo.isRunning ) {
\r
6846 stream_.callbackInfo.object = this;
\r
6847 stream_.callbackInfo.isRunning = true;
\r
6848 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6849 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6854 stream_.state = STREAM_STOPPED;
\r
6858 if ( pah && stream_.callbackInfo.isRunning ) {
\r
6859 pthread_cond_destroy( &pah->runnable_cv );
\r
6861 stream_.apiHandle = 0;
\r
6864 for ( int i=0; i<2; i++ ) {
\r
6865 if ( stream_.userBuffer[i] ) {
\r
6866 free( stream_.userBuffer[i] );
\r
6867 stream_.userBuffer[i] = 0;
\r
6871 if ( stream_.deviceBuffer ) {
\r
6872 free( stream_.deviceBuffer );
\r
6873 stream_.deviceBuffer = 0;
\r
6879 //******************** End of __LINUX_PULSE__ *********************//
\r
6882 #if defined(__LINUX_OSS__)
\r
6884 #include <unistd.h>
\r
6885 #include <sys/ioctl.h>
\r
6886 #include <unistd.h>
\r
6887 #include <fcntl.h>
\r
6888 #include "soundcard.h"
\r
6889 #include <errno.h>
\r
static void *ossCallbackHandler( void * ptr );

// A structure to hold various information related to the OSS API
// implementation.  (xrun and triggered are restored from the visible
// constructor initializer list.)
struct OssHandle {
  int id[2];               // device ids
  bool xrun[2];            // over/underrun flags, one per direction
  bool triggered;          // device trigger state
  pthread_cond_t runnable; // signalled when the callback thread may run

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6906 RtApiOss :: RtApiOss()
\r
6908 // Nothing to do here.
\r
6911 RtApiOss :: ~RtApiOss()
\r
6913 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6916 unsigned int RtApiOss :: getDeviceCount( void )
\r
6918 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6919 if ( mixerfd == -1 ) {
\r
6920 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6921 error( RtError::WARNING );
\r
6925 oss_sysinfo sysinfo;
\r
6926 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6928 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6929 error( RtError::WARNING );
\r
6934 return sysinfo.numaudios;
\r
6937 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6939 RtAudio::DeviceInfo info;
\r
6940 info.probed = false;
\r
6942 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6943 if ( mixerfd == -1 ) {
\r
6944 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6945 error( RtError::WARNING );
\r
6949 oss_sysinfo sysinfo;
\r
6950 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6951 if ( result == -1 ) {
\r
6953 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6954 error( RtError::WARNING );
\r
6958 unsigned nDevices = sysinfo.numaudios;
\r
6959 if ( nDevices == 0 ) {
\r
6961 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6962 error( RtError::INVALID_USE );
\r
6966 if ( device >= nDevices ) {
\r
6968 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6969 error( RtError::INVALID_USE );
\r
6973 oss_audioinfo ainfo;
\r
6974 ainfo.dev = device;
\r
6975 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6977 if ( result == -1 ) {
\r
6978 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6979 errorText_ = errorStream_.str();
\r
6980 error( RtError::WARNING );
\r
6985 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6986 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6987 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6988 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6989 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6992 // Probe data formats ... do for input
\r
6993 unsigned long mask = ainfo.iformats;
\r
6994 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6995 info.nativeFormats |= RTAUDIO_SINT16;
\r
6996 if ( mask & AFMT_S8 )
\r
6997 info.nativeFormats |= RTAUDIO_SINT8;
\r
6998 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6999 info.nativeFormats |= RTAUDIO_SINT32;
\r
7000 if ( mask & AFMT_FLOAT )
\r
7001 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7002 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
7003 info.nativeFormats |= RTAUDIO_SINT24;
\r
7005 // Check that we have at least one supported format
\r
7006 if ( info.nativeFormats == 0 ) {
\r
7007 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7008 errorText_ = errorStream_.str();
\r
7009 error( RtError::WARNING );
\r
7013 // Probe the supported sample rates.
\r
7014 info.sampleRates.clear();
\r
7015 if ( ainfo.nrates ) {
\r
7016 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
7017 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7018 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
7019 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7026 // Check min and max rate values;
\r
7027 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7028 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
7029 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7033 if ( info.sampleRates.size() == 0 ) {
\r
7034 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
7035 errorText_ = errorStream_.str();
\r
7036 error( RtError::WARNING );
\r
7039 info.probed = true;
\r
7040 info.name = ainfo.name;
\r
7047 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7048 unsigned int firstChannel, unsigned int sampleRate,
\r
7049 RtAudioFormat format, unsigned int *bufferSize,
\r
7050 RtAudio::StreamOptions *options )
\r
7052 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7053 if ( mixerfd == -1 ) {
\r
7054 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7058 oss_sysinfo sysinfo;
\r
7059 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7060 if ( result == -1 ) {
\r
7062 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7066 unsigned nDevices = sysinfo.numaudios;
\r
7067 if ( nDevices == 0 ) {
\r
7068 // This should not happen because a check is made before this function is called.
\r
7070 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7074 if ( device >= nDevices ) {
\r
7075 // This should not happen because a check is made before this function is called.
\r
7077 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7081 oss_audioinfo ainfo;
\r
7082 ainfo.dev = device;
\r
7083 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7085 if ( result == -1 ) {
\r
7086 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7087 errorText_ = errorStream_.str();
\r
7091 // Check if device supports input or output
\r
7092 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7093 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7094 if ( mode == OUTPUT )
\r
7095 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7097 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7098 errorText_ = errorStream_.str();
\r
7103 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7104 if ( mode == OUTPUT )
\r
7105 flags |= O_WRONLY;
\r
7106 else { // mode == INPUT
\r
7107 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7108 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7109 close( handle->id[0] );
\r
7110 handle->id[0] = 0;
\r
7111 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7112 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7113 errorText_ = errorStream_.str();
\r
7116 // Check that the number previously set channels is the same.
\r
7117 if ( stream_.nUserChannels[0] != channels ) {
\r
7118 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7119 errorText_ = errorStream_.str();
\r
7125 flags |= O_RDONLY;
\r
7128 // Set exclusive access if specified.
\r
7129 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7131 // Try to open the device.
\r
7133 fd = open( ainfo.devnode, flags, 0 );
\r
7135 if ( errno == EBUSY )
\r
7136 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7138 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7139 errorText_ = errorStream_.str();
\r
7143 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7145 if ( flags | O_RDWR ) {
\r
7146 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7147 if ( result == -1) {
\r
7148 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7149 errorText_ = errorStream_.str();
\r
7155 // Check the device channel support.
\r
7156 stream_.nUserChannels[mode] = channels;
\r
7157 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7159 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7160 errorText_ = errorStream_.str();
\r
7164 // Set the number of channels.
\r
7165 int deviceChannels = channels + firstChannel;
\r
7166 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7167 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7169 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7170 errorText_ = errorStream_.str();
\r
7173 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7175 // Get the data format mask
\r
7177 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7178 if ( result == -1 ) {
\r
7180 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7181 errorText_ = errorStream_.str();
\r
7185 // Determine how to set the device format.
\r
7186 stream_.userFormat = format;
\r
7187 int deviceFormat = -1;
\r
7188 stream_.doByteSwap[mode] = false;
\r
7189 if ( format == RTAUDIO_SINT8 ) {
\r
7190 if ( mask & AFMT_S8 ) {
\r
7191 deviceFormat = AFMT_S8;
\r
7192 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7195 else if ( format == RTAUDIO_SINT16 ) {
\r
7196 if ( mask & AFMT_S16_NE ) {
\r
7197 deviceFormat = AFMT_S16_NE;
\r
7198 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7200 else if ( mask & AFMT_S16_OE ) {
\r
7201 deviceFormat = AFMT_S16_OE;
\r
7202 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7203 stream_.doByteSwap[mode] = true;
\r
7206 else if ( format == RTAUDIO_SINT24 ) {
\r
7207 if ( mask & AFMT_S24_NE ) {
\r
7208 deviceFormat = AFMT_S24_NE;
\r
7209 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7211 else if ( mask & AFMT_S24_OE ) {
\r
7212 deviceFormat = AFMT_S24_OE;
\r
7213 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7214 stream_.doByteSwap[mode] = true;
\r
7217 else if ( format == RTAUDIO_SINT32 ) {
\r
7218 if ( mask & AFMT_S32_NE ) {
\r
7219 deviceFormat = AFMT_S32_NE;
\r
7220 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7222 else if ( mask & AFMT_S32_OE ) {
\r
7223 deviceFormat = AFMT_S32_OE;
\r
7224 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7225 stream_.doByteSwap[mode] = true;
\r
7229 if ( deviceFormat == -1 ) {
\r
7230 // The user requested format is not natively supported by the device.
\r
7231 if ( mask & AFMT_S16_NE ) {
\r
7232 deviceFormat = AFMT_S16_NE;
\r
7233 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7235 else if ( mask & AFMT_S32_NE ) {
\r
7236 deviceFormat = AFMT_S32_NE;
\r
7237 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7239 else if ( mask & AFMT_S24_NE ) {
\r
7240 deviceFormat = AFMT_S24_NE;
\r
7241 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7243 else if ( mask & AFMT_S16_OE ) {
\r
7244 deviceFormat = AFMT_S16_OE;
\r
7245 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7246 stream_.doByteSwap[mode] = true;
\r
7248 else if ( mask & AFMT_S32_OE ) {
\r
7249 deviceFormat = AFMT_S32_OE;
\r
7250 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7251 stream_.doByteSwap[mode] = true;
\r
7253 else if ( mask & AFMT_S24_OE ) {
\r
7254 deviceFormat = AFMT_S24_OE;
\r
7255 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7256 stream_.doByteSwap[mode] = true;
\r
7258 else if ( mask & AFMT_S8) {
\r
7259 deviceFormat = AFMT_S8;
\r
7260 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7264 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7265 // This really shouldn't happen ...
\r
7267 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7268 errorText_ = errorStream_.str();
\r
7272 // Set the data format.
\r
7273 int temp = deviceFormat;
\r
7274 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7275 if ( result == -1 || deviceFormat != temp ) {
\r
7277 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7278 errorText_ = errorStream_.str();
\r
7282 // Attempt to set the buffer size. According to OSS, the minimum
\r
7283 // number of buffers is two. The supposed minimum buffer size is 16
\r
7284 // bytes, so that will be our lower bound. The argument to this
\r
7285 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7286 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7287 // We'll check the actual value used near the end of the setup
\r
7289 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7290 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7292 if ( options ) buffers = options->numberOfBuffers;
\r
7293 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7294 if ( buffers < 2 ) buffers = 3;
\r
7295 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7296 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7297 if ( result == -1 ) {
\r
7299 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7300 errorText_ = errorStream_.str();
\r
7303 stream_.nBuffers = buffers;
\r
7305 // Save buffer size (in sample frames).
\r
7306 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7307 stream_.bufferSize = *bufferSize;
\r
7309 // Set the sample rate.
\r
7310 int srate = sampleRate;
\r
7311 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7312 if ( result == -1 ) {
\r
7314 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7315 errorText_ = errorStream_.str();
\r
7319 // Verify the sample rate setup worked.
\r
7320 if ( abs( srate - sampleRate ) > 100 ) {
\r
7322 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7323 errorText_ = errorStream_.str();
\r
7326 stream_.sampleRate = sampleRate;
\r
7328 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7329 // We're doing duplex setup here.
\r
7330 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7331 stream_.nDeviceChannels[0] = deviceChannels;
\r
7334 // Set interleaving parameters.
\r
7335 stream_.userInterleaved = true;
\r
7336 stream_.deviceInterleaved[mode] = true;
\r
7337 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7338 stream_.userInterleaved = false;
\r
7340 // Set flags for buffer conversion
\r
7341 stream_.doConvertBuffer[mode] = false;
\r
7342 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7343 stream_.doConvertBuffer[mode] = true;
\r
7344 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7345 stream_.doConvertBuffer[mode] = true;
\r
7346 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7347 stream_.nUserChannels[mode] > 1 )
\r
7348 stream_.doConvertBuffer[mode] = true;
\r
7350 // Allocate the stream handles if necessary and then save.
\r
7351 if ( stream_.apiHandle == 0 ) {
\r
7353 handle = new OssHandle;
\r
7355 catch ( std::bad_alloc& ) {
\r
7356 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7360 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7361 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7365 stream_.apiHandle = (void *) handle;
\r
7368 handle = (OssHandle *) stream_.apiHandle;
\r
7370 handle->id[mode] = fd;
\r
7372 // Allocate necessary internal buffers.
\r
7373 unsigned long bufferBytes;
\r
7374 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7375 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7376 if ( stream_.userBuffer[mode] == NULL ) {
\r
7377 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7381 if ( stream_.doConvertBuffer[mode] ) {
\r
7383 bool makeBuffer = true;
\r
7384 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7385 if ( mode == INPUT ) {
\r
7386 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7387 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7388 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7392 if ( makeBuffer ) {
\r
7393 bufferBytes *= *bufferSize;
\r
7394 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7395 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7396 if ( stream_.deviceBuffer == NULL ) {
\r
7397 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7403 stream_.device[mode] = device;
\r
7404 stream_.state = STREAM_STOPPED;
\r
7406 // Setup the buffer conversion information structure.
\r
7407 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7409 // Setup thread if necessary.
\r
7410 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7411 // We had already set up an output stream.
\r
7412 stream_.mode = DUPLEX;
\r
7413 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7416 stream_.mode = mode;
\r
7418 // Setup callback thread.
\r
7419 stream_.callbackInfo.object = (void *) this;
\r
7421 // Set the thread attributes for joinable and realtime scheduling
\r
7422 // priority. The higher priority will only take affect if the
\r
7423 // program is run as root or suid.
\r
7424 pthread_attr_t attr;
\r
7425 pthread_attr_init( &attr );
\r
7426 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7427 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7428 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7429 struct sched_param param;
\r
7430 int priority = options->priority;
\r
7431 int min = sched_get_priority_min( SCHED_RR );
\r
7432 int max = sched_get_priority_max( SCHED_RR );
\r
7433 if ( priority < min ) priority = min;
\r
7434 else if ( priority > max ) priority = max;
\r
7435 param.sched_priority = priority;
\r
7436 pthread_attr_setschedparam( &attr, ¶m );
\r
7437 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7440 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7442 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7445 stream_.callbackInfo.isRunning = true;
\r
7446 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7447 pthread_attr_destroy( &attr );
\r
7449 stream_.callbackInfo.isRunning = false;
\r
7450 errorText_ = "RtApiOss::error creating callback thread!";
\r
7459 pthread_cond_destroy( &handle->runnable );
\r
7460 if ( handle->id[0] ) close( handle->id[0] );
\r
7461 if ( handle->id[1] ) close( handle->id[1] );
\r
7463 stream_.apiHandle = 0;
\r
7466 for ( int i=0; i<2; i++ ) {
\r
7467 if ( stream_.userBuffer[i] ) {
\r
7468 free( stream_.userBuffer[i] );
\r
7469 stream_.userBuffer[i] = 0;
\r
7473 if ( stream_.deviceBuffer ) {
\r
7474 free( stream_.deviceBuffer );
\r
7475 stream_.deviceBuffer = 0;
\r
7481 void RtApiOss :: closeStream()
\r
7483 if ( stream_.state == STREAM_CLOSED ) {
\r
7484 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7485 error( RtError::WARNING );
\r
7489 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7490 stream_.callbackInfo.isRunning = false;
\r
7491 MUTEX_LOCK( &stream_.mutex );
\r
7492 if ( stream_.state == STREAM_STOPPED )
\r
7493 pthread_cond_signal( &handle->runnable );
\r
7494 MUTEX_UNLOCK( &stream_.mutex );
\r
7495 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7497 if ( stream_.state == STREAM_RUNNING ) {
\r
7498 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7499 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7501 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7502 stream_.state = STREAM_STOPPED;
\r
7506 pthread_cond_destroy( &handle->runnable );
\r
7507 if ( handle->id[0] ) close( handle->id[0] );
\r
7508 if ( handle->id[1] ) close( handle->id[1] );
\r
7510 stream_.apiHandle = 0;
\r
7513 for ( int i=0; i<2; i++ ) {
\r
7514 if ( stream_.userBuffer[i] ) {
\r
7515 free( stream_.userBuffer[i] );
\r
7516 stream_.userBuffer[i] = 0;
\r
7520 if ( stream_.deviceBuffer ) {
\r
7521 free( stream_.deviceBuffer );
\r
7522 stream_.deviceBuffer = 0;
\r
7525 stream_.mode = UNINITIALIZED;
\r
7526 stream_.state = STREAM_CLOSED;
\r
7529 void RtApiOss :: startStream()
\r
7532 if ( stream_.state == STREAM_RUNNING ) {
\r
7533 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7534 error( RtError::WARNING );
\r
7538 MUTEX_LOCK( &stream_.mutex );
\r
7540 stream_.state = STREAM_RUNNING;
\r
7542 // No need to do anything else here ... OSS automatically starts
\r
7543 // when fed samples.
\r
7545 MUTEX_UNLOCK( &stream_.mutex );
\r
7547 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7548 pthread_cond_signal( &handle->runnable );
\r
7551 void RtApiOss :: stopStream()
\r
7554 if ( stream_.state == STREAM_STOPPED ) {
\r
7555 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7556 error( RtError::WARNING );
\r
7560 MUTEX_LOCK( &stream_.mutex );
\r
7562 // The state might change while waiting on a mutex.
\r
7563 if ( stream_.state == STREAM_STOPPED ) {
\r
7564 MUTEX_UNLOCK( &stream_.mutex );
\r
7569 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7570 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7572 // Flush the output with zeros a few times.
\r
7575 RtAudioFormat format;
\r
7577 if ( stream_.doConvertBuffer[0] ) {
\r
7578 buffer = stream_.deviceBuffer;
\r
7579 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7580 format = stream_.deviceFormat[0];
\r
7583 buffer = stream_.userBuffer[0];
\r
7584 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7585 format = stream_.userFormat;
\r
7588 memset( buffer, 0, samples * formatBytes(format) );
\r
7589 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7590 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7591 if ( result == -1 ) {
\r
7592 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7593 error( RtError::WARNING );
\r
7597 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7598 if ( result == -1 ) {
\r
7599 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7600 errorText_ = errorStream_.str();
\r
7603 handle->triggered = false;
\r
7606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7607 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7608 if ( result == -1 ) {
\r
7609 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7610 errorText_ = errorStream_.str();
\r
7616 stream_.state = STREAM_STOPPED;
\r
7617 MUTEX_UNLOCK( &stream_.mutex );
\r
7619 if ( result != -1 ) return;
\r
7620 error( RtError::SYSTEM_ERROR );
\r
7623 void RtApiOss :: abortStream()
\r
7626 if ( stream_.state == STREAM_STOPPED ) {
\r
7627 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7628 error( RtError::WARNING );
\r
7632 MUTEX_LOCK( &stream_.mutex );
\r
7634 // The state might change while waiting on a mutex.
\r
7635 if ( stream_.state == STREAM_STOPPED ) {
\r
7636 MUTEX_UNLOCK( &stream_.mutex );
\r
7641 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7642 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7643 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7644 if ( result == -1 ) {
\r
7645 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7646 errorText_ = errorStream_.str();
\r
7649 handle->triggered = false;
\r
7652 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7653 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7654 if ( result == -1 ) {
\r
7655 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7656 errorText_ = errorStream_.str();
\r
7662 stream_.state = STREAM_STOPPED;
\r
7663 MUTEX_UNLOCK( &stream_.mutex );
\r
7665 if ( result != -1 ) return;
\r
7666 error( RtError::SYSTEM_ERROR );
\r
7669 void RtApiOss :: callbackEvent()
\r
7671 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7672 if ( stream_.state == STREAM_STOPPED ) {
\r
7673 MUTEX_LOCK( &stream_.mutex );
\r
7674 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7675 if ( stream_.state != STREAM_RUNNING ) {
\r
7676 MUTEX_UNLOCK( &stream_.mutex );
\r
7679 MUTEX_UNLOCK( &stream_.mutex );
\r
7682 if ( stream_.state == STREAM_CLOSED ) {
\r
7683 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7684 error( RtError::WARNING );
\r
7688 // Invoke user callback to get fresh output data.
\r
7689 int doStopStream = 0;
\r
7690 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7691 double streamTime = getStreamTime();
\r
7692 RtAudioStreamStatus status = 0;
\r
7693 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7694 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7695 handle->xrun[0] = false;
\r
7697 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7698 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7699 handle->xrun[1] = false;
\r
7701 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7702 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7703 if ( doStopStream == 2 ) {
\r
7704 this->abortStream();
\r
7708 MUTEX_LOCK( &stream_.mutex );
\r
7710 // The state might change while waiting on a mutex.
\r
7711 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7716 RtAudioFormat format;
\r
7718 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7720 // Setup parameters and do buffer conversion if necessary.
\r
7721 if ( stream_.doConvertBuffer[0] ) {
\r
7722 buffer = stream_.deviceBuffer;
\r
7723 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7724 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7725 format = stream_.deviceFormat[0];
\r
7728 buffer = stream_.userBuffer[0];
\r
7729 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7730 format = stream_.userFormat;
\r
7733 // Do byte swapping if necessary.
\r
7734 if ( stream_.doByteSwap[0] )
\r
7735 byteSwapBuffer( buffer, samples, format );
\r
7737 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7739 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7740 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7741 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7742 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7743 handle->triggered = true;
\r
7746 // Write samples to device.
\r
7747 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7749 if ( result == -1 ) {
\r
7750 // We'll assume this is an underrun, though there isn't a
\r
7751 // specific means for determining that.
\r
7752 handle->xrun[0] = true;
\r
7753 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7754 error( RtError::WARNING );
\r
7755 // Continue on to input section.
\r
7759 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7761 // Setup parameters.
\r
7762 if ( stream_.doConvertBuffer[1] ) {
\r
7763 buffer = stream_.deviceBuffer;
\r
7764 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7765 format = stream_.deviceFormat[1];
\r
7768 buffer = stream_.userBuffer[1];
\r
7769 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7770 format = stream_.userFormat;
\r
7773 // Read samples from device.
\r
7774 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7776 if ( result == -1 ) {
\r
7777 // We'll assume this is an overrun, though there isn't a
\r
7778 // specific means for determining that.
\r
7779 handle->xrun[1] = true;
\r
7780 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7781 error( RtError::WARNING );
\r
7785 // Do byte swapping if necessary.
\r
7786 if ( stream_.doByteSwap[1] )
\r
7787 byteSwapBuffer( buffer, samples, format );
\r
7789 // Do buffer conversion if necessary.
\r
7790 if ( stream_.doConvertBuffer[1] )
\r
7791 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7795 MUTEX_UNLOCK( &stream_.mutex );
\r
7797 RtApi::tickStreamTime();
\r
7798 if ( doStopStream == 1 ) this->stopStream();
\r
7801 static void *ossCallbackHandler( void *ptr )
\r
7803 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7804 RtApiOss *object = (RtApiOss *) info->object;
\r
7805 bool *isRunning = &info->isRunning;
\r
7807 while ( *isRunning == true ) {
\r
7808 pthread_testcancel();
\r
7809 object->callbackEvent();
\r
7812 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
\r
7827 void RtApi :: error( RtError::Type type )
\r
7829 errorStream_.str(""); // clear the ostringstream
\r
7831 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7832 if ( errorCallback ) {
\r
7833 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7834 static bool firstErrorOccured = false;
\r
7836 if ( firstErrorOccured )
\r
7839 firstErrorOccured = true;
\r
7840 const std::string errorMessage = errorText_;
\r
7842 if ( type != RtError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7843 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7847 errorCallback( type, errorMessage );
\r
7848 firstErrorOccured = false;
\r
7852 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7853 std::cerr << '\n' << errorText_ << "\n\n";
\r
7854 else if ( type != RtError::WARNING )
\r
7855 throw( RtError( errorText_, type ) );
\r
7858 void RtApi :: verifyStream()
\r
7860 if ( stream_.state == STREAM_CLOSED ) {
\r
7861 errorText_ = "RtApi:: a stream is not open!";
\r
7862 error( RtError::INVALID_USE );
\r
7866 void RtApi :: clearStreamInfo()
\r
7868 stream_.mode = UNINITIALIZED;
\r
7869 stream_.state = STREAM_CLOSED;
\r
7870 stream_.sampleRate = 0;
\r
7871 stream_.bufferSize = 0;
\r
7872 stream_.nBuffers = 0;
\r
7873 stream_.userFormat = 0;
\r
7874 stream_.userInterleaved = true;
\r
7875 stream_.streamTime = 0.0;
\r
7876 stream_.apiHandle = 0;
\r
7877 stream_.deviceBuffer = 0;
\r
7878 stream_.callbackInfo.callback = 0;
\r
7879 stream_.callbackInfo.userData = 0;
\r
7880 stream_.callbackInfo.isRunning = false;
\r
7881 stream_.callbackInfo.errorCallback = 0;
\r
7882 for ( int i=0; i<2; i++ ) {
\r
7883 stream_.device[i] = 11111;
\r
7884 stream_.doConvertBuffer[i] = false;
\r
7885 stream_.deviceInterleaved[i] = true;
\r
7886 stream_.doByteSwap[i] = false;
\r
7887 stream_.nUserChannels[i] = 0;
\r
7888 stream_.nDeviceChannels[i] = 0;
\r
7889 stream_.channelOffset[i] = 0;
\r
7890 stream_.deviceFormat[i] = 0;
\r
7891 stream_.latency[i] = 0;
\r
7892 stream_.userBuffer[i] = 0;
\r
7893 stream_.convertInfo[i].channels = 0;
\r
7894 stream_.convertInfo[i].inJump = 0;
\r
7895 stream_.convertInfo[i].outJump = 0;
\r
7896 stream_.convertInfo[i].inFormat = 0;
\r
7897 stream_.convertInfo[i].outFormat = 0;
\r
7898 stream_.convertInfo[i].inOffset.clear();
\r
7899 stream_.convertInfo[i].outOffset.clear();
\r
7903 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7905 if ( format == RTAUDIO_SINT16 )
\r
7907 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7909 else if ( format == RTAUDIO_FLOAT64 )
\r
7911 else if ( format == RTAUDIO_SINT24 )
\r
7913 else if ( format == RTAUDIO_SINT8 )
\r
7916 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7917 error( RtError::WARNING );
\r
7922 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7924 if ( mode == INPUT ) { // convert device to user buffer
\r
7925 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7926 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7927 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7928 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7930 else { // convert user to device buffer
\r
7931 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7932 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7933 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7934 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7937 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7938 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7940 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7942 // Set up the interleave/deinterleave offsets.
\r
7943 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7944 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7945 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7946 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7947 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7948 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7949 stream_.convertInfo[mode].inJump = 1;
\r
7953 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7954 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7955 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7956 stream_.convertInfo[mode].outJump = 1;
\r
7960 else { // no (de)interleaving
\r
7961 if ( stream_.userInterleaved ) {
\r
7962 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7963 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7964 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7968 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7969 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7970 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7971 stream_.convertInfo[mode].inJump = 1;
\r
7972 stream_.convertInfo[mode].outJump = 1;
\r
7977 // Add channel offset.
\r
7978 if ( firstChannel > 0 ) {
\r
7979 if ( stream_.deviceInterleaved[mode] ) {
\r
7980 if ( mode == OUTPUT ) {
\r
7981 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7982 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7985 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7986 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7990 if ( mode == OUTPUT ) {
\r
7991 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7992 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7995 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7996 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8002 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8004 // This function does format conversion, input/output channel compensation, and
\r
8005 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8006 // the lower three bytes of a 32-bit integer.
\r
8008 // Clear our device buffer when in/out duplex device channels are different
\r
8009 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8010 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8011 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8014 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8016 Float64 *out = (Float64 *)outBuffer;
\r
8018 if (info.inFormat == RTAUDIO_SINT8) {
\r
8019 signed char *in = (signed char *)inBuffer;
\r
8020 scale = 1.0 / 127.5;
\r
8021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8022 for (j=0; j<info.channels; j++) {
\r
8023 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8024 out[info.outOffset[j]] += 0.5;
\r
8025 out[info.outOffset[j]] *= scale;
\r
8027 in += info.inJump;
\r
8028 out += info.outJump;
\r
8031 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8032 Int16 *in = (Int16 *)inBuffer;
\r
8033 scale = 1.0 / 32767.5;
\r
8034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8035 for (j=0; j<info.channels; j++) {
\r
8036 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8037 out[info.outOffset[j]] += 0.5;
\r
8038 out[info.outOffset[j]] *= scale;
\r
8040 in += info.inJump;
\r
8041 out += info.outJump;
\r
8044 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8045 Int24 *in = (Int24 *)inBuffer;
\r
8046 scale = 1.0 / 8388607.5;
\r
8047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8048 for (j=0; j<info.channels; j++) {
\r
8049 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8050 out[info.outOffset[j]] += 0.5;
\r
8051 out[info.outOffset[j]] *= scale;
\r
8053 in += info.inJump;
\r
8054 out += info.outJump;
\r
8057 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8058 Int32 *in = (Int32 *)inBuffer;
\r
8059 scale = 1.0 / 2147483647.5;
\r
8060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8061 for (j=0; j<info.channels; j++) {
\r
8062 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8063 out[info.outOffset[j]] += 0.5;
\r
8064 out[info.outOffset[j]] *= scale;
\r
8066 in += info.inJump;
\r
8067 out += info.outJump;
\r
8070 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8071 Float32 *in = (Float32 *)inBuffer;
\r
8072 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8073 for (j=0; j<info.channels; j++) {
\r
8074 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8076 in += info.inJump;
\r
8077 out += info.outJump;
\r
8080 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8081 // Channel compensation and/or (de)interleaving only.
\r
8082 Float64 *in = (Float64 *)inBuffer;
\r
8083 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8084 for (j=0; j<info.channels; j++) {
\r
8085 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8087 in += info.inJump;
\r
8088 out += info.outJump;
\r
8092 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8094 Float32 *out = (Float32 *)outBuffer;
\r
8096 if (info.inFormat == RTAUDIO_SINT8) {
\r
8097 signed char *in = (signed char *)inBuffer;
\r
8098 scale = (Float32) ( 1.0 / 127.5 );
\r
8099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8100 for (j=0; j<info.channels; j++) {
\r
8101 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8102 out[info.outOffset[j]] += 0.5;
\r
8103 out[info.outOffset[j]] *= scale;
\r
8105 in += info.inJump;
\r
8106 out += info.outJump;
\r
8109 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8110 Int16 *in = (Int16 *)inBuffer;
\r
8111 scale = (Float32) ( 1.0 / 32767.5 );
\r
8112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8113 for (j=0; j<info.channels; j++) {
\r
8114 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8115 out[info.outOffset[j]] += 0.5;
\r
8116 out[info.outOffset[j]] *= scale;
\r
8118 in += info.inJump;
\r
8119 out += info.outJump;
\r
8122 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8123 Int24 *in = (Int24 *)inBuffer;
\r
8124 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8125 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8126 for (j=0; j<info.channels; j++) {
\r
8127 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8128 out[info.outOffset[j]] += 0.5;
\r
8129 out[info.outOffset[j]] *= scale;
\r
8131 in += info.inJump;
\r
8132 out += info.outJump;
\r
8135 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8136 Int32 *in = (Int32 *)inBuffer;
\r
8137 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8139 for (j=0; j<info.channels; j++) {
\r
8140 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8141 out[info.outOffset[j]] += 0.5;
\r
8142 out[info.outOffset[j]] *= scale;
\r
8144 in += info.inJump;
\r
8145 out += info.outJump;
\r
8148 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8149 // Channel compensation and/or (de)interleaving only.
\r
8150 Float32 *in = (Float32 *)inBuffer;
\r
8151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8152 for (j=0; j<info.channels; j++) {
\r
8153 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8155 in += info.inJump;
\r
8156 out += info.outJump;
\r
8159 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8160 Float64 *in = (Float64 *)inBuffer;
\r
8161 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8162 for (j=0; j<info.channels; j++) {
\r
8163 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8165 in += info.inJump;
\r
8166 out += info.outJump;
\r
8170 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8171 Int32 *out = (Int32 *)outBuffer;
\r
8172 if (info.inFormat == RTAUDIO_SINT8) {
\r
8173 signed char *in = (signed char *)inBuffer;
\r
8174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8175 for (j=0; j<info.channels; j++) {
\r
8176 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8177 out[info.outOffset[j]] <<= 24;
\r
8179 in += info.inJump;
\r
8180 out += info.outJump;
\r
8183 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8184 Int16 *in = (Int16 *)inBuffer;
\r
8185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8186 for (j=0; j<info.channels; j++) {
\r
8187 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8188 out[info.outOffset[j]] <<= 16;
\r
8190 in += info.inJump;
\r
8191 out += info.outJump;
\r
8194 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8195 Int24 *in = (Int24 *)inBuffer;
\r
8196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8197 for (j=0; j<info.channels; j++) {
\r
8198 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8199 out[info.outOffset[j]] <<= 8;
\r
8201 in += info.inJump;
\r
8202 out += info.outJump;
\r
8205 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8206 // Channel compensation and/or (de)interleaving only.
\r
8207 Int32 *in = (Int32 *)inBuffer;
\r
8208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8209 for (j=0; j<info.channels; j++) {
\r
8210 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8212 in += info.inJump;
\r
8213 out += info.outJump;
\r
8216 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8217 Float32 *in = (Float32 *)inBuffer;
\r
8218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8219 for (j=0; j<info.channels; j++) {
\r
8220 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8222 in += info.inJump;
\r
8223 out += info.outJump;
\r
8226 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8227 Float64 *in = (Float64 *)inBuffer;
\r
8228 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8229 for (j=0; j<info.channels; j++) {
\r
8230 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8232 in += info.inJump;
\r
8233 out += info.outJump;
\r
8237 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8238 Int24 *out = (Int24 *)outBuffer;
\r
8239 if (info.inFormat == RTAUDIO_SINT8) {
\r
8240 signed char *in = (signed char *)inBuffer;
\r
8241 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8242 for (j=0; j<info.channels; j++) {
\r
8243 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8244 //out[info.outOffset[j]] <<= 16;
\r
8246 in += info.inJump;
\r
8247 out += info.outJump;
\r
8250 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8251 Int16 *in = (Int16 *)inBuffer;
\r
8252 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8253 for (j=0; j<info.channels; j++) {
\r
8254 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8255 //out[info.outOffset[j]] <<= 8;
\r
8257 in += info.inJump;
\r
8258 out += info.outJump;
\r
8261 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8262 // Channel compensation and/or (de)interleaving only.
\r
8263 Int24 *in = (Int24 *)inBuffer;
\r
8264 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8265 for (j=0; j<info.channels; j++) {
\r
8266 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8268 in += info.inJump;
\r
8269 out += info.outJump;
\r
8272 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8273 Int32 *in = (Int32 *)inBuffer;
\r
8274 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8275 for (j=0; j<info.channels; j++) {
\r
8276 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8277 //out[info.outOffset[j]] >>= 8;
\r
8279 in += info.inJump;
\r
8280 out += info.outJump;
\r
8283 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8284 Float32 *in = (Float32 *)inBuffer;
\r
8285 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8286 for (j=0; j<info.channels; j++) {
\r
8287 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8289 in += info.inJump;
\r
8290 out += info.outJump;
\r
8293 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8294 Float64 *in = (Float64 *)inBuffer;
\r
8295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8296 for (j=0; j<info.channels; j++) {
\r
8297 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8299 in += info.inJump;
\r
8300 out += info.outJump;
\r
8304 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8305 Int16 *out = (Int16 *)outBuffer;
\r
8306 if (info.inFormat == RTAUDIO_SINT8) {
\r
8307 signed char *in = (signed char *)inBuffer;
\r
8308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8309 for (j=0; j<info.channels; j++) {
\r
8310 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8311 out[info.outOffset[j]] <<= 8;
\r
8313 in += info.inJump;
\r
8314 out += info.outJump;
\r
8317 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8318 // Channel compensation and/or (de)interleaving only.
\r
8319 Int16 *in = (Int16 *)inBuffer;
\r
8320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8321 for (j=0; j<info.channels; j++) {
\r
8322 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8324 in += info.inJump;
\r
8325 out += info.outJump;
\r
8328 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8329 Int24 *in = (Int24 *)inBuffer;
\r
8330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8331 for (j=0; j<info.channels; j++) {
\r
8332 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8334 in += info.inJump;
\r
8335 out += info.outJump;
\r
8338 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8339 Int32 *in = (Int32 *)inBuffer;
\r
8340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8341 for (j=0; j<info.channels; j++) {
\r
8342 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8344 in += info.inJump;
\r
8345 out += info.outJump;
\r
8348 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8349 Float32 *in = (Float32 *)inBuffer;
\r
8350 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8351 for (j=0; j<info.channels; j++) {
\r
8352 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8354 in += info.inJump;
\r
8355 out += info.outJump;
\r
8358 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8359 Float64 *in = (Float64 *)inBuffer;
\r
8360 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8361 for (j=0; j<info.channels; j++) {
\r
8362 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8364 in += info.inJump;
\r
8365 out += info.outJump;
\r
8369 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8370 signed char *out = (signed char *)outBuffer;
\r
8371 if (info.inFormat == RTAUDIO_SINT8) {
\r
8372 // Channel compensation and/or (de)interleaving only.
\r
8373 signed char *in = (signed char *)inBuffer;
\r
8374 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8375 for (j=0; j<info.channels; j++) {
\r
8376 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8378 in += info.inJump;
\r
8379 out += info.outJump;
\r
8382 if (info.inFormat == RTAUDIO_SINT16) {
\r
8383 Int16 *in = (Int16 *)inBuffer;
\r
8384 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8385 for (j=0; j<info.channels; j++) {
\r
8386 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8388 in += info.inJump;
\r
8389 out += info.outJump;
\r
8392 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8393 Int24 *in = (Int24 *)inBuffer;
\r
8394 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8395 for (j=0; j<info.channels; j++) {
\r
8396 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8398 in += info.inJump;
\r
8399 out += info.outJump;
\r
8402 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8403 Int32 *in = (Int32 *)inBuffer;
\r
8404 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8405 for (j=0; j<info.channels; j++) {
\r
8406 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8408 in += info.inJump;
\r
8409 out += info.outJump;
\r
8412 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8413 Float32 *in = (Float32 *)inBuffer;
\r
8414 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8415 for (j=0; j<info.channels; j++) {
\r
8416 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8418 in += info.inJump;
\r
8419 out += info.outJump;
\r
8422 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8423 Float64 *in = (Float64 *)inBuffer;
\r
8424 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8425 for (j=0; j<info.channels; j++) {
\r
8426 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8428 in += info.inJump;
\r
8429 out += info.outJump;
\r
8435 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8436 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8437 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8439 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8441 register char val;
\r
8442 register char *ptr;
\r
8445 if ( format == RTAUDIO_SINT16 ) {
\r
8446 for ( unsigned int i=0; i<samples; i++ ) {
\r
8447 // Swap 1st and 2nd bytes.
\r
8449 *(ptr) = *(ptr+1);
\r
8452 // Increment 2 bytes.
\r
8456 else if ( format == RTAUDIO_SINT32 ||
\r
8457 format == RTAUDIO_FLOAT32 ) {
\r
8458 for ( unsigned int i=0; i<samples; i++ ) {
\r
8459 // Swap 1st and 4th bytes.
\r
8461 *(ptr) = *(ptr+3);
\r
8464 // Swap 2nd and 3rd bytes.
\r
8467 *(ptr) = *(ptr+1);
\r
8470 // Increment 3 more bytes.
\r
8474 else if ( format == RTAUDIO_SINT24 ) {
\r
8475 for ( unsigned int i=0; i<samples; i++ ) {
\r
8476 // Swap 1st and 3rd bytes.
\r
8478 *(ptr) = *(ptr+2);
\r
8481 // Increment 2 more bytes.
\r
8485 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8486 for ( unsigned int i=0; i<samples; i++ ) {
\r
8487 // Swap 1st and 8th bytes
\r
8489 *(ptr) = *(ptr+7);
\r
8492 // Swap 2nd and 7th bytes
\r
8495 *(ptr) = *(ptr+5);
\r
8498 // Swap 3rd and 6th bytes
\r
8501 *(ptr) = *(ptr+3);
\r
8504 // Swap 4th and 5th bytes
\r
8507 *(ptr) = *(ptr+1);
\r
8510 // Increment 5 more bytes.
\r
8516 // Indentation settings for Vim and Emacs
\r
8518 // Local Variables:
\r
8519 // c-basic-offset: 2
\r
8520 // indent-tabs-mode: nil
\r
8523 // vim: et sts=2 sw=2
\r