1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Windows critical sections, POSIX mutexes
// elsewhere, and harmless no-op dummies when no realtime API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options,
\r
193 RtAudioErrorCallback errorCallback )
\r
195 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
196 sampleRate, bufferFrames, callback,
\r
197 userData, options, errorCallback );
\r
200 // *************************************************** //
\r
202 // Public RtApi definitions (see end of file for
\r
203 // private or protected utility functions).
\r
205 // *************************************************** //
\r
209 stream_.state = STREAM_CLOSED;
\r
210 stream_.mode = UNINITIALIZED;
\r
211 stream_.apiHandle = 0;
\r
212 stream_.userBuffer[0] = 0;
\r
213 stream_.userBuffer[1] = 0;
\r
214 MUTEX_INITIALIZE( &stream_.mutex );
\r
215 showWarnings_ = true;
\r
216 firstErrorOccurred = false;
\r
221 MUTEX_DESTROY( &stream_.mutex );
\r
224 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
225 RtAudio::StreamParameters *iParams,
\r
226 RtAudioFormat format, unsigned int sampleRate,
\r
227 unsigned int *bufferFrames,
\r
228 RtAudioCallback callback, void *userData,
\r
229 RtAudio::StreamOptions *options,
\r
230 RtAudioErrorCallback errorCallback )
\r
232 if ( stream_.state != STREAM_CLOSED ) {
\r
233 errorText_ = "RtApi::openStream: a stream is already open!";
\r
234 error( RtAudioError::INVALID_USE );
\r
238 if ( oParams && oParams->nChannels < 1 ) {
\r
239 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
240 error( RtAudioError::INVALID_USE );
\r
244 if ( iParams && iParams->nChannels < 1 ) {
\r
245 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
246 error( RtAudioError::INVALID_USE );
\r
250 if ( oParams == NULL && iParams == NULL ) {
\r
251 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
252 error( RtAudioError::INVALID_USE );
\r
256 if ( formatBytes(format) == 0 ) {
\r
257 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 unsigned int nDevices = getDeviceCount();
\r
263 unsigned int oChannels = 0;
\r
265 oChannels = oParams->nChannels;
\r
266 if ( oParams->deviceId >= nDevices ) {
\r
267 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
268 error( RtAudioError::INVALID_USE );
\r
273 unsigned int iChannels = 0;
\r
275 iChannels = iParams->nChannels;
\r
276 if ( iParams->deviceId >= nDevices ) {
\r
277 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
278 error( RtAudioError::INVALID_USE );
\r
286 if ( oChannels > 0 ) {
\r
288 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
289 sampleRate, format, bufferFrames, options );
\r
290 if ( result == false ) {
\r
291 error( RtAudioError::SYSTEM_ERROR );
\r
296 if ( iChannels > 0 ) {
\r
298 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
299 sampleRate, format, bufferFrames, options );
\r
300 if ( result == false ) {
\r
301 if ( oChannels > 0 ) closeStream();
\r
302 error( RtAudioError::SYSTEM_ERROR );
\r
307 stream_.callbackInfo.callback = (void *) callback;
\r
308 stream_.callbackInfo.userData = userData;
\r
309 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
311 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
312 stream_.state = STREAM_STOPPED;
\r
315 unsigned int RtApi :: getDefaultInputDevice( void )
\r
317 // Should be implemented in subclasses if possible.
\r
321 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
323 // Should be implemented in subclasses if possible.
\r
327 void RtApi :: closeStream( void )
\r
329 // MUST be implemented in subclasses!
\r
333 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
334 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
335 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
336 RtAudio::StreamOptions * /*options*/ )
\r
338 // MUST be implemented in subclasses!
\r
342 void RtApi :: tickStreamTime( void )
\r
344 // Subclasses that do not provide their own implementation of
\r
345 // getStreamTime should call this function once per buffer I/O to
\r
346 // provide basic stream time support.
\r
348 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
350 #if defined( HAVE_GETTIMEOFDAY )
\r
351 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
355 long RtApi :: getStreamLatency( void )
\r
359 long totalLatency = 0;
\r
360 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
361 totalLatency = stream_.latency[0];
\r
362 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
363 totalLatency += stream_.latency[1];
\r
365 return totalLatency;
\r
368 double RtApi :: getStreamTime( void )
\r
372 #if defined( HAVE_GETTIMEOFDAY )
\r
373 // Return a very accurate estimate of the stream time by
\r
374 // adding in the elapsed time since the last tick.
\r
375 struct timeval then;
\r
376 struct timeval now;
\r
378 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
379 return stream_.streamTime;
\r
381 gettimeofday( &now, NULL );
\r
382 then = stream_.lastTickTimestamp;
\r
383 return stream_.streamTime +
\r
384 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
385 (then.tv_sec + 0.000001 * then.tv_usec));
\r
387 return stream_.streamTime;
\r
391 unsigned int RtApi :: getStreamSampleRate( void )
\r
395 return stream_.sampleRate;
\r
399 // *************************************************** //
\r
401 // OS/API-specific methods.
\r
403 // *************************************************** //
\r
405 #if defined(__MACOSX_CORE__)
\r
407 // The OS X CoreAudio API is designed to use a separate callback
\r
408 // procedure for each of its audio devices. A single RtAudio duplex
\r
409 // stream using two different devices is supported here, though it
\r
410 // cannot be guaranteed to always behave correctly because we cannot
\r
411 // synchronize these two callbacks.
\r
413 // A property listener is installed for over/underrun information.
\r
414 // However, no functionality is currently provided to allow property
\r
415 // listeners to trigger user handlers because it is unclear what could
\r
416 // be done if a critical stream parameter (buffer size, sample rate,
\r
417 // device disconnect) notification arrived. The listeners entail
\r
418 // quite a bit of extra code and most likely, a user program wouldn't
\r
419 // be prepared for the result anyway. However, we do provide a flag
\r
420 // to the client callback function to inform of an over/underrun.
\r
422 // A structure to hold various information related to the CoreAudio API
\r
424 struct CoreHandle {
\r
425 AudioDeviceID id[2]; // device ids
\r
426 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
427 AudioDeviceIOProcID procId[2];
\r
429 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
430 UInt32 nStreams[2]; // number of streams to use
\r
432 char *deviceBuffer;
\r
433 pthread_cond_t condition;
\r
434 int drainCounter; // Tracks callback counts when draining
\r
435 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
438 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
441 RtApiCore:: RtApiCore()
\r
443 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
444 // This is a largely undocumented but absolutely necessary
\r
445 // requirement starting with OS-X 10.6. If not called, queries and
\r
446 // updates to various audio device properties are not handled
\r
448 CFRunLoopRef theRunLoop = NULL;
\r
449 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
450 kAudioObjectPropertyScopeGlobal,
\r
451 kAudioObjectPropertyElementMaster };
\r
452 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
453 if ( result != noErr ) {
\r
454 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
455 error( RtAudioError::WARNING );
\r
460 RtApiCore :: ~RtApiCore()
\r
462 // The subclass destructor gets called before the base class
\r
463 // destructor, so close an existing stream before deallocating
\r
464 // apiDeviceId memory.
\r
465 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
468 unsigned int RtApiCore :: getDeviceCount( void )
\r
470 // Find out how many audio devices there are, if any.
\r
472 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
473 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
474 if ( result != noErr ) {
\r
475 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
476 error( RtAudioError::WARNING );
\r
480 return dataSize / sizeof( AudioDeviceID );
\r
483 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
485 unsigned int nDevices = getDeviceCount();
\r
486 if ( nDevices <= 1 ) return 0;
\r
489 UInt32 dataSize = sizeof( AudioDeviceID );
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
491 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
492 if ( result != noErr ) {
\r
493 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
494 error( RtAudioError::WARNING );
\r
498 dataSize *= nDevices;
\r
499 AudioDeviceID deviceList[ nDevices ];
\r
500 property.mSelector = kAudioHardwarePropertyDevices;
\r
501 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
502 if ( result != noErr ) {
\r
503 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
504 error( RtAudioError::WARNING );
\r
508 for ( unsigned int i=0; i<nDevices; i++ )
\r
509 if ( id == deviceList[i] ) return i;
\r
511 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
512 error( RtAudioError::WARNING );
\r
516 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
518 unsigned int nDevices = getDeviceCount();
\r
519 if ( nDevices <= 1 ) return 0;
\r
522 UInt32 dataSize = sizeof( AudioDeviceID );
\r
523 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
524 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
525 if ( result != noErr ) {
\r
526 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
527 error( RtAudioError::WARNING );
\r
531 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
532 AudioDeviceID deviceList[ nDevices ];
\r
533 property.mSelector = kAudioHardwarePropertyDevices;
\r
534 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
535 if ( result != noErr ) {
\r
536 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
537 error( RtAudioError::WARNING );
\r
541 for ( unsigned int i=0; i<nDevices; i++ )
\r
542 if ( id == deviceList[i] ) return i;
\r
544 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
545 error( RtAudioError::WARNING );
\r
549 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
551 RtAudio::DeviceInfo info;
\r
552 info.probed = false;
\r
555 unsigned int nDevices = getDeviceCount();
\r
556 if ( nDevices == 0 ) {
\r
557 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
558 error( RtAudioError::INVALID_USE );
\r
562 if ( device >= nDevices ) {
\r
563 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
564 error( RtAudioError::INVALID_USE );
\r
568 AudioDeviceID deviceList[ nDevices ];
\r
569 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
570 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
571 kAudioObjectPropertyScopeGlobal,
\r
572 kAudioObjectPropertyElementMaster };
\r
573 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
574 0, NULL, &dataSize, (void *) &deviceList );
\r
575 if ( result != noErr ) {
\r
576 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
577 error( RtAudioError::WARNING );
\r
581 AudioDeviceID id = deviceList[ device ];
\r
583 // Get the device name.
\r
585 CFStringRef cfname;
\r
586 dataSize = sizeof( CFStringRef );
\r
587 property.mSelector = kAudioObjectPropertyManufacturer;
\r
588 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
589 if ( result != noErr ) {
\r
590 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
591 errorText_ = errorStream_.str();
\r
592 error( RtAudioError::WARNING );
\r
596 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
597 int length = CFStringGetLength(cfname);
\r
598 char *mname = (char *)malloc(length * 3 + 1);
\r
599 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
600 info.name.append( (const char *)mname, strlen(mname) );
\r
601 info.name.append( ": " );
\r
602 CFRelease( cfname );
\r
605 property.mSelector = kAudioObjectPropertyName;
\r
606 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
607 if ( result != noErr ) {
\r
608 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
609 errorText_ = errorStream_.str();
\r
610 error( RtAudioError::WARNING );
\r
614 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
615 length = CFStringGetLength(cfname);
\r
616 char *name = (char *)malloc(length * 3 + 1);
\r
617 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
618 info.name.append( (const char *)name, strlen(name) );
\r
619 CFRelease( cfname );
\r
622 // Get the output stream "configuration".
\r
623 AudioBufferList *bufferList = nil;
\r
624 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
625 property.mScope = kAudioDevicePropertyScopeOutput;
\r
626 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
628 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
629 if ( result != noErr || dataSize == 0 ) {
\r
630 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
631 errorText_ = errorStream_.str();
\r
632 error( RtAudioError::WARNING );
\r
636 // Allocate the AudioBufferList.
\r
637 bufferList = (AudioBufferList *) malloc( dataSize );
\r
638 if ( bufferList == NULL ) {
\r
639 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
640 error( RtAudioError::WARNING );
\r
644 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
645 if ( result != noErr || dataSize == 0 ) {
\r
646 free( bufferList );
\r
647 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
648 errorText_ = errorStream_.str();
\r
649 error( RtAudioError::WARNING );
\r
653 // Get output channel information.
\r
654 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
655 for ( i=0; i<nStreams; i++ )
\r
656 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
657 free( bufferList );
\r
659 // Get the input stream "configuration".
\r
660 property.mScope = kAudioDevicePropertyScopeInput;
\r
661 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
662 if ( result != noErr || dataSize == 0 ) {
\r
663 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
664 errorText_ = errorStream_.str();
\r
665 error( RtAudioError::WARNING );
\r
669 // Allocate the AudioBufferList.
\r
670 bufferList = (AudioBufferList *) malloc( dataSize );
\r
671 if ( bufferList == NULL ) {
\r
672 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
673 error( RtAudioError::WARNING );
\r
677 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
678 if (result != noErr || dataSize == 0) {
\r
679 free( bufferList );
\r
680 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
681 errorText_ = errorStream_.str();
\r
682 error( RtAudioError::WARNING );
\r
686 // Get input channel information.
\r
687 nStreams = bufferList->mNumberBuffers;
\r
688 for ( i=0; i<nStreams; i++ )
\r
689 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
690 free( bufferList );
\r
692 // If device opens for both playback and capture, we determine the channels.
\r
693 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
694 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
696 // Probe the device sample rates.
\r
697 bool isInput = false;
\r
698 if ( info.outputChannels == 0 ) isInput = true;
\r
700 // Determine the supported sample rates.
\r
701 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
702 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
703 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
704 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
705 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
706 errorText_ = errorStream_.str();
\r
707 error( RtAudioError::WARNING );
\r
711 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
712 AudioValueRange rangeList[ nRanges ];
\r
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
714 if ( result != kAudioHardwareNoError ) {
\r
715 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
716 errorText_ = errorStream_.str();
\r
717 error( RtAudioError::WARNING );
\r
721 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
722 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
723 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
724 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
727 info.sampleRates.clear();
\r
728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
729 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
730 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
733 if ( info.sampleRates.size() == 0 ) {
\r
734 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
735 errorText_ = errorStream_.str();
\r
736 error( RtAudioError::WARNING );
\r
740 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
741 // Thus, any other "physical" formats supported by the device are of
\r
742 // no interest to the client.
\r
743 info.nativeFormats = RTAUDIO_FLOAT32;
\r
745 if ( info.outputChannels > 0 )
\r
746 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
747 if ( info.inputChannels > 0 )
\r
748 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
750 info.probed = true;
\r
754 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
755 const AudioTimeStamp* /*inNow*/,
\r
756 const AudioBufferList* inInputData,
\r
757 const AudioTimeStamp* /*inInputTime*/,
\r
758 AudioBufferList* outOutputData,
\r
759 const AudioTimeStamp* /*inOutputTime*/,
\r
760 void* infoPointer )
\r
762 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
764 RtApiCore *object = (RtApiCore *) info->object;
\r
765 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
766 return kAudioHardwareUnspecifiedError;
\r
768 return kAudioHardwareNoError;
\r
771 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
773 const AudioObjectPropertyAddress properties[],
\r
774 void* handlePointer )
\r
776 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
777 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
778 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
779 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
780 handle->xrun[1] = true;
\r
782 handle->xrun[0] = true;
\r
786 return kAudioHardwareNoError;
\r
789 static OSStatus rateListener( AudioObjectID inDevice,
\r
790 UInt32 /*nAddresses*/,
\r
791 const AudioObjectPropertyAddress /*properties*/[],
\r
792 void* ratePointer )
\r
795 Float64 *rate = (Float64 *) ratePointer;
\r
796 UInt32 dataSize = sizeof( Float64 );
\r
797 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
798 kAudioObjectPropertyScopeGlobal,
\r
799 kAudioObjectPropertyElementMaster };
\r
800 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
801 return kAudioHardwareNoError;
\r
804 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
805 unsigned int firstChannel, unsigned int sampleRate,
\r
806 RtAudioFormat format, unsigned int *bufferSize,
\r
807 RtAudio::StreamOptions *options )
\r
810 unsigned int nDevices = getDeviceCount();
\r
811 if ( nDevices == 0 ) {
\r
812 // This should not happen because a check is made before this function is called.
\r
813 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
817 if ( device >= nDevices ) {
\r
818 // This should not happen because a check is made before this function is called.
\r
819 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
823 AudioDeviceID deviceList[ nDevices ];
\r
824 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
825 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
826 kAudioObjectPropertyScopeGlobal,
\r
827 kAudioObjectPropertyElementMaster };
\r
828 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
829 0, NULL, &dataSize, (void *) &deviceList );
\r
830 if ( result != noErr ) {
\r
831 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
835 AudioDeviceID id = deviceList[ device ];
\r
837 // Setup for stream mode.
\r
838 bool isInput = false;
\r
839 if ( mode == INPUT ) {
\r
841 property.mScope = kAudioDevicePropertyScopeInput;
\r
844 property.mScope = kAudioDevicePropertyScopeOutput;
\r
846 // Get the stream "configuration".
\r
847 AudioBufferList *bufferList = nil;
\r
849 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
850 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
851 if ( result != noErr || dataSize == 0 ) {
\r
852 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
853 errorText_ = errorStream_.str();
\r
857 // Allocate the AudioBufferList.
\r
858 bufferList = (AudioBufferList *) malloc( dataSize );
\r
859 if ( bufferList == NULL ) {
\r
860 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
864 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
865 if (result != noErr || dataSize == 0) {
\r
866 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
867 errorText_ = errorStream_.str();
\r
871 // Search for one or more streams that contain the desired number of
\r
872 // channels. CoreAudio devices can have an arbitrary number of
\r
873 // streams and each stream can have an arbitrary number of channels.
\r
874 // For each stream, a single buffer of interleaved samples is
\r
875 // provided. RtAudio prefers the use of one stream of interleaved
\r
876 // data or multiple consecutive single-channel streams. However, we
\r
877 // now support multiple consecutive multi-channel streams of
\r
878 // interleaved data as well.
\r
879 UInt32 iStream, offsetCounter = firstChannel;
\r
880 UInt32 nStreams = bufferList->mNumberBuffers;
\r
881 bool monoMode = false;
\r
882 bool foundStream = false;
\r
884 // First check that the device supports the requested number of
\r
886 UInt32 deviceChannels = 0;
\r
887 for ( iStream=0; iStream<nStreams; iStream++ )
\r
888 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
890 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
891 free( bufferList );
\r
892 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
893 errorText_ = errorStream_.str();
\r
897 // Look for a single stream meeting our needs.
\r
898 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
899 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
900 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
901 if ( streamChannels >= channels + offsetCounter ) {
\r
902 firstStream = iStream;
\r
903 channelOffset = offsetCounter;
\r
904 foundStream = true;
\r
907 if ( streamChannels > offsetCounter ) break;
\r
908 offsetCounter -= streamChannels;
\r
911 // If we didn't find a single stream above, then we should be able
\r
912 // to meet the channel specification with multiple streams.
\r
913 if ( foundStream == false ) {
\r
915 offsetCounter = firstChannel;
\r
916 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
917 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
918 if ( streamChannels > offsetCounter ) break;
\r
919 offsetCounter -= streamChannels;
\r
922 firstStream = iStream;
\r
923 channelOffset = offsetCounter;
\r
924 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
926 if ( streamChannels > 1 ) monoMode = false;
\r
927 while ( channelCounter > 0 ) {
\r
928 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
929 if ( streamChannels > 1 ) monoMode = false;
\r
930 channelCounter -= streamChannels;
\r
935 free( bufferList );
\r
937 // Determine the buffer size.
\r
938 AudioValueRange bufferRange;
\r
939 dataSize = sizeof( AudioValueRange );
\r
940 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
941 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
943 if ( result != noErr ) {
\r
944 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
945 errorText_ = errorStream_.str();
\r
949 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
950 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
951 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
953 // Set the buffer size. For multiple streams, I'm assuming we only
\r
954 // need to make this setting for the master channel.
\r
955 UInt32 theSize = (UInt32) *bufferSize;
\r
956 dataSize = sizeof( UInt32 );
\r
957 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
958 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
960 if ( result != noErr ) {
\r
961 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
962 errorText_ = errorStream_.str();
\r
966 // If attempting to setup a duplex stream, the bufferSize parameter
\r
967 // MUST be the same in both directions!
\r
968 *bufferSize = theSize;
\r
969 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
971 errorText_ = errorStream_.str();
\r
975 stream_.bufferSize = *bufferSize;
\r
976 stream_.nBuffers = 1;
\r
978 // Try to set "hog" mode ... it's not clear to me this is working.
\r
979 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
981 dataSize = sizeof( hog_pid );
\r
982 property.mSelector = kAudioDevicePropertyHogMode;
\r
983 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
984 if ( result != noErr ) {
\r
985 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
986 errorText_ = errorStream_.str();
\r
990 if ( hog_pid != getpid() ) {
\r
991 hog_pid = getpid();
\r
992 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
993 if ( result != noErr ) {
\r
994 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
995 errorText_ = errorStream_.str();
\r
1001 // Check and if necessary, change the sample rate for the device.
\r
1002 Float64 nominalRate;
\r
1003 dataSize = sizeof( Float64 );
\r
1004 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1005 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1007 if ( result != noErr ) {
\r
1008 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1009 errorText_ = errorStream_.str();
\r
1013 // Only change the sample rate if off by more than 1 Hz.
\r
1014 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1016 // Set a property listener for the sample rate change
\r
1017 Float64 reportedRate = 0.0;
\r
1018 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1019 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 nominalRate = (Float64) sampleRate;
\r
1027 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1029 if ( result != noErr ) {
\r
1030 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1031 errorText_ = errorStream_.str();
\r
1035 // Now wait until the reported nominal rate is what we just set.
\r
1036 UInt32 microCounter = 0;
\r
1037 while ( reportedRate != nominalRate ) {
\r
1038 microCounter += 5000;
\r
1039 if ( microCounter > 5000000 ) break;
\r
1043 // Remove the property listener.
\r
1044 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1046 if ( microCounter > 5000000 ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1053 // Now set the stream format for all streams. Also, check the
\r
1054 // physical format of the device and change that if necessary.
\r
1055 AudioStreamBasicDescription description;
\r
1056 dataSize = sizeof( AudioStreamBasicDescription );
\r
1057 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1058 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1059 if ( result != noErr ) {
\r
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1061 errorText_ = errorStream_.str();
\r
1065 // Set the sample rate and data format id. However, only make the
\r
1066 // change if the sample rate is not within 1.0 of the desired
\r
1067 // rate and the format is not linear pcm.
\r
1068 bool updateFormat = false;
\r
1069 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1070 description.mSampleRate = (Float64) sampleRate;
\r
1071 updateFormat = true;
\r
1074 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1075 description.mFormatID = kAudioFormatLinearPCM;
\r
1076 updateFormat = true;
\r
1079 if ( updateFormat ) {
\r
1080 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1081 if ( result != noErr ) {
\r
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1083 errorText_ = errorStream_.str();
\r
1088 // Now check the physical format.
\r
1089 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1090 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1091 if ( result != noErr ) {
\r
1092 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1093 errorText_ = errorStream_.str();
\r
1097 //std::cout << "Current physical stream format:" << std::endl;
\r
1098 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1099 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1100 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1101 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1103 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1104 description.mFormatID = kAudioFormatLinearPCM;
\r
1105 //description.mSampleRate = (Float64) sampleRate;
\r
1106 AudioStreamBasicDescription testDescription = description;
\r
1107 UInt32 formatFlags;
\r
1109 // We'll try higher bit rates first and then work our way down.
\r
1110 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1111 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1112 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1113 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1114 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1115 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1116 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1117 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1118 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1119 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1120 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1121 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1122 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1124 bool setPhysicalFormat = false;
\r
1125 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1126 testDescription = description;
\r
1127 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1128 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1129 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1130 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1132 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1133 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1134 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1135 if ( result == noErr ) {
\r
1136 setPhysicalFormat = true;
\r
1137 //std::cout << "Updated physical stream format:" << std::endl;
\r
1138 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1139 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1140 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1141 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1146 if ( !setPhysicalFormat ) {
\r
1147 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1148 errorText_ = errorStream_.str();
\r
1151 } // done setting virtual/physical formats.
\r
1153 // Get the stream / device latency.
\r
1155 dataSize = sizeof( UInt32 );
\r
1156 property.mSelector = kAudioDevicePropertyLatency;
\r
1157 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1158 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1159 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1161 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1162 errorText_ = errorStream_.str();
\r
1163 error( RtAudioError::WARNING );
\r
1167 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1168 // always be presented in native-endian format, so we should never
\r
1169 // need to byte swap.
\r
1170 stream_.doByteSwap[mode] = false;
\r
1172 // From the CoreAudio documentation, PCM data must be supplied as
\r
1174 stream_.userFormat = format;
\r
1175 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1177 if ( streamCount == 1 )
\r
1178 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1179 else // multiple streams
\r
1180 stream_.nDeviceChannels[mode] = channels;
\r
1181 stream_.nUserChannels[mode] = channels;
\r
1182 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1184 else stream_.userInterleaved = true;
\r
1185 stream_.deviceInterleaved[mode] = true;
\r
1186 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1188 // Set flags for buffer conversion.
\r
1189 stream_.doConvertBuffer[mode] = false;
\r
1190 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1191 stream_.doConvertBuffer[mode] = true;
\r
1192 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1193 stream_.doConvertBuffer[mode] = true;
\r
1194 if ( streamCount == 1 ) {
\r
1195 if ( stream_.nUserChannels[mode] > 1 &&
\r
1196 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1197 stream_.doConvertBuffer[mode] = true;
\r
1199 else if ( monoMode && stream_.userInterleaved )
\r
1200 stream_.doConvertBuffer[mode] = true;
\r
1202 // Allocate our CoreHandle structure for the stream.
\r
1203 CoreHandle *handle = 0;
\r
1204 if ( stream_.apiHandle == 0 ) {
\r
1206 handle = new CoreHandle;
\r
1208 catch ( std::bad_alloc& ) {
\r
1209 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1213 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1214 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1217 stream_.apiHandle = (void *) handle;
\r
1220 handle = (CoreHandle *) stream_.apiHandle;
\r
1221 handle->iStream[mode] = firstStream;
\r
1222 handle->nStreams[mode] = streamCount;
\r
1223 handle->id[mode] = id;
\r
1225 // Allocate necessary internal buffers.
\r
1226 unsigned long bufferBytes;
\r
1227 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1228 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1229 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1230 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1231 if ( stream_.userBuffer[mode] == NULL ) {
\r
1232 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1236 // If possible, we will make use of the CoreAudio stream buffers as
\r
1237 // "device buffers". However, we can't do this if using multiple
\r
1239 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1241 bool makeBuffer = true;
\r
1242 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1243 if ( mode == INPUT ) {
\r
1244 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1245 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1246 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1250 if ( makeBuffer ) {
\r
1251 bufferBytes *= *bufferSize;
\r
1252 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1253 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1254 if ( stream_.deviceBuffer == NULL ) {
\r
1255 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1261 stream_.sampleRate = sampleRate;
\r
1262 stream_.device[mode] = device;
\r
1263 stream_.state = STREAM_STOPPED;
\r
1264 stream_.callbackInfo.object = (void *) this;
\r
1266 // Setup the buffer conversion information structure.
\r
1267 if ( stream_.doConvertBuffer[mode] ) {
\r
1268 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1269 else setConvertInfo( mode, channelOffset );
\r
1272 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1273 // Only one callback procedure per device.
\r
1274 stream_.mode = DUPLEX;
\r
1276 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1277 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1279 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1280 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1282 if ( result != noErr ) {
\r
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1284 errorText_ = errorStream_.str();
\r
1287 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1288 stream_.mode = DUPLEX;
\r
1290 stream_.mode = mode;
\r
1293 // Setup the device property listener for over/underload.
\r
1294 property.mSelector = kAudioDeviceProcessorOverload;
\r
1295 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1301 pthread_cond_destroy( &handle->condition );
\r
1303 stream_.apiHandle = 0;
\r
1306 for ( int i=0; i<2; i++ ) {
\r
1307 if ( stream_.userBuffer[i] ) {
\r
1308 free( stream_.userBuffer[i] );
\r
1309 stream_.userBuffer[i] = 0;
\r
1313 if ( stream_.deviceBuffer ) {
\r
1314 free( stream_.deviceBuffer );
\r
1315 stream_.deviceBuffer = 0;
\r
1318 stream_.state = STREAM_CLOSED;
\r
1322 void RtApiCore :: closeStream( void )
\r
1324 if ( stream_.state == STREAM_CLOSED ) {
\r
1325 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1326 error( RtAudioError::WARNING );
\r
1330 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1331 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1332 if ( stream_.state == STREAM_RUNNING )
\r
1333 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1334 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1335 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1337 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1338 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1342 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1343 if ( stream_.state == STREAM_RUNNING )
\r
1344 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1345 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1346 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1348 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1349 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1353 for ( int i=0; i<2; i++ ) {
\r
1354 if ( stream_.userBuffer[i] ) {
\r
1355 free( stream_.userBuffer[i] );
\r
1356 stream_.userBuffer[i] = 0;
\r
1360 if ( stream_.deviceBuffer ) {
\r
1361 free( stream_.deviceBuffer );
\r
1362 stream_.deviceBuffer = 0;
\r
1365 // Destroy pthread condition variable.
\r
1366 pthread_cond_destroy( &handle->condition );
\r
1368 stream_.apiHandle = 0;
\r
1370 stream_.mode = UNINITIALIZED;
\r
1371 stream_.state = STREAM_CLOSED;
\r
1374 void RtApiCore :: startStream( void )
\r
1377 if ( stream_.state == STREAM_RUNNING ) {
\r
1378 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1379 error( RtAudioError::WARNING );
\r
1383 OSStatus result = noErr;
\r
1384 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1387 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1388 if ( result != noErr ) {
\r
1389 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1390 errorText_ = errorStream_.str();
\r
1395 if ( stream_.mode == INPUT ||
\r
1396 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1398 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1399 if ( result != noErr ) {
\r
1400 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1401 errorText_ = errorStream_.str();
\r
1406 handle->drainCounter = 0;
\r
1407 handle->internalDrain = false;
\r
1408 stream_.state = STREAM_RUNNING;
\r
1411 if ( result == noErr ) return;
\r
1412 error( RtAudioError::SYSTEM_ERROR );
\r
1415 void RtApiCore :: stopStream( void )
\r
1418 if ( stream_.state == STREAM_STOPPED ) {
\r
1419 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1420 error( RtAudioError::WARNING );
\r
1424 OSStatus result = noErr;
\r
1425 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1428 if ( handle->drainCounter == 0 ) {
\r
1429 handle->drainCounter = 2;
\r
1430 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1433 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1434 if ( result != noErr ) {
\r
1435 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1436 errorText_ = errorStream_.str();
\r
1441 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1443 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1444 if ( result != noErr ) {
\r
1445 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1446 errorText_ = errorStream_.str();
\r
1451 stream_.state = STREAM_STOPPED;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: abortStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1468 handle->drainCounter = 2;
\r
1473 // This function will be called by a spawned thread when the user
\r
1474 // callback function signals that the stream should be stopped or
\r
1475 // aborted. It is better to handle it this way because the
\r
1476 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1477 // function is called.
\r
1478 static void *coreStopStream( void *ptr )
\r
1480 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1481 RtApiCore *object = (RtApiCore *) info->object;
\r
1483 object->stopStream();
\r
1484 pthread_exit( NULL );
\r
1487 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1488 const AudioBufferList *inBufferList,
\r
1489 const AudioBufferList *outBufferList )
\r
1491 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1492 if ( stream_.state == STREAM_CLOSED ) {
\r
1493 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1494 error( RtAudioError::WARNING );
\r
1498 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1499 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1501 // Check if we were draining the stream and signal is finished.
\r
1502 if ( handle->drainCounter > 3 ) {
\r
1503 ThreadHandle threadId;
\r
1505 stream_.state = STREAM_STOPPING;
\r
1506 if ( handle->internalDrain == true )
\r
1507 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1508 else // external call to stopStream()
\r
1509 pthread_cond_signal( &handle->condition );
\r
1513 AudioDeviceID outputDevice = handle->id[0];
\r
1515 // Invoke user callback to get fresh output data UNLESS we are
\r
1516 // draining stream or duplex mode AND the input/output devices are
\r
1517 // different AND this function is called for the input device.
\r
1518 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1519 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1520 double streamTime = getStreamTime();
\r
1521 RtAudioStreamStatus status = 0;
\r
1522 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1523 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1524 handle->xrun[0] = false;
\r
1526 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1527 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1528 handle->xrun[1] = false;
\r
1531 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1532 stream_.bufferSize, streamTime, status, info->userData );
\r
1533 if ( cbReturnValue == 2 ) {
\r
1534 stream_.state = STREAM_STOPPING;
\r
1535 handle->drainCounter = 2;
\r
1539 else if ( cbReturnValue == 1 ) {
\r
1540 handle->drainCounter = 1;
\r
1541 handle->internalDrain = true;
\r
1545 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1547 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1549 if ( handle->nStreams[0] == 1 ) {
\r
1550 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1552 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1554 else { // fill multiple streams with zeros
\r
1555 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1556 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1558 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1562 else if ( handle->nStreams[0] == 1 ) {
\r
1563 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1564 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1565 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1567 else { // copy from user buffer
\r
1568 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1569 stream_.userBuffer[0],
\r
1570 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1573 else { // fill multiple streams
\r
1574 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1575 if ( stream_.doConvertBuffer[0] ) {
\r
1576 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1577 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1580 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1581 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1582 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1583 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1584 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1587 else { // fill multiple multi-channel streams with interleaved data
\r
1588 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1589 Float32 *out, *in;
\r
1591 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1592 UInt32 inChannels = stream_.nUserChannels[0];
\r
1593 if ( stream_.doConvertBuffer[0] ) {
\r
1594 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1595 inChannels = stream_.nDeviceChannels[0];
\r
1598 if ( inInterleaved ) inOffset = 1;
\r
1599 else inOffset = stream_.bufferSize;
\r
1601 channelsLeft = inChannels;
\r
1602 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1604 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1605 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1608 // Account for possible channel offset in first stream
\r
1609 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1610 streamChannels -= stream_.channelOffset[0];
\r
1611 outJump = stream_.channelOffset[0];
\r
1615 // Account for possible unfilled channels at end of the last stream
\r
1616 if ( streamChannels > channelsLeft ) {
\r
1617 outJump = streamChannels - channelsLeft;
\r
1618 streamChannels = channelsLeft;
\r
1621 // Determine input buffer offsets and skips
\r
1622 if ( inInterleaved ) {
\r
1623 inJump = inChannels;
\r
1624 in += inChannels - channelsLeft;
\r
1628 in += (inChannels - channelsLeft) * inOffset;
\r
1631 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1632 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1633 *out++ = in[j*inOffset];
\r
1638 channelsLeft -= streamChannels;
\r
1643 if ( handle->drainCounter ) {
\r
1644 handle->drainCounter++;
\r
1649 AudioDeviceID inputDevice;
\r
1650 inputDevice = handle->id[1];
\r
1651 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1653 if ( handle->nStreams[1] == 1 ) {
\r
1654 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1655 convertBuffer( stream_.userBuffer[1],
\r
1656 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1657 stream_.convertInfo[1] );
\r
1659 else { // copy to user buffer
\r
1660 memcpy( stream_.userBuffer[1],
\r
1661 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1662 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1665 else { // read from multiple streams
\r
1666 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1667 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1669 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1670 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1671 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1672 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1673 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1676 else { // read from multiple multi-channel streams
\r
1677 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1678 Float32 *out, *in;
\r
1680 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1681 UInt32 outChannels = stream_.nUserChannels[1];
\r
1682 if ( stream_.doConvertBuffer[1] ) {
\r
1683 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1684 outChannels = stream_.nDeviceChannels[1];
\r
1687 if ( outInterleaved ) outOffset = 1;
\r
1688 else outOffset = stream_.bufferSize;
\r
1690 channelsLeft = outChannels;
\r
1691 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1693 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1694 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1697 // Account for possible channel offset in first stream
\r
1698 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1699 streamChannels -= stream_.channelOffset[1];
\r
1700 inJump = stream_.channelOffset[1];
\r
1704 // Account for possible unread channels at end of the last stream
\r
1705 if ( streamChannels > channelsLeft ) {
\r
1706 inJump = streamChannels - channelsLeft;
\r
1707 streamChannels = channelsLeft;
\r
1710 // Determine output buffer offsets and skips
\r
1711 if ( outInterleaved ) {
\r
1712 outJump = outChannels;
\r
1713 out += outChannels - channelsLeft;
\r
1717 out += (outChannels - channelsLeft) * outOffset;
\r
1720 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1721 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1722 out[j*outOffset] = *in++;
\r
1727 channelsLeft -= streamChannels;
\r
1731 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1732 convertBuffer( stream_.userBuffer[1],
\r
1733 stream_.deviceBuffer,
\r
1734 stream_.convertInfo[1] );
\r
1740 //MUTEX_UNLOCK( &stream_.mutex );
\r
1742 RtApi::tickStreamTime();
\r
1746 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1750 case kAudioHardwareNotRunningError:
\r
1751 return "kAudioHardwareNotRunningError";
\r
1753 case kAudioHardwareUnspecifiedError:
\r
1754 return "kAudioHardwareUnspecifiedError";
\r
1756 case kAudioHardwareUnknownPropertyError:
\r
1757 return "kAudioHardwareUnknownPropertyError";
\r
1759 case kAudioHardwareBadPropertySizeError:
\r
1760 return "kAudioHardwareBadPropertySizeError";
\r
1762 case kAudioHardwareIllegalOperationError:
\r
1763 return "kAudioHardwareIllegalOperationError";
\r
1765 case kAudioHardwareBadObjectError:
\r
1766 return "kAudioHardwareBadObjectError";
\r
1768 case kAudioHardwareBadDeviceError:
\r
1769 return "kAudioHardwareBadDeviceError";
\r
1771 case kAudioHardwareBadStreamError:
\r
1772 return "kAudioHardwareBadStreamError";
\r
1774 case kAudioHardwareUnsupportedOperationError:
\r
1775 return "kAudioHardwareUnsupportedOperationError";
\r
1777 case kAudioDeviceUnsupportedFormatError:
\r
1778 return "kAudioDeviceUnsupportedFormatError";
\r
1780 case kAudioDevicePermissionsError:
\r
1781 return "kAudioDevicePermissionsError";
\r
1784 return "CoreAudio unknown error";
\r
1788 //******************** End of __MACOSX_CORE__ *********************//
\r
1791 #if defined(__UNIX_JACK__)
\r
1793 // JACK is a low-latency audio server, originally written for the
\r
1794 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1795 // connect a number of different applications to an audio device, as
\r
1796 // well as allowing them to share audio between themselves.
\r
1798 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1799 // have ports connected to the server. The JACK server is typically
\r
1800 // started in a terminal as follows:
\r
1802 // .jackd -d alsa -d hw:0
\r
1804 // or through an interface program such as qjackctl. Many of the
\r
1805 // parameters normally set for a stream are fixed by the JACK server
\r
1806 // and can be specified when the JACK server is started. In
\r
1809 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1811 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1812 // frames, and number of buffers = 4. Once the server is running, it
\r
1813 // is not possible to override these values. If the values are not
\r
1814 // specified in the command-line, the JACK server uses default values.
\r
1816 // The JACK server does not have to be running when an instance of
\r
1817 // RtApiJack is created, though the function getDeviceCount() will
\r
1818 // report 0 devices found until JACK has been started. When no
\r
1819 // devices are available (i.e., the JACK server is not running), a
\r
1820 // stream cannot be opened.
\r
1822 #include <jack/jack.h>
\r
1823 #include <unistd.h>
\r
1826 // A structure to hold various information related to the Jack API
\r
1827 // implementation.
\r
1828 struct JackHandle {
\r
1829 jack_client_t *client;
\r
1830 jack_port_t **ports[2];
\r
1831 std::string deviceName[2];
\r
1833 pthread_cond_t condition;
\r
1834 int drainCounter; // Tracks callback counts when draining
\r
1835 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1838 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error-callback stub handed to jack_set_error_function(): discards
// JACK's diagnostic messages so they do not clutter stderr when the
// library is built without __RTAUDIO_DEBUG__.
static void jackSilentError( const char * ) {}
\r
// Constructor.  In non-debug builds, installs jackSilentError so that
// JACK's internal error reporting is suppressed.
1843 RtApiJack :: RtApiJack()
\r
1845 // Nothing to do here.
\r
1846 #if !defined(__RTAUDIO_DEBUG__)
\r
1847 // Turn off Jack's internal error reporting.
\r
1848 jack_set_error_function( &jackSilentError );
\r
// Destructor: closes any still-open stream (deactivating and closing
// the JACK client) before the object is destroyed.
1852 RtApiJack :: ~RtApiJack()
\r
1854 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Counts distinct JACK "devices" (clients) by scanning all port names
// and counting unique prefixes (the text up to the first ':').
// Returns 0 when the JACK server cannot be reached — JackNoStartServer
// means we never spawn a server ourselves.
1857 unsigned int RtApiJack :: getDeviceCount( void )
\r
1859 // See if we can become a jack client.
\r
1860 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1861 jack_status_t *status = NULL;
\r
1862 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1863 if ( client == 0 ) return 0;
\r
1865 const char **ports;
\r
1866 std::string port, previousPort;
\r
1867 unsigned int nChannels = 0, nDevices = 0;
\r
1868 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1870 // Parse the port names up to the first colon (:).
\r
1871 size_t iColon = 0;
\r
1873 port = (char *) ports[ nChannels ];
\r
1874 iColon = port.find(":");
\r
1875 if ( iColon != std::string::npos ) {
\r
1876 port = port.substr( 0, iColon + 1 );
\r
// A new prefix means a new device; nDevices is bumped here in the
// full source (increment line not present in this fragment).
1877 if ( port != previousPort ) {
\r
1879 previousPort = port;
\r
1882 } while ( ports[++nChannels] );
\r
// Temporary counting client is no longer needed.
1886 jack_client_close( client );
\r
// Probes device (client) number `device` through a temporary JACK
// connection: resolves its name from the unique port-name prefixes,
// then counts its input/output ports to fill in channel counts.
// On any failure info.probed stays false and a WARNING (or
// INVALID_USE for a bad device index) is raised.
1890 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1892 RtAudio::DeviceInfo info;
\r
1893 info.probed = false;
\r
1895 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1896 jack_status_t *status = NULL;
\r
1897 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1898 if ( client == 0 ) {
\r
1899 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1900 error( RtAudioError::WARNING );
\r
1904 const char **ports;
\r
1905 std::string port, previousPort;
\r
1906 unsigned int nPorts = 0, nDevices = 0;
\r
1907 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1909 // Parse the port names up to the first colon (:).
\r
1910 size_t iColon = 0;
\r
1912 port = (char *) ports[ nPorts ];
\r
1913 iColon = port.find(":");
\r
1914 if ( iColon != std::string::npos ) {
\r
1915 port = port.substr( 0, iColon );
\r
1916 if ( port != previousPort ) {
\r
// The device index maps onto the order in which unique client
// prefixes are first seen.
1917 if ( nDevices == device ) info.name = port;
\r
1919 previousPort = port;
\r
1922 } while ( ports[++nPorts] );
\r
1926 if ( device >= nDevices ) {
\r
1927 jack_client_close( client );
\r
1928 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1929 error( RtAudioError::INVALID_USE );
\r
// JACK runs the whole graph at one fixed rate, so that is the only
// rate we can report.
1933 // Get the current jack server sample rate.
\r
1934 info.sampleRates.clear();
\r
1935 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1937 // Count the available ports containing the client name as device
\r
1938 // channels. Jack "input ports" equal RtAudio output channels.
\r
1939 unsigned int nChannels = 0;
\r
1940 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1942 while ( ports[ nChannels ] ) nChannels++;
\r
1944 info.outputChannels = nChannels;
\r
1947 // Jack "output ports" equal RtAudio input channels.
\r
1949 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1951 while ( ports[ nChannels ] ) nChannels++;
\r
1953 info.inputChannels = nChannels;
\r
1956 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1957 jack_client_close(client);
\r
1958 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1959 error( RtAudioError::WARNING );
\r
1963 // If device opens for both playback and capture, we determine the channels.
\r
1964 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1965 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1967 // Jack always uses 32-bit floats.
\r
1968 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1970 // Jack doesn't provide default devices so we'll use the first available one.
\r
1971 if ( device == 0 && info.outputChannels > 0 )
\r
1972 info.isDefaultOutput = true;
\r
1973 if ( device == 0 && info.inputChannels > 0 )
\r
1974 info.isDefaultInput = true;
\r
1976 jack_client_close(client);
\r
1977 info.probed = true;
\r
1981 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1983 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1985 RtApiJack *object = (RtApiJack *) info->object;
\r
1986 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1991 // This function will be called by a spawned thread when the Jack
\r
1992 // server signals that it is shutting down. It is necessary to handle
\r
1993 // it this way because the jackShutdown() function must return before
\r
1994 // the jack_deactivate() function (in closeStream()) will return.
\r
1995 static void *jackCloseStream( void *ptr )
\r
1997 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1998 RtApiJack *object = (RtApiJack *) info->object;
\r
2000 object->closeStream();
\r
2002 pthread_exit( NULL );
\r
2004 static void jackShutdown( void *infoPointer )
\r
2006 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2007 RtApiJack *object = (RtApiJack *) info->object;
\r
2009 // Check current stream state. If stopped, then we'll assume this
\r
2010 // was called as a result of a call to RtApiJack::stopStream (the
\r
2011 // deactivation of a client handle causes this function to be called).
\r
2012 // If not, we'll assume the Jack server is shutting down or some
\r
2013 // other problem occurred and we should close the stream.
\r
2014 if ( object->isStreamRunning() == false ) return;
\r
2016 ThreadHandle threadId;
\r
2017 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2018 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2021 static int jackXrun( void *infoPointer )
\r
2023 JackHandle *handle = (JackHandle *) infoPointer;
\r
2025 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2026 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Opens one direction (OUTPUT or INPUT) of a stream on JACK device
// `device`:
//   - connects to the JACK server (or reuses the client from a prior
//     pass when opening the second direction of a duplex stream),
//   - resolves the device name from unique port-name prefixes,
//   - validates channel count + offset and the fixed server sample
//     rate / buffer size (JACK dictates both; they cannot be changed
//     here),
//   - allocates the JackHandle, user/device buffers and port array,
//   - registers process/xrun/shutdown callbacks and the client ports.
// The trailing statements form the error-unwind path (the enclosing
// error label lines are not present in this fragment).
2031 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2032 unsigned int firstChannel, unsigned int sampleRate,
\r
2033 RtAudioFormat format, unsigned int *bufferSize,
\r
2034 RtAudio::StreamOptions *options )
\r
2036 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2038 // Look for jack server and try to become a client (only do once per stream).
\r
2039 jack_client_t *client = 0;
\r
2040 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2041 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2042 jack_status_t *status = NULL;
\r
2043 if ( options && !options->streamName.empty() )
\r
2044 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2046 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2047 if ( client == 0 ) {
\r
2048 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2049 error( RtAudioError::WARNING );
\r
2054 // The handle must have been created on an earlier pass.
\r
2055 client = handle->client;
\r
2058 const char **ports;
\r
2059 std::string port, previousPort, deviceName;
\r
2060 unsigned int nPorts = 0, nDevices = 0;
\r
2061 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2063 // Parse the port names up to the first colon (:).
\r
2064 size_t iColon = 0;
\r
2066 port = (char *) ports[ nPorts ];
\r
2067 iColon = port.find(":");
\r
2068 if ( iColon != std::string::npos ) {
\r
2069 port = port.substr( 0, iColon );
\r
2070 if ( port != previousPort ) {
\r
2071 if ( nDevices == device ) deviceName = port;
\r
2073 previousPort = port;
\r
2076 } while ( ports[++nPorts] );
\r
2080 if ( device >= nDevices ) {
\r
2081 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2085 // Count the available ports containing the client name as device
\r
2086 // channels. Jack "input ports" equal RtAudio output channels.
\r
2087 unsigned int nChannels = 0;
\r
2088 unsigned long flag = JackPortIsInput;
\r
2089 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2090 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2092 while ( ports[ nChannels ] ) nChannels++;
\r
2096 // Compare the jack ports for specified client to the requested number of channels.
\r
2097 if ( nChannels < (channels + firstChannel) ) {
\r
2098 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2099 errorText_ = errorStream_.str();
\r
2103 // Check the jack server sample rate.
\r
2104 unsigned int jackRate = jack_get_sample_rate( client );
\r
2105 if ( sampleRate != jackRate ) {
\r
2106 jack_client_close( client );
\r
2107 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2108 errorText_ = errorStream_.str();
\r
2111 stream_.sampleRate = jackRate;
\r
2113 // Get the latency of the JACK port.
\r
2114 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2115 if ( ports[ firstChannel ] ) {
\r
2116 // Added by Ge Wang
\r
2117 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2118 // the range (usually the min and max are equal)
\r
2119 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2120 // get the latency range
\r
2121 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2122 // be optimistic, use the min!
\r
2123 stream_.latency[mode] = latrange.min;
\r
2124 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2128 // The jack server always uses 32-bit floating-point data.
\r
2129 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2130 stream_.userFormat = format;
\r
2132 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2133 else stream_.userInterleaved = true;
\r
2135 // Jack always uses non-interleaved buffers.
\r
2136 stream_.deviceInterleaved[mode] = false;
\r
2138 // Jack always provides host byte-ordered data.
\r
2139 stream_.doByteSwap[mode] = false;
\r
2141 // Get the buffer size. The buffer size and number of buffers
\r
2142 // (periods) is set when the jack server is started.
\r
2143 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2144 *bufferSize = stream_.bufferSize;
\r
2146 stream_.nDeviceChannels[mode] = channels;
\r
2147 stream_.nUserChannels[mode] = channels;
\r
2149 // Set flags for buffer conversion.
\r
2150 stream_.doConvertBuffer[mode] = false;
\r
2151 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2152 stream_.doConvertBuffer[mode] = true;
\r
2153 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2154 stream_.nUserChannels[mode] > 1 )
\r
2155 stream_.doConvertBuffer[mode] = true;
\r
2157 // Allocate our JackHandle structure for the stream.
\r
2158 if ( handle == 0 ) {
\r
2160 handle = new JackHandle;
\r
2162 catch ( std::bad_alloc& ) {
\r
2163 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2167 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2168 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2171 stream_.apiHandle = (void *) handle;
\r
2172 handle->client = client;
\r
2174 handle->deviceName[mode] = deviceName;
\r
2176 // Allocate necessary internal buffers.
\r
2177 unsigned long bufferBytes;
\r
2178 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2179 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2180 if ( stream_.userBuffer[mode] == NULL ) {
\r
2181 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2185 if ( stream_.doConvertBuffer[mode] ) {
\r
2187 bool makeBuffer = true;
\r
2188 if ( mode == OUTPUT )
\r
2189 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2190 else { // mode == INPUT
\r
2191 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
// In duplex mode the single shared device buffer must be large
// enough for the bigger of the two directions.
2192 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2193 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2194 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2198 if ( makeBuffer ) {
\r
2199 bufferBytes *= *bufferSize;
\r
2200 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2201 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2202 if ( stream_.deviceBuffer == NULL ) {
\r
2203 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2209 // Allocate memory for the Jack ports (channels) identifiers.
\r
2210 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2211 if ( handle->ports[mode] == NULL ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2216 stream_.device[mode] = device;
\r
2217 stream_.channelOffset[mode] = firstChannel;
\r
2218 stream_.state = STREAM_STOPPED;
\r
2219 stream_.callbackInfo.object = (void *) this;
\r
2221 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2222 // We had already set up the stream for output.
\r
2223 stream_.mode = DUPLEX;
\r
2225 stream_.mode = mode;
\r
2226 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
// NOTE(review): this passes &handle (address of the local JackHandle
// pointer) while jackXrun() casts its argument straight to
// JackHandle* — these look inconsistent; verify intended behavior.
2227 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2228 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2231 // Register our ports.
\r
2233 if ( mode == OUTPUT ) {
\r
2234 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2235 snprintf( label, 64, "outport %d", i );
\r
2236 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2237 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2241 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2242 snprintf( label, 64, "inport %d", i );
\r
2243 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2244 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2248 // Setup the buffer conversion information structure. We don't use
\r
2249 // buffers to do channel offsets, so we override that parameter
\r
2251 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// ---- Error-unwind path: release everything acquired above. ----
2257 pthread_cond_destroy( &handle->condition );
\r
2258 jack_client_close( handle->client );
\r
2260 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2261 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2264 stream_.apiHandle = 0;
\r
2267 for ( int i=0; i<2; i++ ) {
\r
2268 if ( stream_.userBuffer[i] ) {
\r
2269 free( stream_.userBuffer[i] );
\r
2270 stream_.userBuffer[i] = 0;
\r
2274 if ( stream_.deviceBuffer ) {
\r
2275 free( stream_.deviceBuffer );
\r
2276 stream_.deviceBuffer = 0;
\r
// Closes the open stream: deactivates the client if running, closes
// the JACK connection, destroys the condition variable, and frees the
// port array, user buffers and device buffer.  Leaves the stream in
// the UNINITIALIZED/STREAM_CLOSED state.
2282 void RtApiJack :: closeStream( void )
\r
2284 if ( stream_.state == STREAM_CLOSED ) {
\r
2285 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2286 error( RtAudioError::WARNING );
\r
2290 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2293 if ( stream_.state == STREAM_RUNNING )
\r
2294 jack_deactivate( handle->client );
\r
2296 jack_client_close( handle->client );
\r
2300 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2301 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2302 pthread_cond_destroy( &handle->condition );
\r
2304 stream_.apiHandle = 0;
\r
2307 for ( int i=0; i<2; i++ ) {
\r
2308 if ( stream_.userBuffer[i] ) {
\r
2309 free( stream_.userBuffer[i] );
\r
2310 stream_.userBuffer[i] = 0;
\r
2314 if ( stream_.deviceBuffer ) {
\r
2315 free( stream_.deviceBuffer );
\r
2316 stream_.deviceBuffer = 0;
\r
2319 stream_.mode = UNINITIALIZED;
\r
2320 stream_.state = STREAM_CLOSED;
\r
// Starts the stream: activates the JACK client, then wires our
// registered ports to the device's ports, honoring the channel offset
// chosen at open time.  On any connection failure, `result` is left
// non-zero and a SYSTEM_ERROR is raised at the end.
2323 void RtApiJack :: startStream( void )
\r
2326 if ( stream_.state == STREAM_RUNNING ) {
\r
2327 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2328 error( RtAudioError::WARNING );
\r
2332 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2333 int result = jack_activate( handle->client );
\r
2335 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2339 const char **ports;
\r
2341 // Get the list of available ports.
\r
2342 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Our playback ports connect TO the device's input ports.
2344 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2345 if ( ports == NULL) {
\r
2346 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2350 // Now make the port connections. Since RtAudio wasn't designed to
\r
2351 // allow the user to select particular channels of a device, we'll
\r
2352 // just open the first "nChannels" ports with offset.
\r
2353 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2355 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2356 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2359 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2366 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
// Our capture ports connect FROM the device's output ports.
2368 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2369 if ( ports == NULL) {
\r
2370 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2374 // Now make the port connections. See note above.
\r
2375 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2377 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2378 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2381 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
// Reset drain bookkeeping for this run before marking RUNNING.
2388 handle->drainCounter = 0;
\r
2389 handle->internalDrain = false;
\r
2390 stream_.state = STREAM_RUNNING;
\r
2393 if ( result == 0 ) return;
\r
2394 error( RtAudioError::SYSTEM_ERROR );
\r
// Stops the stream.  For output/duplex streams that are not already
// draining, it requests a drain (drainCounter = 2) and blocks on the
// condition variable until callbackEvent() signals that the final
// buffers have been played, then deactivates the JACK client.
2397 void RtApiJack :: stopStream( void )
\r
2400 if ( stream_.state == STREAM_STOPPED ) {
\r
2401 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2402 error( RtAudioError::WARNING );
\r
2406 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2407 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2409 if ( handle->drainCounter == 0 ) {
\r
2410 handle->drainCounter = 2;
\r
2411 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2415 jack_deactivate( handle->client );
\r
2416 stream_.state = STREAM_STOPPED;
\r
// Aborts the stream: sets drainCounter = 2 so the callback discards
// remaining output (rather than draining it) before the stop
// completes (stopStream() is invoked in the full source after this
// fragment).
2419 void RtApiJack :: abortStream( void )
\r
2422 if ( stream_.state == STREAM_STOPPED ) {
\r
2423 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2424 error( RtAudioError::WARNING );
\r
2428 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2429 handle->drainCounter = 2;
\r
2434 // This function will be called by a spawned thread when the user
\r
2435 // callback function signals that the stream should be stopped or
\r
2436 // aborted. It is necessary to handle it this way because the
\r
2437 // callbackEvent() function must return before the jack_deactivate()
\r
2438 // function will return.
\r
2439 static void *jackStopStream( void *ptr )
\r
2441 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2442 RtApiJack *object = (RtApiJack *) info->object;
\r
2444 object->stopStream();
\r
2445 pthread_exit( NULL );
\r
// Per-cycle processing for the JACK stream, called from
// jackCallbackHandler().  Order of operations: finish a pending drain
// if done; invoke the user callback (reporting any xruns); write
// output to the JACK port buffers (zeros while draining, converted or
// raw user data otherwise); read input from the port buffers; advance
// the stream time.  Returns SUCCESS/FAILURE to the JACK handler.
//
// NOTE(review): the two error strings below say "RtApiCore::" inside
// RtApiJack::callbackEvent — this looks like a copy-paste slip from
// the CoreAudio section; confirm and fix upstream (string literals
// are left untouched in this documentation-only pass).
2448 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2450 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2451 if ( stream_.state == STREAM_CLOSED ) {
\r
2452 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2453 error( RtAudioError::WARNING );
\r
// JACK fixes the buffer size server-side; a mismatch means our
// allocated buffers are the wrong size for this cycle.
2456 if ( stream_.bufferSize != nframes ) {
\r
2457 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2458 error( RtAudioError::WARNING );
\r
2462 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2463 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2465 // Check if we were draining the stream and signal is finished.
\r
2466 if ( handle->drainCounter > 3 ) {
\r
2467 ThreadHandle threadId;
\r
2469 stream_.state = STREAM_STOPPING;
\r
// Internal drain (callback-initiated): spawn jackStopStream, since
// jack_deactivate() cannot be called from inside this callback.
2470 if ( handle->internalDrain == true )
\r
2471 pthread_create( &threadId, NULL, jackStopStream, info );
\r
// External drain: wake the thread blocked in stopStream().
2473 pthread_cond_signal( &handle->condition );
\r
2477 // Invoke user callback first, to get fresh output data.
\r
2478 if ( handle->drainCounter == 0 ) {
\r
2479 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2480 double streamTime = getStreamTime();
\r
2481 RtAudioStreamStatus status = 0;
\r
// Report and clear any xruns recorded by jackXrun() since last cycle.
2482 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2483 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2484 handle->xrun[0] = false;
\r
2486 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2487 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2488 handle->xrun[1] = false;
\r
2490 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2491 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort (discard pending output); 1 = drain then stop.
2492 if ( cbReturnValue == 2 ) {
\r
2493 stream_.state = STREAM_STOPPING;
\r
2494 handle->drainCounter = 2;
\r
2496 pthread_create( &id, NULL, jackStopStream, info );
\r
2499 else if ( cbReturnValue == 1 ) {
\r
2500 handle->drainCounter = 1;
\r
2501 handle->internalDrain = true;
\r
2505 jack_default_audio_sample_t *jackbuffer;
\r
2506 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2507 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2509 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2511 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2512 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2513 memset( jackbuffer, 0, bufferBytes );
\r
2517 else if ( stream_.doConvertBuffer[0] ) {
\r
// Convert user format/interleaving into the non-interleaved float
// device buffer, then scatter per-channel planes to the JACK ports.
2519 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2521 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2522 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2523 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2526 else { // no buffer conversion
\r
2527 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2528 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2529 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
// While draining, count cycles so the > 3 check above fires once the
// final buffers have been emitted.
2533 if ( handle->drainCounter ) {
\r
2534 handle->drainCounter++;
\r
2539 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2541 if ( stream_.doConvertBuffer[1] ) {
\r
2542 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2543 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2544 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2546 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2548 else { // no buffer conversion
\r
2549 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2550 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2551 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2557 RtApi::tickStreamTime();
\r
2560 //******************** End of __UNIX_JACK__ *********************//
\r
2563 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2565 // The ASIO API is designed around a callback scheme, so this
\r
2566 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2567 // Jack. The primary constraint with ASIO is that it only allows
\r
2568 // access to a single driver at a time. Thus, it is not possible to
\r
2569 // have more than one simultaneous RtAudio stream.
\r
2571 // This implementation also requires a number of external ASIO files
\r
2572 // and a few global variables. The ASIO callback scheme does not
\r
2573 // allow for the passing of user data, so we must create a global
\r
2574 // pointer to our callbackInfo structure.
\r
2576 // On unix systems, we make use of a pthread condition variable.
\r
2577 // Since there is no equivalent in Windows, I hacked something based
\r
2578 // on information found in
\r
2579 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2581 #include "asiosys.h"
\r
2583 #include "iasiothiscallresolver.h"
\r
2584 #include "asiodrivers.h"
\r
// File-scope ASIO state.  The ASIO callback scheme cannot carry user
// data, so the callback info must live in a global; ASIO also only
// permits one loaded driver (hence one stream) at a time.
2587 static AsioDrivers drivers;
\r
2588 static ASIOCallbacks asioCallbacks;
\r
2589 static ASIODriverInfo driverInfo;
\r
2590 static CallbackInfo *asioCallbackInfo;
\r
// Set when the driver reports an over/underrun; consumed by the
// stream callback.
2591 static bool asioXRun;
\r
// Per-stream bookkeeping for the ASIO implementation.
// NOTE(review): this fragment appears to be missing lines between the
// bufferInfos member and the initializer list (at minimum the
// constructor header; possibly further members) — verify against the
// canonical source.
2593 struct AsioHandle {
\r
2594 int drainCounter; // Tracks callback counts when draining
\r
2595 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Driver-owned double-buffer descriptors, one per channel.
2596 ASIOBufferInfo *bufferInfos;
\r
2600 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2603 // Function declarations (definitions at end of section)
\r
// Forward declarations for the static ASIO callbacks/helpers that are
// referenced before their definitions.
2604 static const char* getAsioErrorString( ASIOError result );
\r
2605 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2606 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initializes COM (single-threaded apartment required by
// ASIO), resets any loaded driver, and prepares the ASIODriverInfo
// used by later ASIOInit() calls.
2608 RtApiAsio :: RtApiAsio()
\r
2610 // ASIO cannot run on a multi-threaded apartment. You can call
\r
2611 // CoInitialize beforehand, but it must be for apartment threading
\r
2612 // (in which case, CoInitialize will return S_FALSE here).
\r
2613 coInitialized_ = false;
\r
2614 HRESULT hr = CoInitialize( NULL );
\r
2615 if ( FAILED(hr) ) {
\r
2616 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2617 error( RtAudioError::WARNING );
\r
// Remember we own the COM init so the destructor can CoUninitialize().
2619 coInitialized_ = true;
\r
2621 drivers.removeCurrentDriver();
\r
2622 driverInfo.asioVersion = 2;
\r
2624 // See note in DirectSound implementation about GetDesktopWindow().
\r
2625 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: closes any open stream, then releases COM if this
// object performed the CoInitialize().
2628 RtApiAsio :: ~RtApiAsio()
\r
2630 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2631 if ( coInitialized_ ) CoUninitialize();
\r
// Returns the number of installed ASIO drivers as reported by the
// global AsioDrivers list.
2634 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2636 return (unsigned int) drivers.asioGetNumDev();
\r
2639 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2641 RtAudio::DeviceInfo info;
\r
2642 info.probed = false;
\r
2645 unsigned int nDevices = getDeviceCount();
\r
2646 if ( nDevices == 0 ) {
\r
2647 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2648 error( RtAudioError::INVALID_USE );
\r
2652 if ( device >= nDevices ) {
\r
2653 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2654 error( RtAudioError::INVALID_USE );
\r
2658 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2659 if ( stream_.state != STREAM_CLOSED ) {
\r
2660 if ( device >= devices_.size() ) {
\r
2661 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2662 error( RtAudioError::WARNING );
\r
2665 return devices_[ device ];
\r
2668 char driverName[32];
\r
2669 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2670 if ( result != ASE_OK ) {
\r
2671 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2672 errorText_ = errorStream_.str();
\r
2673 error( RtAudioError::WARNING );
\r
2677 info.name = driverName;
\r
2679 if ( !drivers.loadDriver( driverName ) ) {
\r
2680 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2681 errorText_ = errorStream_.str();
\r
2682 error( RtAudioError::WARNING );
\r
2686 result = ASIOInit( &driverInfo );
\r
2687 if ( result != ASE_OK ) {
\r
2688 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2689 errorText_ = errorStream_.str();
\r
2690 error( RtAudioError::WARNING );
\r
2694 // Determine the device channel information.
\r
2695 long inputChannels, outputChannels;
\r
2696 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2697 if ( result != ASE_OK ) {
\r
2698 drivers.removeCurrentDriver();
\r
2699 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2700 errorText_ = errorStream_.str();
\r
2701 error( RtAudioError::WARNING );
\r
2705 info.outputChannels = outputChannels;
\r
2706 info.inputChannels = inputChannels;
\r
2707 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2708 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2710 // Determine the supported sample rates.
\r
2711 info.sampleRates.clear();
\r
2712 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2713 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2714 if ( result == ASE_OK )
\r
2715 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2718 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2719 ASIOChannelInfo channelInfo;
\r
2720 channelInfo.channel = 0;
\r
2721 channelInfo.isInput = true;
\r
2722 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2723 result = ASIOGetChannelInfo( &channelInfo );
\r
2724 if ( result != ASE_OK ) {
\r
2725 drivers.removeCurrentDriver();
\r
2726 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2727 errorText_ = errorStream_.str();
\r
2728 error( RtAudioError::WARNING );
\r
2732 info.nativeFormats = 0;
\r
2733 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2734 info.nativeFormats |= RTAUDIO_SINT16;
\r
2735 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2736 info.nativeFormats |= RTAUDIO_SINT32;
\r
2737 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2738 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2739 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2740 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2741 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2742 info.nativeFormats |= RTAUDIO_SINT24;
\r
2744 if ( info.outputChannels > 0 )
\r
2745 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2746 if ( info.inputChannels > 0 )
\r
2747 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2749 info.probed = true;
\r
2750 drivers.removeCurrentDriver();
\r
2754 static void bufferSwitch( long index, ASIOBool processNow )
\r
2756 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2757 object->callbackEvent( index );
\r
2760 void RtApiAsio :: saveDeviceInfo( void )
\r
2764 unsigned int nDevices = getDeviceCount();
\r
2765 devices_.resize( nDevices );
\r
2766 for ( unsigned int i=0; i<nDevices; i++ )
\r
2767 devices_[i] = getDeviceInfo( i );
\r
2770 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2771 unsigned int firstChannel, unsigned int sampleRate,
\r
2772 RtAudioFormat format, unsigned int *bufferSize,
\r
2773 RtAudio::StreamOptions *options )
\r
2775 // For ASIO, a duplex stream MUST use the same driver.
\r
2776 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2777 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2781 char driverName[32];
\r
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2783 if ( result != ASE_OK ) {
\r
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2785 errorText_ = errorStream_.str();
\r
2789 // Only load the driver once for duplex stream.
\r
2790 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2791 // The getDeviceInfo() function will not work when a stream is open
\r
2792 // because ASIO does not allow multiple devices to run at the same
\r
2793 // time. Thus, we'll probe the system before opening a stream and
\r
2794 // save the results for use by getDeviceInfo().
\r
2795 this->saveDeviceInfo();
\r
2797 if ( !drivers.loadDriver( driverName ) ) {
\r
2798 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2799 errorText_ = errorStream_.str();
\r
2803 result = ASIOInit( &driverInfo );
\r
2804 if ( result != ASE_OK ) {
\r
2805 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2806 errorText_ = errorStream_.str();
\r
2811 // Check the device channel count.
\r
2812 long inputChannels, outputChannels;
\r
2813 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2814 if ( result != ASE_OK ) {
\r
2815 drivers.removeCurrentDriver();
\r
2816 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2817 errorText_ = errorStream_.str();
\r
2821 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2822 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2823 drivers.removeCurrentDriver();
\r
2824 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2825 errorText_ = errorStream_.str();
\r
2828 stream_.nDeviceChannels[mode] = channels;
\r
2829 stream_.nUserChannels[mode] = channels;
\r
2830 stream_.channelOffset[mode] = firstChannel;
\r
2832 // Verify the sample rate is supported.
\r
2833 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2834 if ( result != ASE_OK ) {
\r
2835 drivers.removeCurrentDriver();
\r
2836 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2837 errorText_ = errorStream_.str();
\r
2841 // Get the current sample rate
\r
2842 ASIOSampleRate currentRate;
\r
2843 result = ASIOGetSampleRate( ¤tRate );
\r
2844 if ( result != ASE_OK ) {
\r
2845 drivers.removeCurrentDriver();
\r
2846 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2847 errorText_ = errorStream_.str();
\r
2851 // Set the sample rate only if necessary
\r
2852 if ( currentRate != sampleRate ) {
\r
2853 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2854 if ( result != ASE_OK ) {
\r
2855 drivers.removeCurrentDriver();
\r
2856 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2857 errorText_ = errorStream_.str();
\r
2862 // Determine the driver data type.
\r
2863 ASIOChannelInfo channelInfo;
\r
2864 channelInfo.channel = 0;
\r
2865 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2866 else channelInfo.isInput = true;
\r
2867 result = ASIOGetChannelInfo( &channelInfo );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2871 errorText_ = errorStream_.str();
\r
2875 // Assuming WINDOWS host is always little-endian.
\r
2876 stream_.doByteSwap[mode] = false;
\r
2877 stream_.userFormat = format;
\r
2878 stream_.deviceFormat[mode] = 0;
\r
2879 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2880 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2881 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2883 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2884 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2885 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2887 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2888 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2889 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2891 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2892 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2893 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2895 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2896 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2897 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2900 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2901 drivers.removeCurrentDriver();
\r
2902 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2903 errorText_ = errorStream_.str();
\r
2907 // Set the buffer size. For a duplex stream, this will end up
\r
2908 // setting the buffer size based on the input constraints, which
\r
2910 long minSize, maxSize, preferSize, granularity;
\r
2911 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2912 if ( result != ASE_OK ) {
\r
2913 drivers.removeCurrentDriver();
\r
2914 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2915 errorText_ = errorStream_.str();
\r
2919 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2920 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2921 else if ( granularity == -1 ) {
\r
2922 // Make sure bufferSize is a power of two.
\r
2923 int log2_of_min_size = 0;
\r
2924 int log2_of_max_size = 0;
\r
2926 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2927 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2928 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2931 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2932 int min_delta_num = log2_of_min_size;
\r
2934 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2935 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2936 if (current_delta < min_delta) {
\r
2937 min_delta = current_delta;
\r
2938 min_delta_num = i;
\r
2942 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2943 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2944 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2946 else if ( granularity != 0 ) {
\r
2947 // Set to an even multiple of granularity, rounding up.
\r
2948 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2951 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2952 drivers.removeCurrentDriver();
\r
2953 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2957 stream_.bufferSize = *bufferSize;
\r
2958 stream_.nBuffers = 2;
\r
2960 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2961 else stream_.userInterleaved = true;
\r
2963 // ASIO always uses non-interleaved buffers.
\r
2964 stream_.deviceInterleaved[mode] = false;
\r
2966 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2967 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2968 if ( handle == 0 ) {
\r
2970 handle = new AsioHandle;
\r
2972 catch ( std::bad_alloc& ) {
\r
2973 //if ( handle == NULL ) {
\r
2974 drivers.removeCurrentDriver();
\r
2975 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2978 handle->bufferInfos = 0;
\r
2980 // Create a manual-reset event.
\r
2981 handle->condition = CreateEvent( NULL, // no security
\r
2982 TRUE, // manual-reset
\r
2983 FALSE, // non-signaled initially
\r
2984 NULL ); // unnamed
\r
2985 stream_.apiHandle = (void *) handle;
\r
2988 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2989 // and output separately, we'll have to dispose of previously
\r
2990 // created output buffers for a duplex stream.
\r
2991 long inputLatency, outputLatency;
\r
2992 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2993 ASIODisposeBuffers();
\r
2994 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2997 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2998 bool buffersAllocated = false;
\r
2999 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3000 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3001 if ( handle->bufferInfos == NULL ) {
\r
3002 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3003 errorText_ = errorStream_.str();
\r
3007 ASIOBufferInfo *infos;
\r
3008 infos = handle->bufferInfos;
\r
3009 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3010 infos->isInput = ASIOFalse;
\r
3011 infos->channelNum = i + stream_.channelOffset[0];
\r
3012 infos->buffers[0] = infos->buffers[1] = 0;
\r
3014 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3015 infos->isInput = ASIOTrue;
\r
3016 infos->channelNum = i + stream_.channelOffset[1];
\r
3017 infos->buffers[0] = infos->buffers[1] = 0;
\r
3020 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3021 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3022 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3023 asioCallbacks.asioMessage = &asioMessages;
\r
3024 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3025 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3026 if ( result != ASE_OK ) {
\r
3027 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3028 errorText_ = errorStream_.str();
\r
3031 buffersAllocated = true;
\r
3033 // Set flags for buffer conversion.
\r
3034 stream_.doConvertBuffer[mode] = false;
\r
3035 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3036 stream_.doConvertBuffer[mode] = true;
\r
3037 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3038 stream_.nUserChannels[mode] > 1 )
\r
3039 stream_.doConvertBuffer[mode] = true;
\r
3041 // Allocate necessary internal buffers
\r
3042 unsigned long bufferBytes;
\r
3043 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3044 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3045 if ( stream_.userBuffer[mode] == NULL ) {
\r
3046 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3050 if ( stream_.doConvertBuffer[mode] ) {
\r
3052 bool makeBuffer = true;
\r
3053 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3054 if ( mode == INPUT ) {
\r
3055 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3056 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3057 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3061 if ( makeBuffer ) {
\r
3062 bufferBytes *= *bufferSize;
\r
3063 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3064 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3065 if ( stream_.deviceBuffer == NULL ) {
\r
3066 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3072 stream_.sampleRate = sampleRate;
\r
3073 stream_.device[mode] = device;
\r
3074 stream_.state = STREAM_STOPPED;
\r
3075 asioCallbackInfo = &stream_.callbackInfo;
\r
3076 stream_.callbackInfo.object = (void *) this;
\r
3077 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3078 // We had already set up an output stream.
\r
3079 stream_.mode = DUPLEX;
\r
3081 stream_.mode = mode;
\r
3083 // Determine device latencies
\r
3084 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3085 if ( result != ASE_OK ) {
\r
3086 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3087 errorText_ = errorStream_.str();
\r
3088 error( RtAudioError::WARNING); // warn but don't fail
\r
3091 stream_.latency[0] = outputLatency;
\r
3092 stream_.latency[1] = inputLatency;
\r
3095 // Setup the buffer conversion information structure. We don't use
\r
3096 // buffers to do channel offsets, so we override that parameter
\r
3098 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3103 if ( buffersAllocated )
\r
3104 ASIODisposeBuffers();
\r
3105 drivers.removeCurrentDriver();
\r
3108 CloseHandle( handle->condition );
\r
3109 if ( handle->bufferInfos )
\r
3110 free( handle->bufferInfos );
\r
3112 stream_.apiHandle = 0;
\r
3115 for ( int i=0; i<2; i++ ) {
\r
3116 if ( stream_.userBuffer[i] ) {
\r
3117 free( stream_.userBuffer[i] );
\r
3118 stream_.userBuffer[i] = 0;
\r
3122 if ( stream_.deviceBuffer ) {
\r
3123 free( stream_.deviceBuffer );
\r
3124 stream_.deviceBuffer = 0;
\r
3130 void RtApiAsio :: closeStream()
\r
3132 if ( stream_.state == STREAM_CLOSED ) {
\r
3133 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3134 error( RtAudioError::WARNING );
\r
3138 if ( stream_.state == STREAM_RUNNING ) {
\r
3139 stream_.state = STREAM_STOPPED;
\r
3142 ASIODisposeBuffers();
\r
3143 drivers.removeCurrentDriver();
\r
3145 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3147 CloseHandle( handle->condition );
\r
3148 if ( handle->bufferInfos )
\r
3149 free( handle->bufferInfos );
\r
3151 stream_.apiHandle = 0;
\r
3154 for ( int i=0; i<2; i++ ) {
\r
3155 if ( stream_.userBuffer[i] ) {
\r
3156 free( stream_.userBuffer[i] );
\r
3157 stream_.userBuffer[i] = 0;
\r
3161 if ( stream_.deviceBuffer ) {
\r
3162 free( stream_.deviceBuffer );
\r
3163 stream_.deviceBuffer = 0;
\r
3166 stream_.mode = UNINITIALIZED;
\r
3167 stream_.state = STREAM_CLOSED;
\r
// File-scope flag cleared in RtApiAsio::startStream(); set when the
// stop thread has been launched (see asioStopStream usage) — TODO
// confirm the setter lives outside this chunk.
bool stopThreadCalled = false;
\r
3172 void RtApiAsio :: startStream()
\r
3175 if ( stream_.state == STREAM_RUNNING ) {
\r
3176 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3182 ASIOError result = ASIOStart();
\r
3183 if ( result != ASE_OK ) {
\r
3184 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3185 errorText_ = errorStream_.str();
\r
3189 handle->drainCounter = 0;
\r
3190 handle->internalDrain = false;
\r
3191 ResetEvent( handle->condition );
\r
3192 stream_.state = STREAM_RUNNING;
\r
3196 stopThreadCalled = false;
\r
3198 if ( result == ASE_OK ) return;
\r
3199 error( RtAudioError::SYSTEM_ERROR );
\r
3202 void RtApiAsio :: stopStream()
\r
3205 if ( stream_.state == STREAM_STOPPED ) {
\r
3206 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3207 error( RtAudioError::WARNING );
\r
3211 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3213 if ( handle->drainCounter == 0 ) {
\r
3214 handle->drainCounter = 2;
\r
3215 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3219 stream_.state = STREAM_STOPPED;
\r
3221 ASIOError result = ASIOStop();
\r
3222 if ( result != ASE_OK ) {
\r
3223 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3224 errorText_ = errorStream_.str();
\r
3227 if ( result == ASE_OK ) return;
\r
3228 error( RtAudioError::SYSTEM_ERROR );
\r
3231 void RtApiAsio :: abortStream()
\r
3234 if ( stream_.state == STREAM_STOPPED ) {
\r
3235 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3236 error( RtAudioError::WARNING );
\r
3240 // The following lines were commented-out because some behavior was
\r
3241 // noted where the device buffers need to be zeroed to avoid
\r
3242 // continuing sound, even when the device buffers are completely
\r
3243 // disposed. So now, calling abort is the same as calling stop.
\r
3244 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3245 // handle->drainCounter = 2;
\r
3249 // This function will be called by a spawned thread when the user
\r
3250 // callback function signals that the stream should be stopped or
\r
3251 // aborted. It is necessary to handle it this way because the
\r
3252 // callbackEvent() function must return before the ASIOStop()
\r
3253 // function will return.
\r
3254 static unsigned __stdcall asioStopStream( void *ptr )
\r
3256 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3257 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3259 object->stopStream();
\r
3260 _endthreadex( 0 );
\r
3264 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3266 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3267 if ( stream_.state == STREAM_CLOSED ) {
\r
3268 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3269 error( RtAudioError::WARNING );
\r
3273 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3274 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3276 // Check if we were draining the stream and signal if finished.
\r
3277 if ( handle->drainCounter > 3 ) {
\r
3279 stream_.state = STREAM_STOPPING;
\r
3280 if ( handle->internalDrain == false )
\r
3281 SetEvent( handle->condition );
\r
3282 else { // spawn a thread to stop the stream
\r
3283 unsigned threadId;
\r
3284 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3285 &stream_.callbackInfo, 0, &threadId );
\r
3290 // Invoke user callback to get fresh output data UNLESS we are
\r
3291 // draining stream.
\r
3292 if ( handle->drainCounter == 0 ) {
\r
3293 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3294 double streamTime = getStreamTime();
\r
3295 RtAudioStreamStatus status = 0;
\r
3296 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3297 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3300 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3301 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3304 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3305 stream_.bufferSize, streamTime, status, info->userData );
\r
3306 if ( cbReturnValue == 2 ) {
\r
3307 stream_.state = STREAM_STOPPING;
\r
3308 handle->drainCounter = 2;
\r
3309 unsigned threadId;
\r
3310 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3311 &stream_.callbackInfo, 0, &threadId );
\r
3314 else if ( cbReturnValue == 1 ) {
\r
3315 handle->drainCounter = 1;
\r
3316 handle->internalDrain = true;
\r
3320 unsigned int nChannels, bufferBytes, i, j;
\r
3321 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3324 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3326 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3328 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3329 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3330 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3334 else if ( stream_.doConvertBuffer[0] ) {
\r
3336 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3337 if ( stream_.doByteSwap[0] )
\r
3338 byteSwapBuffer( stream_.deviceBuffer,
\r
3339 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3340 stream_.deviceFormat[0] );
\r
3342 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3343 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3344 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3345 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3351 if ( stream_.doByteSwap[0] )
\r
3352 byteSwapBuffer( stream_.userBuffer[0],
\r
3353 stream_.bufferSize * stream_.nUserChannels[0],
\r
3354 stream_.userFormat );
\r
3356 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3357 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3358 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3359 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3364 if ( handle->drainCounter ) {
\r
3365 handle->drainCounter++;
\r
3370 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3372 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3374 if (stream_.doConvertBuffer[1]) {
\r
3376 // Always interleave ASIO input data.
\r
3377 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3378 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3379 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3380 handle->bufferInfos[i].buffers[bufferIndex],
\r
3384 if ( stream_.doByteSwap[1] )
\r
3385 byteSwapBuffer( stream_.deviceBuffer,
\r
3386 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3387 stream_.deviceFormat[1] );
\r
3388 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3392 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3393 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3394 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3395 handle->bufferInfos[i].buffers[bufferIndex],
\r
3400 if ( stream_.doByteSwap[1] )
\r
3401 byteSwapBuffer( stream_.userBuffer[1],
\r
3402 stream_.bufferSize * stream_.nUserChannels[1],
\r
3403 stream_.userFormat );
\r
3408 // The following call was suggested by Malte Clasen. While the API
\r
3409 // documentation indicates it should not be required, some device
\r
3410 // drivers apparently do not function correctly without it.
\r
3411 ASIOOutputReady();
\r
3413 RtApi::tickStreamTime();
\r
3417 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3419 // The ASIO documentation says that this usually only happens during
\r
3420 // external sync. Audio processing is not stopped by the driver,
\r
3421 // actual sample rate might not have even changed, maybe only the
\r
3422 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3425 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3427 object->stopStream();
\r
3429 catch ( RtAudioError &exception ) {
\r
3430 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3434 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3437 static long asioMessages( long selector, long value, void* message, double* opt )
\r
3441 switch( selector ) {
\r
3442 case kAsioSelectorSupported:
\r
3443 if ( value == kAsioResetRequest
\r
3444 || value == kAsioEngineVersion
\r
3445 || value == kAsioResyncRequest
\r
3446 || value == kAsioLatenciesChanged
\r
3447 // The following three were added for ASIO 2.0, you don't
\r
3448 // necessarily have to support them.
\r
3449 || value == kAsioSupportsTimeInfo
\r
3450 || value == kAsioSupportsTimeCode
\r
3451 || value == kAsioSupportsInputMonitor)
\r
3454 case kAsioResetRequest:
\r
3455 // Defer the task and perform the reset of the driver during the
\r
3456 // next "safe" situation. You cannot reset the driver right now,
\r
3457 // as this code is called from the driver. Reset the driver is
\r
3458 // done by completely destruct is. I.e. ASIOStop(),
\r
3459 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3461 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3464 case kAsioResyncRequest:
\r
3465 // This informs the application that the driver encountered some
\r
3466 // non-fatal data loss. It is used for synchronization purposes
\r
3467 // of different media. Added mainly to work around the Win16Mutex
\r
3468 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3469 // which could lose data because the Mutex was held too long by
\r
3470 // another thread. However a driver can issue it in other
\r
3471 // situations, too.
\r
3472 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3476 case kAsioLatenciesChanged:
\r
3477 // This will inform the host application that the drivers were
\r
3478 // latencies changed. Beware, it this does not mean that the
\r
3479 // buffer sizes have changed! You might need to update internal
\r
3481 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3484 case kAsioEngineVersion:
\r
3485 // Return the supported ASIO version of the host application. If
\r
3486 // a host application does not implement this selector, ASIO 1.0
\r
3487 // is assumed by the driver.
\r
3490 case kAsioSupportsTimeInfo:
\r
3491 // Informs the driver whether the
\r
3492 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3493 // For compatibility with ASIO 1.0 drivers the host application
\r
3494 // should always support the "old" bufferSwitch method, too.
\r
3497 case kAsioSupportsTimeCode:
\r
3498 // Informs the driver whether application is interested in time
\r
3499 // code info. If an application does not need to know about time
\r
3500 // code, the driver has less work to do.
\r
3507 static const char* getAsioErrorString( ASIOError result )
\r
3512 const char*message;
\r
3515 static const Messages m[] =
\r
3517 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3518 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3519 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3520 { ASE_InvalidMode, "Invalid mode." },
\r
3521 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3522 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3523 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3526 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3527 if ( m[i].value == result ) return m[i].message;
\r
3529 return "Unknown error.";
\r
3531 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3535 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3537 // Modified by Robin Davies, October 2005
\r
3538 // - Improvements to DirectX pointer chasing.
\r
3539 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3540 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3541 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3542 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3544 #include <dsound.h>
\r
3545 #include <assert.h>
\r
3546 #include <algorithm>
\r
3548 #if defined(__MINGW32__)
\r
3549 // missing from latest mingw winapi
\r
3550 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3551 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3552 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3553 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3556 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3558 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3559 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3562 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3564 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3565 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3566 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3567 return pointer >= earlierPointer && pointer < laterPointer;
\r
3570 // A structure to hold various information related to the DirectSound

3571 // API implementation.

// Per-stream state shared with the callback thread.  For each two-element
// array below, index [0] is the playback (render) side and index [1] is
// the capture side (see the matching casts of buffer[0]/buffer[1] in
// closeStream()).
3573 unsigned int drainCounter; // Tracks callback counts when draining

3574 bool internalDrain; // Indicates if stop is initiated from callback or not.

3578 UINT bufferPointer[2]; // Current byte offset cursor into each DS buffer — presumably next write (playback) / next read (capture); TODO confirm against the callback code.

3579 DWORD dsBufferSize[2]; // Size in bytes of each DirectSound device buffer.

3580 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default constructor: zero the drain state and both per-direction slots.
// NOTE(review): id[], buffer[] and xrun[] are initialized here but their
// declarations are not visible in this fragment — confirm against the
// full struct definition.
3584 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3587 // Declarations for utility functions, callbacks, and structures

3588 // specific to the DirectSound implementation.

// Enumeration callback handed to DirectSoundEnumerate /
// DirectSoundCaptureEnumerate (see getDeviceCount); receives each
// device's GUID and description plus the DsProbeData context pointer.
3589 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

3590 LPCTSTR description,

3592 LPVOID lpContext );

// Maps a DirectSound HRESULT/error code to a human-readable message.
3594 static const char* getErrorString( int code );

// Thread entry point for the stream callback thread created with
// _beginthreadex() in probeDeviceOpen().
3596 static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice constructor initializer: mark the device as not (re)found and
// both direction ids — [0] output, [1] input — invalid until enumeration
// fills them in.
3605 : found(false) { validId[0] = false; validId[1] = false; }

// Context passed through the enumeration callbacks: which direction is
// being enumerated (isInput, set by getDeviceCount before each call) and
// the device list to update.
3608 struct DsProbeData {

3610 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread.  The coInitialized_ flag
// records whether we own the CoInitialize so the destructor can balance
// it with CoUninitialize.
3613 RtApiDs :: RtApiDs()

3615 // Dsound will run both-threaded. If CoInitialize fails, then just

3616 // accept whatever the mainline chose for a threading model.

3617 coInitialized_ = false;

3618 HRESULT hr = CoInitialize( NULL );

// Success includes S_FALSE (COM already initialized on this thread),
// which still requires a balancing CoUninitialize.
3619 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize and make sure any
// open stream is torn down before the object goes away.
3622 RtApiDs :: ~RtApiDs()

3624 if ( coInitialized_ ) CoUninitialize(); // balanced call.

3625 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3628 // The DirectSound default output is always the first device.

3629 unsigned int RtApiDs :: getDefaultOutputDevice( void )

3634 // The DirectSound default input is always the first input device,

3635 // which is the first capture device enumerated.

3636 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3641 unsigned int RtApiDs :: getDeviceCount( void )
\r
3643 // Set query flag for previously found devices to false, so that we
\r
3644 // can check for any devices that have disappeared.
\r
3645 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3646 dsDevices[i].found = false;
\r
3648 // Query DirectSound devices.
\r
3649 struct DsProbeData probeInfo;
\r
3650 probeInfo.isInput = false;
\r
3651 probeInfo.dsDevices = &dsDevices;
\r
3652 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3653 if ( FAILED( result ) ) {
\r
3654 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3655 errorText_ = errorStream_.str();
\r
3656 error( RtAudioError::WARNING );
\r
3659 // Query DirectSoundCapture devices.
\r
3660 probeInfo.isInput = true;
\r
3661 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3662 if ( FAILED( result ) ) {
\r
3663 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3664 errorText_ = errorStream_.str();
\r
3665 error( RtAudioError::WARNING );
\r
3668 // Clean out any devices that may have disappeared.
\r
3669 std::vector< int > indices;
\r
3670 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3671 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3672 unsigned int nErased = 0;
\r
3673 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3674 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3676 return dsDevices.size();
\r
// Probe DirectSound device "device" and return its capabilities:
// output/input channel counts, supported sample rates, native sample
// formats, and default-device flags.  Probe failures are reported as
// WARNINGs and leave info.probed == false; an out-of-range device index
// is reported as INVALID_USE.
3679 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

3681 RtAudio::DeviceInfo info;

3682 info.probed = false;

3684 if ( dsDevices.size() == 0 ) {

3685 // Force a query of all devices

3687 if ( dsDevices.size() == 0 ) {

3688 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

3689 error( RtAudioError::INVALID_USE );

3694 if ( device >= dsDevices.size() ) {

3695 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

3696 error( RtAudioError::INVALID_USE );

// No valid output (render) GUID for this device: skip the playback
// probe and jump to the capture probe (the probeInput: label falls on a
// line not visible in this fragment).
3701 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

3703 LPDIRECTSOUND output;

3705 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3706 if ( FAILED( result ) ) {

3707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3708 errorText_ = errorStream_.str();

3709 error( RtAudioError::WARNING );

3713 outCaps.dwSize = sizeof( outCaps );

3714 result = output->GetCaps( &outCaps );

3715 if ( FAILED( result ) ) {

3716 output->Release();

3717 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

3718 errorText_ = errorStream_.str();

3719 error( RtAudioError::WARNING );

3723 // Get output channel information.

3724 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

3726 // Get sample rate information.

// Keep only the table rates that fall inside the device's continuous
// secondary-buffer rate range.
3727 info.sampleRates.clear();

3728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

3729 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

3730 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

3731 info.sampleRates.push_back( SAMPLE_RATES[k] );

3734 // Get format information.

3735 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

3736 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

3738 output->Release();

3740 if ( getDefaultOutputDevice() == device )

3741 info.isDefaultOutput = true;

// Output-only device: no capture GUID, so finish up here.
3743 if ( dsDevices[ device ].validId[1] == false ) {

3744 info.name = dsDevices[ device ].name;

3745 info.probed = true;

3751 LPDIRECTSOUNDCAPTURE input;

3752 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

3753 if ( FAILED( result ) ) {

3754 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

3755 errorText_ = errorStream_.str();

3756 error( RtAudioError::WARNING );

3761 inCaps.dwSize = sizeof( inCaps );

3762 result = input->GetCaps( &inCaps );

3763 if ( FAILED( result ) ) {

3765 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

3766 errorText_ = errorStream_.str();

3767 error( RtAudioError::WARNING );

3771 // Get input channel information.

3772 info.inputChannels = inCaps.dwChannels;

3774 // Get sample rate and format information.

// Capture capabilities are reported as discrete WAVE_FORMAT_* bits;
// translate them into native formats and the list of supported rates.
3775 std::vector<unsigned int> rates;

3776 if ( inCaps.dwChannels >= 2 ) {

3777 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3778 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3779 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3780 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3781 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3782 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3783 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3784 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3786 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3787 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

3788 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

3789 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

3790 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

3792 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3793 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

3794 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

3795 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

3796 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

3799 else if ( inCaps.dwChannels == 1 ) {

3800 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3801 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3802 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3803 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3804 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3805 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3806 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3807 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3809 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3810 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

3811 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

3812 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

3813 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

3815 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3816 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

3817 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

3818 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

3819 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

3822 else info.inputChannels = 0; // technically, this would be an error

3826 if ( info.inputChannels == 0 ) return info;

3828 // Copy the supported rates to the info structure but avoid duplication.

// (info.sampleRates may already hold the output-side rates.)
3830 for ( unsigned int i=0; i<rates.size(); i++ ) {

3832 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

3833 if ( rates[i] == info.sampleRates[j] ) {

3838 if ( found == false ) info.sampleRates.push_back( rates[i] );

3840 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

3842 // If device opens for both playback and capture, we determine the channels.

3843 if ( info.outputChannels > 0 && info.inputChannels > 0 )

3844 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// The first capture-capable device is the DirectSound default input.
3846 if ( device == 0 ) info.isDefaultInput = true;

3848 // Copy name and return.

3849 info.name = dsDevices[ device ].name;

3850 info.probed = true;
\r
// Open DirectSound device "device" for the requested mode (OUTPUT or
// INPUT, at most 2 channels per device), negotiate the sample format,
// create and zero the DS secondary (playback) or capture buffer, set up
// the stream_ bookkeeping and conversion buffers, and on first open
// spawn the callback thread.  Returns a bool success flag; failure
// paths route to the cleanup code near the bottom, which releases any
// partially-created DirectSound objects.
3854 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

3855 unsigned int firstChannel, unsigned int sampleRate,

3856 RtAudioFormat format, unsigned int *bufferSize,

3857 RtAudio::StreamOptions *options )

3859 if ( channels + firstChannel > 2 ) {

3860 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

3864 unsigned int nDevices = dsDevices.size();

3865 if ( nDevices == 0 ) {

3866 // This should not happen because a check is made before this function is called.

3867 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

3871 if ( device >= nDevices ) {

3872 // This should not happen because a check is made before this function is called.

3873 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

3877 if ( mode == OUTPUT ) {

3878 if ( dsDevices[ device ].validId[0] == false ) {

3879 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

3880 errorText_ = errorStream_.str();

3884 else { // mode == INPUT

3885 if ( dsDevices[ device ].validId[1] == false ) {

3886 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

3887 errorText_ = errorStream_.str();

3892 // According to a note in PortAudio, using GetDesktopWindow()

3893 // instead of GetForegroundWindow() is supposed to avoid problems

3894 // that occur when the application's window is not the foreground

3895 // window. Also, if the application window closes before the

3896 // DirectSound buffer, DirectSound can crash. In the past, I had

3897 // problems when using GetDesktopWindow() but it seems fine now

3898 // (January 2010). I'll leave it commented here.

3899 // HWND hWnd = GetForegroundWindow();

3900 HWND hWnd = GetDesktopWindow();

3902 // Check the numberOfBuffers parameter and limit the lowest value to

3903 // two. This is a judgement call and a value of two is probably too

3904 // low for capture, but it should work for playback.

3906 if ( options ) nBuffers = options->numberOfBuffers;

3907 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

3908 if ( nBuffers < 2 ) nBuffers = 3;

3910 // Check the lower range of the user-specified buffer size and set

3911 // (arbitrarily) to a lower bound of 32.

3912 if ( *bufferSize < 32 ) *bufferSize = 32;

3914 // Create the wave format structure. The data format setting will

3915 // be determined later.

3916 WAVEFORMATEX waveFormat;

3917 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

3918 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

3919 waveFormat.nChannels = channels + firstChannel;

3920 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

3922 // Determine the device buffer size. By default, we'll use the value

3923 // defined above (32K), but we will grow it to make allowances for

3924 // very large software buffer sizes.

3925 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

3926 DWORD dsPointerLeadTime = 0;

// Opaque holders for the DS device object and its buffer; stored into
// the DsHandle slots for this mode once everything succeeds.
3928 void *ohandle = 0, *bhandle = 0;

3930 if ( mode == OUTPUT ) {

3932 LPDIRECTSOUND output;

3933 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3934 if ( FAILED( result ) ) {

3935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3936 errorText_ = errorStream_.str();

3941 outCaps.dwSize = sizeof( outCaps );

3942 result = output->GetCaps( &outCaps );

3943 if ( FAILED( result ) ) {

3944 output->Release();

3945 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

3946 errorText_ = errorStream_.str();

3950 // Check channel information.

// NOTE(review): the message below says "RtApiDs::getDeviceInfo" but is
// emitted from probeDeviceOpen.
3951 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

3952 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

3953 errorText_ = errorStream_.str();

3957 // Check format information. Use 16-bit format unless not

3958 // supported or user requests 8-bit.

3959 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

3960 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

3961 waveFormat.wBitsPerSample = 16;

3962 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

3965 waveFormat.wBitsPerSample = 8;

3966 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

3968 stream_.userFormat = format;

3970 // Update wave format structure and buffer information.

3971 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

3972 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

3973 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

3975 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

3976 while ( dsPointerLeadTime * 2U > dsBufferSize )

3977 dsBufferSize *= 2;

3979 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

3980 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

3981 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

3982 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

3983 if ( FAILED( result ) ) {

3984 output->Release();

3985 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

3986 errorText_ = errorStream_.str();

3990 // Even though we will write to the secondary buffer, we need to

3991 // access the primary buffer to set the correct output format

3992 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

3993 // buffer description.

3994 DSBUFFERDESC bufferDescription;

3995 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

3996 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

3997 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

3999 // Obtain the primary buffer

4000 LPDIRECTSOUNDBUFFER buffer;

4001 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4002 if ( FAILED( result ) ) {

4003 output->Release();

4004 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

4005 errorText_ = errorStream_.str();

4009 // Set the primary DS buffer sound format.

4010 result = buffer->SetFormat( &waveFormat );

4011 if ( FAILED( result ) ) {

4012 output->Release();

4013 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

4014 errorText_ = errorStream_.str();

4018 // Setup the secondary DS buffer description.

4019 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

4020 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

4021 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4022 DSBCAPS_GLOBALFOCUS |

4023 DSBCAPS_GETCURRENTPOSITION2 |

4024 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

4025 bufferDescription.dwBufferBytes = dsBufferSize;

4026 bufferDescription.lpwfxFormat = &waveFormat;

4028 // Try to create the secondary DS buffer. If that doesn't work,

4029 // try to use software mixing. Otherwise, there's a problem.

4030 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4031 if ( FAILED( result ) ) {

4032 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4033 DSBCAPS_GLOBALFOCUS |

4034 DSBCAPS_GETCURRENTPOSITION2 |

4035 DSBCAPS_LOCSOFTWARE ); // Force software mixing

4036 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4037 if ( FAILED( result ) ) {

4038 output->Release();

4039 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

4040 errorText_ = errorStream_.str();

4045 // Get the buffer size ... might be different from what we specified.

4047 dsbcaps.dwSize = sizeof( DSBCAPS );

4048 result = buffer->GetCaps( &dsbcaps );

4049 if ( FAILED( result ) ) {

4050 output->Release();

4051 buffer->Release();

4052 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4053 errorText_ = errorStream_.str();

4057 dsBufferSize = dsbcaps.dwBufferBytes;

4059 // Lock the DS buffer

4062 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4063 if ( FAILED( result ) ) {

4064 output->Release();

4065 buffer->Release();

4066 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

4067 errorText_ = errorStream_.str();

4071 // Zero the DS buffer

4072 ZeroMemory( audioPtr, dataLen );

4074 // Unlock the DS buffer

4075 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4076 if ( FAILED( result ) ) {

4077 output->Release();

4078 buffer->Release();

4079 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

4080 errorText_ = errorStream_.str();

4084 ohandle = (void *) output;

4085 bhandle = (void *) buffer;

4088 if ( mode == INPUT ) {

4090 LPDIRECTSOUNDCAPTURE input;

4091 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

4092 if ( FAILED( result ) ) {

4093 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

4094 errorText_ = errorStream_.str();

4099 inCaps.dwSize = sizeof( inCaps );

4100 result = input->GetCaps( &inCaps );

4101 if ( FAILED( result ) ) {

4103 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

4104 errorText_ = errorStream_.str();

4108 // Check channel information.

// NOTE(review): the message below says "RtApiDs::getDeviceInfo" but is
// emitted from probeDeviceOpen.
4109 if ( inCaps.dwChannels < channels + firstChannel ) {

4110 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

4114 // Check format information. Use 16-bit format unless user

4115 // requests 8-bit.

4116 DWORD deviceFormats;

4117 if ( channels + firstChannel == 2 ) {

4118 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

4119 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4120 waveFormat.wBitsPerSample = 8;

4121 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4123 else { // assume 16-bit is supported

4124 waveFormat.wBitsPerSample = 16;

4125 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4128 else { // channel == 1

4129 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

4130 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4131 waveFormat.wBitsPerSample = 8;

4132 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4134 else { // assume 16-bit is supported

4135 waveFormat.wBitsPerSample = 16;

4136 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4139 stream_.userFormat = format;

4141 // Update wave format structure and buffer information.

4142 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

4143 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

4144 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

4146 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

4147 while ( dsPointerLeadTime * 2U > dsBufferSize )

4148 dsBufferSize *= 2;

4150 // Setup the secondary DS buffer description.

4151 DSCBUFFERDESC bufferDescription;

4152 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

4153 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

4154 bufferDescription.dwFlags = 0;

4155 bufferDescription.dwReserved = 0;

4156 bufferDescription.dwBufferBytes = dsBufferSize;

4157 bufferDescription.lpwfxFormat = &waveFormat;

4159 // Create the capture buffer.

4160 LPDIRECTSOUNDCAPTUREBUFFER buffer;

4161 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

4162 if ( FAILED( result ) ) {

4164 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

4165 errorText_ = errorStream_.str();

4169 // Get the buffer size ... might be different from what we specified.

4170 DSCBCAPS dscbcaps;

4171 dscbcaps.dwSize = sizeof( DSCBCAPS );

4172 result = buffer->GetCaps( &dscbcaps );

4173 if ( FAILED( result ) ) {

4175 buffer->Release();

4176 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4177 errorText_ = errorStream_.str();

4181 dsBufferSize = dscbcaps.dwBufferBytes;

4183 // NOTE: We could have a problem here if this is a duplex stream

4184 // and the play and capture hardware buffer sizes are different

4185 // (I'm actually not sure if that is a problem or not).

4186 // Currently, we are not verifying that.

4188 // Lock the capture buffer

4191 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4192 if ( FAILED( result ) ) {

4194 buffer->Release();

4195 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

4196 errorText_ = errorStream_.str();

4200 // Zero the buffer

4201 ZeroMemory( audioPtr, dataLen );

4203 // Unlock the buffer

4204 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4205 if ( FAILED( result ) ) {

4207 buffer->Release();

4208 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

4209 errorText_ = errorStream_.str();

4213 ohandle = (void *) input;

4214 bhandle = (void *) buffer;

4217 // Set various stream parameters

4218 DsHandle *handle = 0;

4219 stream_.nDeviceChannels[mode] = channels + firstChannel;

4220 stream_.nUserChannels[mode] = channels;

4221 stream_.bufferSize = *bufferSize;

4222 stream_.channelOffset[mode] = firstChannel;

4223 stream_.deviceInterleaved[mode] = true;

4224 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

4225 else stream_.userInterleaved = true;

4227 // Set flag for buffer conversion

// Conversion is needed when the user and device disagree on channel
// count, sample format, or interleaving.
4228 stream_.doConvertBuffer[mode] = false;

4229 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

4230 stream_.doConvertBuffer[mode] = true;

4231 if (stream_.userFormat != stream_.deviceFormat[mode])

4232 stream_.doConvertBuffer[mode] = true;

4233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4234 stream_.nUserChannels[mode] > 1 )

4235 stream_.doConvertBuffer[mode] = true;

4237 // Allocate necessary internal buffers

4238 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

4239 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

4240 if ( stream_.userBuffer[mode] == NULL ) {

4241 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

4245 if ( stream_.doConvertBuffer[mode] ) {

4247 bool makeBuffer = true;

4248 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
4249 if ( mode == INPUT ) {

4250 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

4251 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

4252 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

4256 if ( makeBuffer ) {

4257 bufferBytes *= *bufferSize;

4258 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

4259 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

4260 if ( stream_.deviceBuffer == NULL ) {

4261 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

4267 // Allocate our DsHandle structures for the stream.

// First open of this stream: create the DsHandle and its signaling event.
// NOTE(review): the message below says "AsioHandle" but this allocates a
// DsHandle — copy/paste from the ASIO section.
4268 if ( stream_.apiHandle == 0 ) {

4270 handle = new DsHandle;

4272 catch ( std::bad_alloc& ) {

4273 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

4277 // Create a manual-reset event.

4278 handle->condition = CreateEvent( NULL, // no security

4279 TRUE, // manual-reset

4280 FALSE, // non-signaled initially

4281 NULL ); // unnamed

4282 stream_.apiHandle = (void *) handle;

4285 handle = (DsHandle *) stream_.apiHandle;

4286 handle->id[mode] = ohandle;

4287 handle->buffer[mode] = bhandle;

4288 handle->dsBufferSize[mode] = dsBufferSize;

4289 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

4291 stream_.device[mode] = device;

4292 stream_.state = STREAM_STOPPED;

4293 if ( stream_.mode == OUTPUT && mode == INPUT )

4294 // We had already set up an output stream.

4295 stream_.mode = DUPLEX;

4297 stream_.mode = mode;

4298 stream_.nBuffers = nBuffers;

4299 stream_.sampleRate = sampleRate;

4301 // Setup the buffer conversion information structure.

4302 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

4304 // Setup the callback thread.

4305 if ( stream_.callbackInfo.isRunning == false ) {

4306 unsigned threadId;

4307 stream_.callbackInfo.isRunning = true;

4308 stream_.callbackInfo.object = (void *) this;

4309 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

4310 &stream_.callbackInfo, 0, &threadId );

4311 if ( stream_.callbackInfo.thread == 0 ) {

4312 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

4316 // Boost DS thread priority

4317 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error cleanup path (reached from the failure branches above): release
// whichever DirectSound objects and internal buffers were created, then
// mark the stream closed.
4323 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4324 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4325 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4326 if ( buffer ) buffer->Release();

4327 object->Release();

4329 if ( handle->buffer[1] ) {

4330 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4331 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4332 if ( buffer ) buffer->Release();

4333 object->Release();

4335 CloseHandle( handle->condition );

4337 stream_.apiHandle = 0;

4340 for ( int i=0; i<2; i++ ) {

4341 if ( stream_.userBuffer[i] ) {

4342 free( stream_.userBuffer[i] );

4343 stream_.userBuffer[i] = 0;

4347 if ( stream_.deviceBuffer ) {

4348 free( stream_.deviceBuffer );

4349 stream_.deviceBuffer = 0;

4352 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound objects and buffers, free the internal user/device
// buffers, and reset the stream state to CLOSED.  Emits a WARNING if no
// stream is open.
4356 void RtApiDs :: closeStream()

4358 if ( stream_.state == STREAM_CLOSED ) {

4359 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

4360 error( RtAudioError::WARNING );

4364 // Stop the callback thread.

// Clearing isRunning signals the thread to exit; wait for it, then
// reclaim the thread handle.
4365 stream_.callbackInfo.isRunning = false;

4366 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

4367 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

4369 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release the playback-side DS buffer and device object.
4371 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4372 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4373 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4376 buffer->Release();

4378 object->Release();

// Release the capture-side DS buffer and device object.
4380 if ( handle->buffer[1] ) {

4381 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4382 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4385 buffer->Release();

4387 object->Release();

4389 CloseHandle( handle->condition );

4391 stream_.apiHandle = 0;

// Free the per-direction user buffers and the shared device buffer.
4394 for ( int i=0; i<2; i++ ) {

4395 if ( stream_.userBuffer[i] ) {

4396 free( stream_.userBuffer[i] );

4397 stream_.userBuffer[i] = 0;

4401 if ( stream_.deviceBuffer ) {

4402 free( stream_.deviceBuffer );

4403 stream_.deviceBuffer = 0;

4406 stream_.mode = UNINITIALIZED;

4407 stream_.state = STREAM_CLOSED;
\r
// Start the stream: begin looping playback and/or capture on the
// DirectSound buffers, reset the drain state, and mark the stream
// RUNNING.  Emits a WARNING if already running; a Play/Start failure is
// raised as SYSTEM_ERROR at the end.
4410 void RtApiDs :: startStream()

4413 if ( stream_.state == STREAM_RUNNING ) {

4414 errorText_ = "RtApiDs::startStream(): the stream is already running!";

4415 error( RtAudioError::WARNING );

4419 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4421 // Increase scheduler frequency on lesser windows (a side-effect of

4422 // increasing timer accuracy). On greater windows (Win2K or later),

4423 // this is already in effect.

4424 timeBeginPeriod( 1 );

4426 buffersRolling = false;

4427 duplexPrerollBytes = 0;

4429 if ( stream_.mode == DUPLEX ) {

4430 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

4431 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

4434 HRESULT result = 0;

// Kick off looping playback on the render buffer.
4435 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4437 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4438 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

4439 if ( FAILED( result ) ) {

4440 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

4441 errorText_ = errorStream_.str();

// Kick off looping capture on the capture buffer.
4446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4448 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4449 result = buffer->Start( DSCBSTART_LOOPING );

4450 if ( FAILED( result ) ) {

4451 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

4452 errorText_ = errorStream_.str();

// Reset drain bookkeeping and the stop-signaling event, then go live.
4457 handle->drainCounter = 0;

4458 handle->internalDrain = false;

4459 ResetEvent( handle->condition );

4460 stream_.state = STREAM_RUNNING;

4463 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4466 void RtApiDs :: stopStream()
\r
4469 if ( stream_.state == STREAM_STOPPED ) {
\r
4470 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4471 error( RtAudioError::WARNING );
\r
4475 HRESULT result = 0;
\r
4478 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4479 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4480 if ( handle->drainCounter == 0 ) {
\r
4481 handle->drainCounter = 2;
\r
4482 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4485 stream_.state = STREAM_STOPPED;
\r
4487 // Stop the buffer and clear memory
\r
4488 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4489 result = buffer->Stop();
\r
4490 if ( FAILED( result ) ) {
\r
4491 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4492 errorText_ = errorStream_.str();
\r
4496 // Lock the buffer and clear it so that if we start to play again,
\r
4497 // we won't have old data playing.
\r
4498 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4499 if ( FAILED( result ) ) {
\r
4500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4501 errorText_ = errorStream_.str();
\r
4505 // Zero the DS buffer
\r
4506 ZeroMemory( audioPtr, dataLen );
\r
4508 // Unlock the DS buffer
\r
4509 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4510 if ( FAILED( result ) ) {
\r
4511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4512 errorText_ = errorStream_.str();
\r
4516 // If we start playing again, we must begin at beginning of buffer.
\r
4517 handle->bufferPointer[0] = 0;
\r
4520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4521 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4525 stream_.state = STREAM_STOPPED;
\r
4527 result = buffer->Stop();
\r
4528 if ( FAILED( result ) ) {
\r
4529 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4530 errorText_ = errorStream_.str();
\r
4534 // Lock the buffer and clear it so that if we start to play again,
\r
4535 // we won't have old data playing.
\r
4536 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4537 if ( FAILED( result ) ) {
\r
4538 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4539 errorText_ = errorStream_.str();
\r
4543 // Zero the DS buffer
\r
4544 ZeroMemory( audioPtr, dataLen );
\r
4546 // Unlock the DS buffer
\r
4547 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4548 if ( FAILED( result ) ) {
\r
4549 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4550 errorText_ = errorStream_.str();
\r
4554 // If we start recording again, we must begin at beginning of buffer.
\r
4555 handle->bufferPointer[1] = 0;
\r
4559 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4560 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4563 void RtApiDs :: abortStream()
\r
4566 if ( stream_.state == STREAM_STOPPED ) {
\r
4567 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4568 error( RtAudioError::WARNING );
\r
4572 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4573 handle->drainCounter = 2;
\r
4578 void RtApiDs :: callbackEvent()
\r
4580 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4581 Sleep( 50 ); // sleep 50 milliseconds
\r
4585 if ( stream_.state == STREAM_CLOSED ) {
\r
4586 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4587 error( RtAudioError::WARNING );
\r
4591 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4592 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4594 // Check if we were draining the stream and signal is finished.
\r
4595 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4597 stream_.state = STREAM_STOPPING;
\r
4598 if ( handle->internalDrain == false )
\r
4599 SetEvent( handle->condition );
\r
4605 // Invoke user callback to get fresh output data UNLESS we are
\r
4606 // draining stream.
\r
4607 if ( handle->drainCounter == 0 ) {
\r
4608 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4609 double streamTime = getStreamTime();
\r
4610 RtAudioStreamStatus status = 0;
\r
4611 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4612 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4613 handle->xrun[0] = false;
\r
4615 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4616 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4617 handle->xrun[1] = false;
\r
4619 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4620 stream_.bufferSize, streamTime, status, info->userData );
\r
4621 if ( cbReturnValue == 2 ) {
\r
4622 stream_.state = STREAM_STOPPING;
\r
4623 handle->drainCounter = 2;
\r
4627 else if ( cbReturnValue == 1 ) {
\r
4628 handle->drainCounter = 1;
\r
4629 handle->internalDrain = true;
\r
4634 DWORD currentWritePointer, safeWritePointer;
\r
4635 DWORD currentReadPointer, safeReadPointer;
\r
4636 UINT nextWritePointer;
\r
4638 LPVOID buffer1 = NULL;
\r
4639 LPVOID buffer2 = NULL;
\r
4640 DWORD bufferSize1 = 0;
\r
4641 DWORD bufferSize2 = 0;
\r
4646 if ( buffersRolling == false ) {
\r
4647 if ( stream_.mode == DUPLEX ) {
\r
4648 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4650 // It takes a while for the devices to get rolling. As a result,
\r
4651 // there's no guarantee that the capture and write device pointers
\r
4652 // will move in lockstep. Wait here for both devices to start
\r
4653 // rolling, and then set our buffer pointers accordingly.
\r
4654 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4655 // bytes later than the write buffer.
\r
4657 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4658 // take place between the two GetCurrentPosition calls... but I'm
\r
4659 // really not sure how to solve the problem. Temporarily boost to
\r
4660 // Realtime priority, maybe; but I'm not sure what priority the
\r
4661 // DirectSound service threads run at. We *should* be roughly
\r
4662 // within a ms or so of correct.
\r
4664 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4665 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4667 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4669 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4670 if ( FAILED( result ) ) {
\r
4671 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4672 errorText_ = errorStream_.str();
\r
4673 error( RtAudioError::SYSTEM_ERROR );
\r
4676 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4677 if ( FAILED( result ) ) {
\r
4678 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4679 errorText_ = errorStream_.str();
\r
4680 error( RtAudioError::SYSTEM_ERROR );
\r
4684 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4685 if ( FAILED( result ) ) {
\r
4686 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4687 errorText_ = errorStream_.str();
\r
4688 error( RtAudioError::SYSTEM_ERROR );
\r
4691 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4692 if ( FAILED( result ) ) {
\r
4693 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4694 errorText_ = errorStream_.str();
\r
4695 error( RtAudioError::SYSTEM_ERROR );
\r
4698 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4702 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4704 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4705 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4706 handle->bufferPointer[1] = safeReadPointer;
\r
4708 else if ( stream_.mode == OUTPUT ) {
\r
4710 // Set the proper nextWritePosition after initial startup.
\r
4711 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4712 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4713 if ( FAILED( result ) ) {
\r
4714 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4715 errorText_ = errorStream_.str();
\r
4716 error( RtAudioError::SYSTEM_ERROR );
\r
4719 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4720 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4723 buffersRolling = true;
\r
4726 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4728 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4730 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4731 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4732 bufferBytes *= formatBytes( stream_.userFormat );
\r
4733 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4736 // Setup parameters and do buffer conversion if necessary.
\r
4737 if ( stream_.doConvertBuffer[0] ) {
\r
4738 buffer = stream_.deviceBuffer;
\r
4739 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4740 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4741 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4744 buffer = stream_.userBuffer[0];
\r
4745 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4746 bufferBytes *= formatBytes( stream_.userFormat );
\r
4749 // No byte swapping necessary in DirectSound implementation.
\r
4751 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4752 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4754 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4755 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4757 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4758 nextWritePointer = handle->bufferPointer[0];
\r
4760 DWORD endWrite, leadPointer;
\r
4762 // Find out where the read and "safe write" pointers are.
\r
4763 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4764 if ( FAILED( result ) ) {
\r
4765 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4766 errorText_ = errorStream_.str();
\r
4767 error( RtAudioError::SYSTEM_ERROR );
\r
4771 // We will copy our output buffer into the region between
\r
4772 // safeWritePointer and leadPointer. If leadPointer is not
\r
4773 // beyond the next endWrite position, wait until it is.
\r
4774 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4775 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4776 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4777 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4778 endWrite = nextWritePointer + bufferBytes;
\r
4780 // Check whether the entire write region is behind the play pointer.
\r
4781 if ( leadPointer >= endWrite ) break;
\r
4783 // If we are here, then we must wait until the leadPointer advances
\r
4784 // beyond the end of our next write region. We use the
\r
4785 // Sleep() function to suspend operation until that happens.
\r
4786 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4787 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4788 if ( millis < 1.0 ) millis = 1.0;
\r
4789 Sleep( (DWORD) millis );
\r
4792 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4793 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4794 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4795 handle->xrun[0] = true;
\r
4796 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4797 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4798 handle->bufferPointer[0] = nextWritePointer;
\r
4799 endWrite = nextWritePointer + bufferBytes;
\r
4802 // Lock free space in the buffer
\r
4803 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4804 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4805 if ( FAILED( result ) ) {
\r
4806 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4807 errorText_ = errorStream_.str();
\r
4808 error( RtAudioError::SYSTEM_ERROR );
\r
4812 // Copy our buffer into the DS buffer
\r
4813 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4814 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4816 // Update our buffer offset and unlock sound buffer
\r
4817 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4818 if ( FAILED( result ) ) {
\r
4819 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4820 errorText_ = errorStream_.str();
\r
4821 error( RtAudioError::SYSTEM_ERROR );
\r
4824 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4825 handle->bufferPointer[0] = nextWritePointer;
\r
4827 if ( handle->drainCounter ) {
\r
4828 handle->drainCounter++;
\r
4833 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4835 // Setup parameters.
\r
4836 if ( stream_.doConvertBuffer[1] ) {
\r
4837 buffer = stream_.deviceBuffer;
\r
4838 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4839 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4842 buffer = stream_.userBuffer[1];
\r
4843 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4844 bufferBytes *= formatBytes( stream_.userFormat );
\r
4847 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4848 long nextReadPointer = handle->bufferPointer[1];
\r
4849 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4851 // Find out where the write and "safe read" pointers are.
\r
4852 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4853 if ( FAILED( result ) ) {
\r
4854 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4855 errorText_ = errorStream_.str();
\r
4856 error( RtAudioError::SYSTEM_ERROR );
\r
4860 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4861 DWORD endRead = nextReadPointer + bufferBytes;
\r
4863 // Handling depends on whether we are INPUT or DUPLEX.
\r
4864 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4865 // then a wait here will drag the write pointers into the forbidden zone.
\r
4867 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4868 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4869 // practical way to sync up the read and write pointers reliably, given the
\r
4870 // the very complex relationship between phase and increment of the read and write
\r
4873 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4874 // provide a pre-roll period of 0.5 seconds in which we return
\r
4875 // zeros from the read buffer while the pointers sync up.
\r
4877 if ( stream_.mode == DUPLEX ) {
\r
4878 if ( safeReadPointer < endRead ) {
\r
4879 if ( duplexPrerollBytes <= 0 ) {
\r
4880 // Pre-roll time over. Be more agressive.
\r
4881 int adjustment = endRead-safeReadPointer;
\r
4883 handle->xrun[1] = true;
\r
4885 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4886 // and perform fine adjustments later.
\r
4887 // - small adjustments: back off by twice as much.
\r
4888 if ( adjustment >= 2*bufferBytes )
\r
4889 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4891 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4893 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4897 // In pre=roll time. Just do it.
\r
4898 nextReadPointer = safeReadPointer - bufferBytes;
\r
4899 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4901 endRead = nextReadPointer + bufferBytes;
\r
4904 else { // mode == INPUT
\r
4905 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4906 // See comments for playback.
\r
4907 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4908 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4909 if ( millis < 1.0 ) millis = 1.0;
\r
4910 Sleep( (DWORD) millis );
\r
4912 // Wake up and find out where we are now.
\r
4913 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4914 if ( FAILED( result ) ) {
\r
4915 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4916 errorText_ = errorStream_.str();
\r
4917 error( RtAudioError::SYSTEM_ERROR );
\r
4921 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4925 // Lock free space in the buffer
\r
4926 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4927 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4928 if ( FAILED( result ) ) {
\r
4929 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4930 errorText_ = errorStream_.str();
\r
4931 error( RtAudioError::SYSTEM_ERROR );
\r
4935 if ( duplexPrerollBytes <= 0 ) {
\r
4936 // Copy our buffer into the DS buffer
\r
4937 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4938 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4941 memset( buffer, 0, bufferSize1 );
\r
4942 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4943 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4946 // Update our buffer offset and unlock sound buffer
\r
4947 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4948 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4949 if ( FAILED( result ) ) {
\r
4950 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4951 errorText_ = errorStream_.str();
\r
4952 error( RtAudioError::SYSTEM_ERROR );
\r
4955 handle->bufferPointer[1] = nextReadPointer;
\r
4957 // No byte swapping necessary in DirectSound implementation.
\r
4959 // If necessary, convert 8-bit data from unsigned to signed.
\r
4960 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4961 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4963 // Do buffer conversion if necessary.
\r
4964 if ( stream_.doConvertBuffer[1] )
\r
4965 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4969 RtApi::tickStreamTime();
\r
4972 // Definitions for utility functions and callbacks
\r
4973 // specific to the DirectSound implementation.
\r
4975 static unsigned __stdcall callbackHandler( void *ptr )
\r
4977 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4978 RtApiDs *object = (RtApiDs *) info->object;
\r
4979 bool* isRunning = &info->isRunning;
\r
4981 while ( *isRunning == true ) {
\r
4982 object->callbackEvent();
\r
4985 _endthreadex( 0 );
\r
4989 #include "tchar.h"
\r
4991 static std::string convertTChar( LPCTSTR name )
\r
4993 #if defined( UNICODE ) || defined( _UNICODE )
\r
4994 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4995 std::string s( length-1, '\0' );
\r
4996 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
4998 std::string s( name );
\r
5004 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5005 LPCTSTR description,
\r
5007 LPVOID lpContext )
\r
5009 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5010 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5013 bool validDevice = false;
\r
5014 if ( probeInfo.isInput == true ) {
\r
5016 LPDIRECTSOUNDCAPTURE object;
\r
5018 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5019 if ( hr != DS_OK ) return TRUE;
\r
5021 caps.dwSize = sizeof(caps);
\r
5022 hr = object->GetCaps( &caps );
\r
5023 if ( hr == DS_OK ) {
\r
5024 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5025 validDevice = true;
\r
5027 object->Release();
\r
5031 LPDIRECTSOUND object;
\r
5032 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5033 if ( hr != DS_OK ) return TRUE;
\r
5035 caps.dwSize = sizeof(caps);
\r
5036 hr = object->GetCaps( &caps );
\r
5037 if ( hr == DS_OK ) {
\r
5038 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5039 validDevice = true;
\r
5041 object->Release();
\r
5044 // If good device, then save its name and guid.
\r
5045 std::string name = convertTChar( description );
\r
5046 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5047 if ( lpguid == NULL )
\r
5048 name = "Default Device";
\r
5049 if ( validDevice ) {
\r
5050 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5051 if ( dsDevices[i].name == name ) {
\r
5052 dsDevices[i].found = true;
\r
5053 if ( probeInfo.isInput ) {
\r
5054 dsDevices[i].id[1] = lpguid;
\r
5055 dsDevices[i].validId[1] = true;
\r
5058 dsDevices[i].id[0] = lpguid;
\r
5059 dsDevices[i].validId[0] = true;
\r
5066 device.name = name;
\r
5067 device.found = true;
\r
5068 if ( probeInfo.isInput ) {
\r
5069 device.id[1] = lpguid;
\r
5070 device.validId[1] = true;
\r
5073 device.id[0] = lpguid;
\r
5074 device.validId[0] = true;
\r
5076 dsDevices.push_back( device );
\r
5082 static const char* getErrorString( int code )
\r
5086 case DSERR_ALLOCATED:
\r
5087 return "Already allocated";
\r
5089 case DSERR_CONTROLUNAVAIL:
\r
5090 return "Control unavailable";
\r
5092 case DSERR_INVALIDPARAM:
\r
5093 return "Invalid parameter";
\r
5095 case DSERR_INVALIDCALL:
\r
5096 return "Invalid call";
\r
5098 case DSERR_GENERIC:
\r
5099 return "Generic error";
\r
5101 case DSERR_PRIOLEVELNEEDED:
\r
5102 return "Priority level needed";
\r
5104 case DSERR_OUTOFMEMORY:
\r
5105 return "Out of memory";
\r
5107 case DSERR_BADFORMAT:
\r
5108 return "The sample rate or the channel format is not supported";
\r
5110 case DSERR_UNSUPPORTED:
\r
5111 return "Not supported";
\r
5113 case DSERR_NODRIVER:
\r
5114 return "No driver";
\r
5116 case DSERR_ALREADYINITIALIZED:
\r
5117 return "Already initialized";
\r
5119 case DSERR_NOAGGREGATION:
\r
5120 return "No aggregation";
\r
5122 case DSERR_BUFFERLOST:
\r
5123 return "Buffer lost";
\r
5125 case DSERR_OTHERAPPHASPRIO:
\r
5126 return "Another application already has priority";
\r
5128 case DSERR_UNINITIALIZED:
\r
5129 return "Uninitialized";
\r
5132 return "DirectSound unknown error";
\r
5135 //******************** End of __WINDOWS_DS__ *********************//
\r
5139 #if defined(__LINUX_ALSA__)
\r
5141 #include <alsa/asoundlib.h>
\r
5142 #include <unistd.h>
\r
5144 // A structure to hold various information related to the ALSA API
\r
5145 // implementation.
\r
5146 struct AlsaHandle {
\r
5147 snd_pcm_t *handles[2];
\r
5148 bool synchronized;
\r
5150 pthread_cond_t runnable_cv;
\r
5154 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5157 static void *alsaCallbackHandler( void * ptr );
\r
5159 RtApiAlsa :: RtApiAlsa()
\r
5161 // Nothing to do here.
\r
5164 RtApiAlsa :: ~RtApiAlsa()
\r
5166 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5169 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5171 unsigned nDevices = 0;
\r
5172 int result, subdevice, card;
\r
5174 snd_ctl_t *handle;
\r
5176 // Count cards and devices
\r
5178 snd_card_next( &card );
\r
5179 while ( card >= 0 ) {
\r
5180 sprintf( name, "hw:%d", card );
\r
5181 result = snd_ctl_open( &handle, name, 0 );
\r
5182 if ( result < 0 ) {
\r
5183 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5184 errorText_ = errorStream_.str();
\r
5185 error( RtAudioError::WARNING );
\r
5190 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5191 if ( result < 0 ) {
\r
5192 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5193 errorText_ = errorStream_.str();
\r
5194 error( RtAudioError::WARNING );
\r
5197 if ( subdevice < 0 )
\r
5202 snd_ctl_close( handle );
\r
5203 snd_card_next( &card );
\r
5206 result = snd_ctl_open( &handle, "default", 0 );
\r
5207 if (result == 0) {
\r
5209 snd_ctl_close( handle );
\r
5215 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5217 RtAudio::DeviceInfo info;
\r
5218 info.probed = false;
\r
5220 unsigned nDevices = 0;
\r
5221 int result, subdevice, card;
\r
5223 snd_ctl_t *chandle;
\r
5225 // Count cards and devices
\r
5227 snd_card_next( &card );
\r
5228 while ( card >= 0 ) {
\r
5229 sprintf( name, "hw:%d", card );
\r
5230 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5231 if ( result < 0 ) {
\r
5232 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5233 errorText_ = errorStream_.str();
\r
5234 error( RtAudioError::WARNING );
\r
5239 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5240 if ( result < 0 ) {
\r
5241 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5242 errorText_ = errorStream_.str();
\r
5243 error( RtAudioError::WARNING );
\r
5246 if ( subdevice < 0 ) break;
\r
5247 if ( nDevices == device ) {
\r
5248 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5254 snd_ctl_close( chandle );
\r
5255 snd_card_next( &card );
\r
5258 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5259 if ( result == 0 ) {
\r
5260 if ( nDevices == device ) {
\r
5261 strcpy( name, "default" );
\r
5267 if ( nDevices == 0 ) {
\r
5268 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5269 error( RtAudioError::INVALID_USE );
\r
5273 if ( device >= nDevices ) {
\r
5274 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5275 error( RtAudioError::INVALID_USE );
\r
5281 // If a stream is already open, we cannot probe the stream devices.
\r
5282 // Thus, use the saved results.
\r
5283 if ( stream_.state != STREAM_CLOSED &&
\r
5284 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5285 snd_ctl_close( chandle );
\r
5286 if ( device >= devices_.size() ) {
\r
5287 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5288 error( RtAudioError::WARNING );
\r
5291 return devices_[ device ];
\r
5294 int openMode = SND_PCM_ASYNC;
\r
5295 snd_pcm_stream_t stream;
\r
5296 snd_pcm_info_t *pcminfo;
\r
5297 snd_pcm_info_alloca( &pcminfo );
\r
5298 snd_pcm_t *phandle;
\r
5299 snd_pcm_hw_params_t *params;
\r
5300 snd_pcm_hw_params_alloca( ¶ms );
\r
5302 // First try for playback unless default device (which has subdev -1)
\r
5303 stream = SND_PCM_STREAM_PLAYBACK;
\r
5304 snd_pcm_info_set_stream( pcminfo, stream );
\r
5305 if ( subdevice != -1 ) {
\r
5306 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5307 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5309 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5310 if ( result < 0 ) {
\r
5311 // Device probably doesn't support playback.
\r
5312 goto captureProbe;
\r
5316 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5317 if ( result < 0 ) {
\r
5318 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5319 errorText_ = errorStream_.str();
\r
5320 error( RtAudioError::WARNING );
\r
5321 goto captureProbe;
\r
5324 // The device is open ... fill the parameter structure.
\r
5325 result = snd_pcm_hw_params_any( phandle, params );
\r
5326 if ( result < 0 ) {
\r
5327 snd_pcm_close( phandle );
\r
5328 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5329 errorText_ = errorStream_.str();
\r
5330 error( RtAudioError::WARNING );
\r
5331 goto captureProbe;
\r
5334 // Get output channel information.
\r
5335 unsigned int value;
\r
5336 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5337 if ( result < 0 ) {
\r
5338 snd_pcm_close( phandle );
\r
5339 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5340 errorText_ = errorStream_.str();
\r
5341 error( RtAudioError::WARNING );
\r
5342 goto captureProbe;
\r
5344 info.outputChannels = value;
\r
5345 snd_pcm_close( phandle );
\r
5348 stream = SND_PCM_STREAM_CAPTURE;
\r
5349 snd_pcm_info_set_stream( pcminfo, stream );
\r
5351 // Now try for capture unless default device (with subdev = -1)
\r
5352 if ( subdevice != -1 ) {
\r
5353 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5354 snd_ctl_close( chandle );
\r
5355 if ( result < 0 ) {
\r
5356 // Device probably doesn't support capture.
\r
5357 if ( info.outputChannels == 0 ) return info;
\r
5358 goto probeParameters;
\r
5362 snd_ctl_close( chandle );
\r
5364 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5365 if ( result < 0 ) {
\r
5366 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5367 errorText_ = errorStream_.str();
\r
5368 error( RtAudioError::WARNING );
\r
5369 if ( info.outputChannels == 0 ) return info;
\r
5370 goto probeParameters;
\r
5373 // The device is open ... fill the parameter structure.
\r
5374 result = snd_pcm_hw_params_any( phandle, params );
\r
5375 if ( result < 0 ) {
\r
5376 snd_pcm_close( phandle );
\r
5377 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5378 errorText_ = errorStream_.str();
\r
5379 error( RtAudioError::WARNING );
\r
5380 if ( info.outputChannels == 0 ) return info;
\r
5381 goto probeParameters;
\r
5384 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5385 if ( result < 0 ) {
\r
5386 snd_pcm_close( phandle );
\r
5387 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5388 errorText_ = errorStream_.str();
\r
5389 error( RtAudioError::WARNING );
\r
5390 if ( info.outputChannels == 0 ) return info;
\r
5391 goto probeParameters;
\r
5393 info.inputChannels = value;
\r
5394 snd_pcm_close( phandle );
\r
5396 // If device opens for both playback and capture, we determine the channels.
\r
5397 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5398 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5400 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5401 if ( device == 0 && info.outputChannels > 0 )
\r
5402 info.isDefaultOutput = true;
\r
5403 if ( device == 0 && info.inputChannels > 0 )
\r
5404 info.isDefaultInput = true;
\r
5407 // At this point, we just need to figure out the supported data
\r
5408 // formats and sample rates. We'll proceed by opening the device in
\r
5409 // the direction with the maximum number of channels, or playback if
\r
5410 // they are equal. This might limit our sample rate options, but so
\r
5413 if ( info.outputChannels >= info.inputChannels )
\r
5414 stream = SND_PCM_STREAM_PLAYBACK;
\r
5416 stream = SND_PCM_STREAM_CAPTURE;
\r
5417 snd_pcm_info_set_stream( pcminfo, stream );
\r
5419 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5420 if ( result < 0 ) {
\r
5421 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5422 errorText_ = errorStream_.str();
\r
5423 error( RtAudioError::WARNING );
\r
5427 // The device is open ... fill the parameter structure.
\r
5428 result = snd_pcm_hw_params_any( phandle, params );
\r
5429 if ( result < 0 ) {
\r
5430 snd_pcm_close( phandle );
\r
5431 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5432 errorText_ = errorStream_.str();
\r
5433 error( RtAudioError::WARNING );
\r
5437 // Test our discrete set of sample rate values.
\r
5438 info.sampleRates.clear();
\r
5439 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5440 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5441 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5443 if ( info.sampleRates.size() == 0 ) {
\r
5444 snd_pcm_close( phandle );
\r
5445 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5446 errorText_ = errorStream_.str();
\r
5447 error( RtAudioError::WARNING );
\r
5451 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5452 snd_pcm_format_t format;
\r
5453 info.nativeFormats = 0;
\r
5454 format = SND_PCM_FORMAT_S8;
\r
5455 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5456 info.nativeFormats |= RTAUDIO_SINT8;
\r
5457 format = SND_PCM_FORMAT_S16;
\r
5458 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5459 info.nativeFormats |= RTAUDIO_SINT16;
\r
5460 format = SND_PCM_FORMAT_S24;
\r
5461 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5462 info.nativeFormats |= RTAUDIO_SINT24;
\r
5463 format = SND_PCM_FORMAT_S32;
\r
5464 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5465 info.nativeFormats |= RTAUDIO_SINT32;
\r
5466 format = SND_PCM_FORMAT_FLOAT;
\r
5467 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5468 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5469 format = SND_PCM_FORMAT_FLOAT64;
\r
5470 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5471 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5473 // Check that we have at least one supported format
\r
5474 if ( info.nativeFormats == 0 ) {
\r
5475 snd_pcm_close( phandle );
\r
5476 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5477 errorText_ = errorStream_.str();
\r
5478 error( RtAudioError::WARNING );
\r
5482 // Get the device name
\r
5484 result = snd_card_get_name( card, &cardname );
\r
5485 if ( result >= 0 ) {
\r
5486 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5491 // That's all ... close the device and return
\r
5492 snd_pcm_close( phandle );
\r
5493 info.probed = true;
\r
5497 void RtApiAlsa :: saveDeviceInfo( void )
\r
5501 unsigned int nDevices = getDeviceCount();
\r
5502 devices_.resize( nDevices );
\r
5503 for ( unsigned int i=0; i<nDevices; i++ )
\r
5504 devices_[i] = getDeviceInfo( i );
\r
// Open the ALSA PCM device `device` for direction `mode` and configure it
// for the requested channels/firstChannel/sampleRate/format/bufferSize
// (bufferSize is in/out: updated to the period size actually granted).
// Returns true on success; on failure sets errorText_ and falls through to
// the cleanup path at the bottom, which releases any partially built state.
// NOTE(review): this extract elides brace-only lines and the goto labels
// (foundDevice:, setFormat:, error:) plus a few declarations such as
// `char name[64];` and `int dir = 0;` — the surrounding logic is annotated
// on that assumption; confirm against the full file.
5507 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5508 unsigned int firstChannel, unsigned int sampleRate,
5509 RtAudioFormat format, unsigned int *bufferSize,
5510 RtAudio::StreamOptions *options )
5513 #if defined(__RTAUDIO_DEBUG__)
5514 snd_output_t *out;
5515 snd_output_stdio_attach(&out, stderr, 0);
5518 // I'm not using the "plug" interface ... too much inconsistent behavior.
5520 unsigned nDevices = 0;
5521 int result, subdevice, card;
5523 snd_ctl_t *chandle;
// User may force the ALSA "default" virtual device instead of hw:card,dev.
5525 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
5526 snprintf(name, sizeof(name), "%s", "default");
5528 // Count cards and devices
// Walk every card/subdevice pair until the flat index nDevices matches
// the requested device id; the matching "hw:card,subdevice" string is
// left in `name`.
5530 snd_card_next( &card );
5531 while ( card >= 0 ) {
5532 sprintf( name, "hw:%d", card );
5533 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
5534 if ( result < 0 ) {
5535 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
5536 errorText_ = errorStream_.str();
5541 result = snd_ctl_pcm_next_device( chandle, &subdevice );
5542 if ( result < 0 ) break;
5543 if ( subdevice < 0 ) break;
5544 if ( nDevices == device ) {
5545 sprintf( name, "hw:%d,%d", card, subdevice );
5546 snd_ctl_close( chandle );
5551 snd_ctl_close( chandle );
5552 snd_card_next( &card );
// The ALSA "default" device is appended after all hardware devices.
5555 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
5556 if ( result == 0 ) {
5557 if ( nDevices == device ) {
5558 strcpy( name, "default" );
5564 if ( nDevices == 0 ) {
5565 // This should not happen because a check is made before this function is called.
5566 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
5570 if ( device >= nDevices ) {
5571 // This should not happen because a check is made before this function is called.
5572 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
5579 // The getDeviceInfo() function will not work for a device that is
5580 // already open. Thus, we'll probe the system before opening a
5581 // stream and save the results for use by getDeviceInfo().
5582 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
5583 this->saveDeviceInfo();
5585 snd_pcm_stream_t stream;
5586 if ( mode == OUTPUT )
5587 stream = SND_PCM_STREAM_PLAYBACK;
5589 stream = SND_PCM_STREAM_CAPTURE;
5591 snd_pcm_t *phandle;
5592 int openMode = SND_PCM_ASYNC;
5593 result = snd_pcm_open( &phandle, name, stream, openMode );
5594 if ( result < 0 ) {
5595 if ( mode == OUTPUT )
5596 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
5598 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
5599 errorText_ = errorStream_.str();
5603 // Fill the parameter structure.
5604 snd_pcm_hw_params_t *hw_params;
5605 snd_pcm_hw_params_alloca( &hw_params );
5606 result = snd_pcm_hw_params_any( phandle, hw_params );
5607 if ( result < 0 ) {
5608 snd_pcm_close( phandle );
5609 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
5610 errorText_ = errorStream_.str();
5614 #if defined(__RTAUDIO_DEBUG__)
5615 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
5616 snd_pcm_hw_params_dump( hw_params, out );
5619 // Set access ... check user preference.
// Try the user's preferred (non-)interleaved access first and fall back
// to the other layout, recording which one the device actually accepted
// so callbackEvent() knows whether a layout conversion is needed.
5620 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
5621 stream_.userInterleaved = false;
5622 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
5623 if ( result < 0 ) {
5624 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
5625 stream_.deviceInterleaved[mode] = true;
5628 stream_.deviceInterleaved[mode] = false;
5631 stream_.userInterleaved = true;
5632 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
5633 if ( result < 0 ) {
5634 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
5635 stream_.deviceInterleaved[mode] = false;
5638 stream_.deviceInterleaved[mode] = true;
5641 if ( result < 0 ) {
5642 snd_pcm_close( phandle );
5643 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
5644 errorText_ = errorStream_.str();
5648 // Determine how to set the device format.
5649 stream_.userFormat = format;
5650 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
5652 if ( format == RTAUDIO_SINT8 )
5653 deviceFormat = SND_PCM_FORMAT_S8;
5654 else if ( format == RTAUDIO_SINT16 )
5655 deviceFormat = SND_PCM_FORMAT_S16;
5656 else if ( format == RTAUDIO_SINT24 )
5657 deviceFormat = SND_PCM_FORMAT_S24;
5658 else if ( format == RTAUDIO_SINT32 )
5659 deviceFormat = SND_PCM_FORMAT_S32;
5660 else if ( format == RTAUDIO_FLOAT32 )
5661 deviceFormat = SND_PCM_FORMAT_FLOAT;
5662 else if ( format == RTAUDIO_FLOAT64 )
5663 deviceFormat = SND_PCM_FORMAT_FLOAT64;
5665 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
5666 stream_.deviceFormat[mode] = format;
5670 // The user requested format is not natively supported by the device.
// Fallback search from widest to narrowest format; conversion to the
// user format is handled later by the buffer-conversion machinery.
5671 deviceFormat = SND_PCM_FORMAT_FLOAT64;
5672 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
5673 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
5677 deviceFormat = SND_PCM_FORMAT_FLOAT;
5678 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5679 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
5683 deviceFormat = SND_PCM_FORMAT_S32;
5684 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5685 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
5689 deviceFormat = SND_PCM_FORMAT_S24;
5690 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5691 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
5695 deviceFormat = SND_PCM_FORMAT_S16;
5696 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5697 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5701 deviceFormat = SND_PCM_FORMAT_S8;
5702 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5703 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5707 // If we get here, no supported format was found.
5708 snd_pcm_close( phandle );
5709 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
5710 errorText_ = errorStream_.str();
5714 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
5715 if ( result < 0 ) {
5716 snd_pcm_close( phandle );
5717 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
5718 errorText_ = errorStream_.str();
5722 // Determine whether byte-swaping is necessary.
// snd_pcm_format_cpu_endian returns 1 for CPU-endian, 0 for opposite
// endian (swap needed), negative on error; S8 has no endianness.
5723 stream_.doByteSwap[mode] = false;
5724 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
5725 result = snd_pcm_format_cpu_endian( deviceFormat );
5726 if ( result == 0 )
5727 stream_.doByteSwap[mode] = true;
5728 else if (result < 0) {
5729 snd_pcm_close( phandle );
5730 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
5731 errorText_ = errorStream_.str();
5736 // Set the sample rate.
// set_rate_near may adjust sampleRate in place to the closest rate the
// hardware supports; the adjusted value is stored in stream_.sampleRate.
5737 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
5738 if ( result < 0 ) {
5739 snd_pcm_close( phandle );
5740 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
5741 errorText_ = errorStream_.str();
5745 // Determine the number of channels for this device. We support a possible
5746 // minimum device channel number > than the value requested by the user.
5747 stream_.nUserChannels[mode] = channels;
5748 unsigned int value;
5749 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
5750 unsigned int deviceChannels = value;
5751 if ( result < 0 || deviceChannels < channels + firstChannel ) {
5752 snd_pcm_close( phandle );
5753 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
5754 errorText_ = errorStream_.str();
5758 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
5759 if ( result < 0 ) {
5760 snd_pcm_close( phandle );
5761 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
5762 errorText_ = errorStream_.str();
// Open at least the device's minimum channel count, padding above the
// user's request if required; extra channels are stripped during
// buffer conversion.
5765 deviceChannels = value;
5766 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
5767 stream_.nDeviceChannels[mode] = deviceChannels;
5769 // Set the device channels.
5770 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
5771 if ( result < 0 ) {
5772 snd_pcm_close( phandle );
5773 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
5774 errorText_ = errorStream_.str();
5778 // Set the buffer (or period) size.
5780 snd_pcm_uframes_t periodSize = *bufferSize;
5781 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
5782 if ( result < 0 ) {
5783 snd_pcm_close( phandle );
5784 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
5785 errorText_ = errorStream_.str();
// Report the granted period size back to the caller.
5788 *bufferSize = periodSize;
5790 // Set the buffer number, which in ALSA is referred to as the "period".
5791 unsigned int periods = 0;
5792 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
5793 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
5794 if ( periods < 2 ) periods = 4; // a fairly safe default value
5795 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
5796 if ( result < 0 ) {
5797 snd_pcm_close( phandle );
5798 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
5799 errorText_ = errorStream_.str();
5803 // If attempting to setup a duplex stream, the bufferSize parameter
5804 // MUST be the same in both directions!
5805 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
5806 snd_pcm_close( phandle );
5807 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
5808 errorText_ = errorStream_.str();
5812 stream_.bufferSize = *bufferSize;
5814 // Install the hardware configuration
5815 result = snd_pcm_hw_params( phandle, hw_params );
5816 if ( result < 0 ) {
5817 snd_pcm_close( phandle );
5818 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
5819 errorText_ = errorStream_.str();
5823 #if defined(__RTAUDIO_DEBUG__)
5824 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
5825 snd_pcm_hw_params_dump( hw_params, out );
5828 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
5829 snd_pcm_sw_params_t *sw_params = NULL;
5830 snd_pcm_sw_params_alloca( &sw_params );
5831 snd_pcm_sw_params_current( phandle, sw_params );
5832 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
5833 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
5834 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
5836 // The following two settings were suggested by Theo Veenker
5837 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
5838 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
5840 // here are two options for a fix
5841 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Silence the whole ring buffer (boundary = "everything") so xruns play
// zeros instead of stale data.
5842 snd_pcm_uframes_t val;
5843 snd_pcm_sw_params_get_boundary( sw_params, &val );
5844 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
5846 result = snd_pcm_sw_params( phandle, sw_params );
5847 if ( result < 0 ) {
5848 snd_pcm_close( phandle );
5849 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
5850 errorText_ = errorStream_.str();
5854 #if defined(__RTAUDIO_DEBUG__)
5855 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
5856 snd_pcm_sw_params_dump( sw_params, out );
5859 // Set flags for buffer conversion
5860 stream_.doConvertBuffer[mode] = false;
5861 if ( stream_.userFormat != stream_.deviceFormat[mode] )
5862 stream_.doConvertBuffer[mode] = true;
5863 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
5864 stream_.doConvertBuffer[mode] = true;
5865 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5866 stream_.nUserChannels[mode] > 1 )
5867 stream_.doConvertBuffer[mode] = true;
5869 // Allocate the ApiHandle if necessary and then save.
// Only the first direction opened allocates the AlsaHandle; the second
// (duplex) open reuses it and just stores its pcm handle in slot [mode].
5870 AlsaHandle *apiInfo = 0;
5871 if ( stream_.apiHandle == 0 ) {
5873 apiInfo = (AlsaHandle *) new AlsaHandle;
5875 catch ( std::bad_alloc& ) {
5876 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
5880 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
5881 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
5885 stream_.apiHandle = (void *) apiInfo;
5886 apiInfo->handles[0] = 0;
5887 apiInfo->handles[1] = 0;
5890 apiInfo = (AlsaHandle *) stream_.apiHandle;
5892 apiInfo->handles[mode] = phandle;
5895 // Allocate necessary internal buffers.
5896 unsigned long bufferBytes;
5897 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5898 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5899 if ( stream_.userBuffer[mode] == NULL ) {
5900 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
5904 if ( stream_.doConvertBuffer[mode] ) {
// A single device buffer is shared between directions; reuse the output
// one for input when it is already large enough.
5906 bool makeBuffer = true;
5907 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5908 if ( mode == INPUT ) {
5909 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5910 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5911 if ( bufferBytes <= bytesOut ) makeBuffer = false;
5915 if ( makeBuffer ) {
5916 bufferBytes *= *bufferSize;
5917 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5918 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5919 if ( stream_.deviceBuffer == NULL ) {
5920 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
5926 stream_.sampleRate = sampleRate;
5927 stream_.nBuffers = periods;
5928 stream_.device[mode] = device;
5929 stream_.state = STREAM_STOPPED;
5931 // Setup the buffer conversion information structure.
5932 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5934 // Setup thread if necessary.
5935 if ( stream_.mode == OUTPUT && mode == INPUT ) {
5936 // We had already set up an output stream.
5937 stream_.mode = DUPLEX;
5938 // Link the streams if possible.
// snd_pcm_link makes the two handles start/stop atomically together.
5939 apiInfo->synchronized = false;
5940 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
5941 apiInfo->synchronized = true;
5943 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
5944 error( RtAudioError::WARNING );
5948 stream_.mode = mode;
5950 // Setup callback thread.
5951 stream_.callbackInfo.object = (void *) this;
5953 // Set the thread attributes for joinable and realtime scheduling
5954 // priority (optional). The higher priority will only take affect
5955 // if the program is run as root or suid. Note, under Linux
5956 // processes with CAP_SYS_NICE privilege, a user can change
5957 // scheduling policy and priority (thus need not be root). See
5958 // POSIX "capabilities".
5959 pthread_attr_t attr;
5960 pthread_attr_init( &attr );
5961 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
5963 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
5964 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
5965 // We previously attempted to increase the audio callback priority
5966 // to SCHED_RR here via the attributes. However, while no errors
5967 // were reported in doing so, it did not work. So, now this is
5968 // done in the alsaCallbackHandler function.
5969 stream_.callbackInfo.doRealtime = true;
// Clamp the requested priority into the valid SCHED_RR range.
5970 int priority = options->priority;
5971 int min = sched_get_priority_min( SCHED_RR );
5972 int max = sched_get_priority_max( SCHED_RR );
5973 if ( priority < min ) priority = min;
5974 else if ( priority > max ) priority = max;
5975 stream_.callbackInfo.priority = priority;
5979 stream_.callbackInfo.isRunning = true;
5980 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
5981 pthread_attr_destroy( &attr );
5983 stream_.callbackInfo.isRunning = false;
5984 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error cleanup path (reached via the elided "error:" label): tear down
// the condition variable, pcm handles, api handle and all buffers, then
// mark the stream closed.  Presumably returns FAILURE — the return line
// is elided from this extract; confirm against the full file.
5993 pthread_cond_destroy( &apiInfo->runnable_cv );
5994 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
5995 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
5997 stream_.apiHandle = 0;
6000 if ( phandle) snd_pcm_close( phandle );
6002 for ( int i=0; i<2; i++ ) {
6003 if ( stream_.userBuffer[i] ) {
6004 free( stream_.userBuffer[i] );
6005 stream_.userBuffer[i] = 0;
6009 if ( stream_.deviceBuffer ) {
6010 free( stream_.deviceBuffer );
6011 stream_.deviceBuffer = 0;
6014 stream_.state = STREAM_CLOSED;
\r
// Close the stream: wake and join the callback thread, drop any active
// pcm streams, then release handles and buffers.  Issues only a WARNING
// if no stream is open.  NOTE(review): brace-only and return lines are
// elided from this extract.
6018 void RtApiAlsa :: closeStream()
6020 if ( stream_.state == STREAM_CLOSED ) {
6021 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
6022 error( RtAudioError::WARNING );
6026 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Clearing isRunning makes the callback thread's loop exit; if it is
// currently blocked on runnable_cv (stream stopped), wake it so the
// pthread_join below cannot deadlock.
6027 stream_.callbackInfo.isRunning = false;
6028 MUTEX_LOCK( &stream_.mutex );
6029 if ( stream_.state == STREAM_STOPPED ) {
6030 apiInfo->runnable = true;
6031 pthread_cond_signal( &apiInfo->runnable_cv );
6033 MUTEX_UNLOCK( &stream_.mutex );
6034 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort any still-running pcm streams before closing the handles.
6036 if ( stream_.state == STREAM_RUNNING ) {
6037 stream_.state = STREAM_STOPPED;
6038 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
6039 snd_pcm_drop( apiInfo->handles[0] );
6040 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
6041 snd_pcm_drop( apiInfo->handles[1] );
6045 pthread_cond_destroy( &apiInfo->runnable_cv );
6046 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
6047 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
6049 stream_.apiHandle = 0;
// Free user and device buffers.
6052 for ( int i=0; i<2; i++ ) {
6053 if ( stream_.userBuffer[i] ) {
6054 free( stream_.userBuffer[i] );
6055 stream_.userBuffer[i] = 0;
6059 if ( stream_.deviceBuffer ) {
6060 free( stream_.deviceBuffer );
6061 stream_.deviceBuffer = 0;
6064 stream_.mode = UNINITIALIZED;
6065 stream_.state = STREAM_CLOSED;
\r
// Start the (already open) stream: prepare the pcm device(s) if needed,
// mark the stream RUNNING and wake the callback thread.  Failures are
// reported via errorText_ / error(); issues only a WARNING when already
// running.  NOTE(review): brace-only lines, the early "return;", the
// "unlock:" label and the `int result = 0;` declaration are elided from
// this extract.
6068 void RtApiAlsa :: startStream()
6070 // This method calls snd_pcm_prepare if the device isn't already in that state.
6073 if ( stream_.state == STREAM_RUNNING ) {
6074 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
6075 error( RtAudioError::WARNING );
6079 MUTEX_LOCK( &stream_.mutex );
6082 snd_pcm_state_t state;
6083 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6084 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle unless ALSA already reports it PREPARED.
6085 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6086 state = snd_pcm_state( handle[0] );
6087 if ( state != SND_PCM_STATE_PREPARED ) {
6088 result = snd_pcm_prepare( handle[0] );
6089 if ( result < 0 ) {
6090 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
6091 errorText_ = errorStream_.str();
// Prepare the capture handle; skipped when the handles are linked
// (synchronized), since snd_pcm_link starts both together.
6097 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6098 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
6099 state = snd_pcm_state( handle[1] );
6100 if ( state != SND_PCM_STATE_PREPARED ) {
6101 result = snd_pcm_prepare( handle[1] );
6102 if ( result < 0 ) {
6103 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
6104 errorText_ = errorStream_.str();
6110 stream_.state = STREAM_RUNNING;
// BUG FIX: runnable must be set TRUE before signalling.  When stopped,
// callbackEvent() blocks in "while ( !apiInfo->runnable )
// pthread_cond_wait( &apiInfo->runnable_cv, ... )"; signalling with
// runnable still false leaves the callback thread waiting forever, so
// the stream never actually starts.  (The previous "= false" line and
// its "fixes high CPU usage when stopped" comment were copied from
// stopStream()/abortStream(), where false is correct.)
6113 apiInfo->runnable = true;
6114 pthread_cond_signal( &apiInfo->runnable_cv );
6115 MUTEX_UNLOCK( &stream_.mutex );
6117 if ( result >= 0 ) return;
6118 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully: let buffered output finish playing
// (snd_pcm_drain) and discard pending input (snd_pcm_drop), then park
// the callback thread.  Issues only a WARNING when already stopped.
// NOTE(review): brace-only lines, the early "return;" and the "unlock:"
// label are elided from this extract.
6121 void RtApiAlsa :: stopStream()
6124 if ( stream_.state == STREAM_STOPPED ) {
6125 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
6126 error( RtAudioError::WARNING );
// Flip the state first so the callback thread stops processing while we
// tear the pcm streams down under the mutex.
6130 stream_.state = STREAM_STOPPED;
6131 MUTEX_LOCK( &stream_.mutex );
6134 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6135 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
6136 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles stop together, so drop; otherwise drain so queued
// output is played to the end.
6137 if ( apiInfo->synchronized )
6138 result = snd_pcm_drop( handle[0] );
6140 result = snd_pcm_drain( handle[0] );
6141 if ( result < 0 ) {
6142 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
6143 errorText_ = errorStream_.str();
6148 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6149 result = snd_pcm_drop( handle[1] );
6150 if ( result < 0 ) {
6151 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
6152 errorText_ = errorStream_.str();
// With runnable false the callback thread blocks on runnable_cv instead
// of spinning.
6158 apiInfo->runnable = false; // fixes high CPU usage when stopped
6159 MUTEX_UNLOCK( &stream_.mutex );
6161 if ( result >= 0 ) return;
6162 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream immediately: unlike stopStream(), pending output is
// discarded (snd_pcm_drop) rather than drained.  Issues only a WARNING
// when already stopped.  NOTE(review): brace-only lines, the early
// "return;", the "unlock:" label and (presumably) the
// "apiInfo->runnable = false;" line are elided from this extract.
6165 void RtApiAlsa :: abortStream()
6168 if ( stream_.state == STREAM_STOPPED ) {
6169 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
6170 error( RtAudioError::WARNING );
// Flip the state first so the callback thread stops processing while we
// drop the pcm streams under the mutex.
6174 stream_.state = STREAM_STOPPED;
6175 MUTEX_LOCK( &stream_.mutex );
6178 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6179 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
6180 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6181 result = snd_pcm_drop( handle[0] );
6182 if ( result < 0 ) {
6183 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
6184 errorText_ = errorStream_.str();
// Input handle only needs an explicit drop when not linked to output.
6189 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6190 result = snd_pcm_drop( handle[1] );
6191 if ( result < 0 ) {
6192 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
6193 errorText_ = errorStream_.str();
6199 MUTEX_UNLOCK( &stream_.mutex );
6201 if ( result >= 0 ) return;
6202 error( RtAudioError::SYSTEM_ERROR );
\r
6205 void RtApiAlsa :: callbackEvent()
\r
6207 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6208 if ( stream_.state == STREAM_STOPPED ) {
\r
6209 MUTEX_LOCK( &stream_.mutex );
\r
6210 while ( !apiInfo->runnable )
\r
6211 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6213 if ( stream_.state != STREAM_RUNNING ) {
\r
6214 MUTEX_UNLOCK( &stream_.mutex );
\r
6217 MUTEX_UNLOCK( &stream_.mutex );
\r
6220 if ( stream_.state == STREAM_CLOSED ) {
\r
6221 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6222 error( RtAudioError::WARNING );
\r
6226 int doStopStream = 0;
\r
6227 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6228 double streamTime = getStreamTime();
\r
6229 RtAudioStreamStatus status = 0;
\r
6230 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6231 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6232 apiInfo->xrun[0] = false;
\r
6234 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6235 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6236 apiInfo->xrun[1] = false;
\r
6238 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6239 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6241 if ( doStopStream == 2 ) {
\r
6246 MUTEX_LOCK( &stream_.mutex );
\r
6248 // The state might change while waiting on a mutex.
\r
6249 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6254 snd_pcm_t **handle;
\r
6255 snd_pcm_sframes_t frames;
\r
6256 RtAudioFormat format;
\r
6257 handle = (snd_pcm_t **) apiInfo->handles;
\r
6259 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6261 // Setup parameters.
\r
6262 if ( stream_.doConvertBuffer[1] ) {
\r
6263 buffer = stream_.deviceBuffer;
\r
6264 channels = stream_.nDeviceChannels[1];
\r
6265 format = stream_.deviceFormat[1];
\r
6268 buffer = stream_.userBuffer[1];
\r
6269 channels = stream_.nUserChannels[1];
\r
6270 format = stream_.userFormat;
\r
6273 // Read samples from device in interleaved/non-interleaved format.
\r
6274 if ( stream_.deviceInterleaved[1] )
\r
6275 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6277 void *bufs[channels];
\r
6278 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6279 for ( int i=0; i<channels; i++ )
\r
6280 bufs[i] = (void *) (buffer + (i * offset));
\r
6281 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6284 if ( result < (int) stream_.bufferSize ) {
\r
6285 // Either an error or overrun occured.
\r
6286 if ( result == -EPIPE ) {
\r
6287 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6288 if ( state == SND_PCM_STATE_XRUN ) {
\r
6289 apiInfo->xrun[1] = true;
\r
6290 result = snd_pcm_prepare( handle[1] );
\r
6291 if ( result < 0 ) {
\r
6292 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6293 errorText_ = errorStream_.str();
\r
6297 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6298 errorText_ = errorStream_.str();
\r
6302 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6303 errorText_ = errorStream_.str();
\r
6305 error( RtAudioError::WARNING );
\r
6309 // Do byte swapping if necessary.
\r
6310 if ( stream_.doByteSwap[1] )
\r
6311 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6313 // Do buffer conversion if necessary.
\r
6314 if ( stream_.doConvertBuffer[1] )
\r
6315 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6317 // Check stream latency
\r
6318 result = snd_pcm_delay( handle[1], &frames );
\r
6319 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6324 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6326 // Setup parameters and do buffer conversion if necessary.
\r
6327 if ( stream_.doConvertBuffer[0] ) {
\r
6328 buffer = stream_.deviceBuffer;
\r
6329 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6330 channels = stream_.nDeviceChannels[0];
\r
6331 format = stream_.deviceFormat[0];
\r
6334 buffer = stream_.userBuffer[0];
\r
6335 channels = stream_.nUserChannels[0];
\r
6336 format = stream_.userFormat;
\r
6339 // Do byte swapping if necessary.
\r
6340 if ( stream_.doByteSwap[0] )
\r
6341 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6343 // Write samples to device in interleaved/non-interleaved format.
\r
6344 if ( stream_.deviceInterleaved[0] )
\r
6345 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6347 void *bufs[channels];
\r
6348 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6349 for ( int i=0; i<channels; i++ )
\r
6350 bufs[i] = (void *) (buffer + (i * offset));
\r
6351 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6354 if ( result < (int) stream_.bufferSize ) {
\r
6355 // Either an error or underrun occured.
\r
6356 if ( result == -EPIPE ) {
\r
6357 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6358 if ( state == SND_PCM_STATE_XRUN ) {
\r
6359 apiInfo->xrun[0] = true;
\r
6360 result = snd_pcm_prepare( handle[0] );
\r
6361 if ( result < 0 ) {
\r
6362 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6363 errorText_ = errorStream_.str();
\r
6367 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6368 errorText_ = errorStream_.str();
\r
6372 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6373 errorText_ = errorStream_.str();
\r
6375 error( RtAudioError::WARNING );
\r
6379 // Check stream latency
\r
6380 result = snd_pcm_delay( handle[0], &frames );
\r
6381 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6385 MUTEX_UNLOCK( &stream_.mutex );
\r
6387 RtApi::tickStreamTime();
\r
6388 if ( doStopStream == 1 ) this->stopStream();
\r
6391 static void *alsaCallbackHandler( void *ptr )
\r
6393 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6394 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6395 bool *isRunning = &info->isRunning;
\r
6397 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6398 if ( &info->doRealtime ) {
\r
6399 pthread_t tID = pthread_self(); // ID of this thread
\r
6400 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6401 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6405 while ( *isRunning == true ) {
\r
6406 pthread_testcancel();
\r
6407 object->callbackEvent();
\r
6410 pthread_exit( NULL );
\r
6413 //******************** End of __LINUX_ALSA__ *********************//
\r
6416 #if defined(__LINUX_PULSE__)
\r
6418 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6419 // and Tristan Matthews.
\r
6421 #include <pulse/error.h>
\r
6422 #include <pulse/simple.h>
\r
6425 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
6426 44100, 48000, 96000, 0};
\r
6428 struct rtaudio_pa_format_mapping_t {
\r
6429 RtAudioFormat rtaudio_format;
\r
6430 pa_sample_format_t pa_format;
\r
6433 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6434 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6435 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6436 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6437 {0, PA_SAMPLE_INVALID}};
\r
6439 struct PulseAudioHandle {
\r
6440 pa_simple *s_play;
\r
6443 pthread_cond_t runnable_cv;
\r
6445 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6448 RtApiPulse::~RtApiPulse()
\r
6450 if ( stream_.state != STREAM_CLOSED )
\r
6454 unsigned int RtApiPulse::getDeviceCount( void )
\r
6459 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6461 RtAudio::DeviceInfo info;
\r
6462 info.probed = true;
\r
6463 info.name = "PulseAudio";
\r
6464 info.outputChannels = 2;
\r
6465 info.inputChannels = 2;
\r
6466 info.duplexChannels = 2;
\r
6467 info.isDefaultOutput = true;
\r
6468 info.isDefaultInput = true;
\r
6470 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6471 info.sampleRates.push_back( *sr );
\r
6473 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6478 static void *pulseaudio_callback( void * user )
\r
6480 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6481 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6482 volatile bool *isRunning = &cbi->isRunning;
\r
6484 while ( *isRunning ) {
\r
6485 pthread_testcancel();
\r
6486 context->callbackEvent();
\r
6489 pthread_exit( NULL );
\r
6492 void RtApiPulse::closeStream( void )
\r
6494 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6496 stream_.callbackInfo.isRunning = false;
\r
6498 MUTEX_LOCK( &stream_.mutex );
\r
6499 if ( stream_.state == STREAM_STOPPED ) {
\r
6500 pah->runnable = true;
\r
6501 pthread_cond_signal( &pah->runnable_cv );
\r
6503 MUTEX_UNLOCK( &stream_.mutex );
\r
6505 pthread_join( pah->thread, 0 );
\r
6506 if ( pah->s_play ) {
\r
6507 pa_simple_flush( pah->s_play, NULL );
\r
6508 pa_simple_free( pah->s_play );
\r
6511 pa_simple_free( pah->s_rec );
\r
6513 pthread_cond_destroy( &pah->runnable_cv );
\r
6515 stream_.apiHandle = 0;
\r
6518 if ( stream_.userBuffer[0] ) {
\r
6519 free( stream_.userBuffer[0] );
\r
6520 stream_.userBuffer[0] = 0;
\r
6522 if ( stream_.userBuffer[1] ) {
\r
6523 free( stream_.userBuffer[1] );
\r
6524 stream_.userBuffer[1] = 0;
\r
6527 stream_.state = STREAM_CLOSED;
\r
6528 stream_.mode = UNINITIALIZED;
\r
6531 void RtApiPulse::callbackEvent( void )
\r
6533 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6535 if ( stream_.state == STREAM_STOPPED ) {
\r
6536 MUTEX_LOCK( &stream_.mutex );
\r
6537 while ( !pah->runnable )
\r
6538 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6540 if ( stream_.state != STREAM_RUNNING ) {
\r
6541 MUTEX_UNLOCK( &stream_.mutex );
\r
6544 MUTEX_UNLOCK( &stream_.mutex );
\r
6547 if ( stream_.state == STREAM_CLOSED ) {
\r
6548 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6549 "this shouldn't happen!";
\r
6550 error( RtAudioError::WARNING );
\r
6554 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6555 double streamTime = getStreamTime();
\r
6556 RtAudioStreamStatus status = 0;
\r
6557 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6558 stream_.bufferSize, streamTime, status,
\r
6559 stream_.callbackInfo.userData );
\r
6561 if ( doStopStream == 2 ) {
\r
6566 MUTEX_LOCK( &stream_.mutex );
\r
6567 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6568 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6570 if ( stream_.state != STREAM_RUNNING )
\r
6575 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6576 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6577 convertBuffer( stream_.deviceBuffer,
\r
6578 stream_.userBuffer[OUTPUT],
\r
6579 stream_.convertInfo[OUTPUT] );
\r
6580 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6581 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6583 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6584 formatBytes( stream_.userFormat );
\r
6586 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6587 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6588 pa_strerror( pa_error ) << ".";
\r
6589 errorText_ = errorStream_.str();
\r
6590 error( RtAudioError::WARNING );
\r
6594 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6595 if ( stream_.doConvertBuffer[INPUT] )
\r
6596 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6597 formatBytes( stream_.deviceFormat[INPUT] );
\r
6599 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6600 formatBytes( stream_.userFormat );
\r
6602 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6603 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6604 pa_strerror( pa_error ) << ".";
\r
6605 errorText_ = errorStream_.str();
\r
6606 error( RtAudioError::WARNING );
\r
6608 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6609 convertBuffer( stream_.userBuffer[INPUT],
\r
6610 stream_.deviceBuffer,
\r
6611 stream_.convertInfo[INPUT] );
\r
6616 MUTEX_UNLOCK( &stream_.mutex );
\r
6617 RtApi::tickStreamTime();
\r
6619 if ( doStopStream == 1 )
\r
6623 void RtApiPulse::startStream( void )
\r
6625 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6627 if ( stream_.state == STREAM_CLOSED ) {
\r
6628 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6629 error( RtAudioError::INVALID_USE );
\r
6632 if ( stream_.state == STREAM_RUNNING ) {
\r
6633 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6634 error( RtAudioError::WARNING );
\r
6638 MUTEX_LOCK( &stream_.mutex );
\r
6640 stream_.state = STREAM_RUNNING;
\r
6642 pah->runnable = true;
\r
6643 pthread_cond_signal( &pah->runnable_cv );
\r
6644 MUTEX_UNLOCK( &stream_.mutex );
\r
6647 void RtApiPulse::stopStream( void )
\r
6649 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6651 if ( stream_.state == STREAM_CLOSED ) {
\r
6652 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6653 error( RtAudioError::INVALID_USE );
\r
6656 if ( stream_.state == STREAM_STOPPED ) {
\r
6657 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6658 error( RtAudioError::WARNING );
\r
6662 stream_.state = STREAM_STOPPED;
\r
6663 MUTEX_LOCK( &stream_.mutex );
\r
6665 if ( pah && pah->s_play ) {
\r
6667 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6668 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6669 pa_strerror( pa_error ) << ".";
\r
6670 errorText_ = errorStream_.str();
\r
6671 MUTEX_UNLOCK( &stream_.mutex );
\r
6672 error( RtAudioError::SYSTEM_ERROR );
\r
6677 stream_.state = STREAM_STOPPED;
\r
6678 MUTEX_UNLOCK( &stream_.mutex );
\r
6681 void RtApiPulse::abortStream( void )
\r
6683 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6685 if ( stream_.state == STREAM_CLOSED ) {
\r
6686 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6687 error( RtAudioError::INVALID_USE );
\r
6690 if ( stream_.state == STREAM_STOPPED ) {
\r
6691 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6692 error( RtAudioError::WARNING );
\r
6696 stream_.state = STREAM_STOPPED;
\r
6697 MUTEX_LOCK( &stream_.mutex );
\r
6699 if ( pah && pah->s_play ) {
\r
6701 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6702 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6703 pa_strerror( pa_error ) << ".";
\r
6704 errorText_ = errorStream_.str();
\r
6705 MUTEX_UNLOCK( &stream_.mutex );
\r
6706 error( RtAudioError::SYSTEM_ERROR );
\r
6711 stream_.state = STREAM_STOPPED;
\r
6712 MUTEX_UNLOCK( &stream_.mutex );
\r
6715 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6716 unsigned int channels, unsigned int firstChannel,
\r
6717 unsigned int sampleRate, RtAudioFormat format,
\r
6718 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6720 PulseAudioHandle *pah = 0;
\r
6721 unsigned long bufferBytes = 0;
\r
6722 pa_sample_spec ss;
\r
6724 if ( device != 0 ) return false;
\r
6725 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6726 if ( channels != 1 && channels != 2 ) {
\r
6727 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6730 ss.channels = channels;
\r
6732 if ( firstChannel != 0 ) return false;
\r
6734 bool sr_found = false;
\r
6735 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6736 if ( sampleRate == *sr ) {
\r
6738 stream_.sampleRate = sampleRate;
\r
6739 ss.rate = sampleRate;
\r
6743 if ( !sr_found ) {
\r
6744 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6748 bool sf_found = 0;
\r
6749 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6750 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6751 if ( format == sf->rtaudio_format ) {
\r
6753 stream_.userFormat = sf->rtaudio_format;
\r
6754 ss.format = sf->pa_format;
\r
6758 if ( !sf_found ) {
\r
6759 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6763 // Set interleaving parameters.
\r
6764 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6765 else stream_.userInterleaved = true;
\r
6766 stream_.deviceInterleaved[mode] = true;
\r
6767 stream_.nBuffers = 1;
\r
6768 stream_.doByteSwap[mode] = false;
\r
6769 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6770 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6771 stream_.nUserChannels[mode] = channels;
\r
6772 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6773 stream_.channelOffset[mode] = 0;
\r
6775 // Allocate necessary internal buffers.
\r
6776 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6777 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6778 if ( stream_.userBuffer[mode] == NULL ) {
\r
6779 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6782 stream_.bufferSize = *bufferSize;
\r
6784 if ( stream_.doConvertBuffer[mode] ) {
\r
6786 bool makeBuffer = true;
\r
6787 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6788 if ( mode == INPUT ) {
\r
6789 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6790 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6791 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6795 if ( makeBuffer ) {
\r
6796 bufferBytes *= *bufferSize;
\r
6797 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6798 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6799 if ( stream_.deviceBuffer == NULL ) {
\r
6800 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6806 stream_.device[mode] = device;
\r
6808 // Setup the buffer conversion information structure.
\r
6809 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6811 if ( !stream_.apiHandle ) {
\r
6812 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6814 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6818 stream_.apiHandle = pah;
\r
6819 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6820 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6824 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6827 std::string streamName = "RtAudio";
\r
6828 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
6831 pa_buffer_attr buffer_attr;
\r
6832 buffer_attr.fragsize = bufferBytes;
\r
6833 buffer_attr.maxlength = -1;
\r
6835 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
6836 if ( !pah->s_rec ) {
\r
6837 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6842 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6843 if ( !pah->s_play ) {
\r
6844 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6852 if ( stream_.mode == UNINITIALIZED )
\r
6853 stream_.mode = mode;
\r
6854 else if ( stream_.mode == mode )
\r
6857 stream_.mode = DUPLEX;
\r
6859 if ( !stream_.callbackInfo.isRunning ) {
\r
6860 stream_.callbackInfo.object = this;
\r
6861 stream_.callbackInfo.isRunning = true;
\r
6862 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6863 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6868 stream_.state = STREAM_STOPPED;
\r
6872 if ( pah && stream_.callbackInfo.isRunning ) {
\r
6873 pthread_cond_destroy( &pah->runnable_cv );
\r
6875 stream_.apiHandle = 0;
\r
6878 for ( int i=0; i<2; i++ ) {
\r
6879 if ( stream_.userBuffer[i] ) {
\r
6880 free( stream_.userBuffer[i] );
\r
6881 stream_.userBuffer[i] = 0;
\r
6885 if ( stream_.deviceBuffer ) {
\r
6886 free( stream_.deviceBuffer );
\r
6887 stream_.deviceBuffer = 0;
\r
6893 //******************** End of __LINUX_PULSE__ *********************//
\r
6896 #if defined(__LINUX_OSS__)
\r
6898 #include <unistd.h>
\r
6899 #include <sys/ioctl.h>
\r
6900 #include <unistd.h>
\r
6901 #include <fcntl.h>
\r
6902 #include <sys/soundcard.h>
\r
6903 #include <errno.h>
\r
static void *ossCallbackHandler(void * ptr);

// A structure to hold various information related to the OSS API
// implementation: per-direction device fds and xrun flags, a trigger
// flag, and the condition variable that parks the callback thread.
struct OssHandle {
  int id[2];    // device ids
  bool xrun[2];
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6920 RtApiOss :: RtApiOss()
\r
6922 // Nothing to do here.
\r
6925 RtApiOss :: ~RtApiOss()
\r
6927 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6930 unsigned int RtApiOss :: getDeviceCount( void )
\r
6932 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6933 if ( mixerfd == -1 ) {
\r
6934 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6935 error( RtAudioError::WARNING );
\r
6939 oss_sysinfo sysinfo;
\r
6940 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6942 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6943 error( RtAudioError::WARNING );
\r
6948 return sysinfo.numaudios;
\r
6951 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6953 RtAudio::DeviceInfo info;
\r
6954 info.probed = false;
\r
6956 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6957 if ( mixerfd == -1 ) {
\r
6958 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6959 error( RtAudioError::WARNING );
\r
6963 oss_sysinfo sysinfo;
\r
6964 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6965 if ( result == -1 ) {
\r
6967 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6968 error( RtAudioError::WARNING );
\r
6972 unsigned nDevices = sysinfo.numaudios;
\r
6973 if ( nDevices == 0 ) {
\r
6975 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6976 error( RtAudioError::INVALID_USE );
\r
6980 if ( device >= nDevices ) {
\r
6982 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6983 error( RtAudioError::INVALID_USE );
\r
6987 oss_audioinfo ainfo;
\r
6988 ainfo.dev = device;
\r
6989 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6991 if ( result == -1 ) {
\r
6992 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6993 errorText_ = errorStream_.str();
\r
6994 error( RtAudioError::WARNING );
\r
6999 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
7000 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
7001 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
7002 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
7003 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7006 // Probe data formats ... do for input
\r
7007 unsigned long mask = ainfo.iformats;
\r
7008 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
7009 info.nativeFormats |= RTAUDIO_SINT16;
\r
7010 if ( mask & AFMT_S8 )
\r
7011 info.nativeFormats |= RTAUDIO_SINT8;
\r
7012 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
7013 info.nativeFormats |= RTAUDIO_SINT32;
\r
7014 if ( mask & AFMT_FLOAT )
\r
7015 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7016 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
7017 info.nativeFormats |= RTAUDIO_SINT24;
\r
7019 // Check that we have at least one supported format
\r
7020 if ( info.nativeFormats == 0 ) {
\r
7021 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7022 errorText_ = errorStream_.str();
\r
7023 error( RtAudioError::WARNING );
\r
7027 // Probe the supported sample rates.
\r
7028 info.sampleRates.clear();
\r
7029 if ( ainfo.nrates ) {
\r
7030 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
7031 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7032 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
7033 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7040 // Check min and max rate values;
\r
7041 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7042 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
7043 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7047 if ( info.sampleRates.size() == 0 ) {
\r
7048 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
7049 errorText_ = errorStream_.str();
\r
7050 error( RtAudioError::WARNING );
\r
7053 info.probed = true;
\r
7054 info.name = ainfo.name;
\r
7061 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7062 unsigned int firstChannel, unsigned int sampleRate,
\r
7063 RtAudioFormat format, unsigned int *bufferSize,
\r
7064 RtAudio::StreamOptions *options )
\r
7066 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7067 if ( mixerfd == -1 ) {
\r
7068 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7072 oss_sysinfo sysinfo;
\r
7073 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7074 if ( result == -1 ) {
\r
7076 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7080 unsigned nDevices = sysinfo.numaudios;
\r
7081 if ( nDevices == 0 ) {
\r
7082 // This should not happen because a check is made before this function is called.
\r
7084 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7088 if ( device >= nDevices ) {
\r
7089 // This should not happen because a check is made before this function is called.
\r
7091 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7095 oss_audioinfo ainfo;
\r
7096 ainfo.dev = device;
\r
7097 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7099 if ( result == -1 ) {
\r
7100 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7101 errorText_ = errorStream_.str();
\r
7105 // Check if device supports input or output
\r
7106 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7107 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7108 if ( mode == OUTPUT )
\r
7109 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7111 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7112 errorText_ = errorStream_.str();
\r
7117 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7118 if ( mode == OUTPUT )
\r
7119 flags |= O_WRONLY;
\r
7120 else { // mode == INPUT
\r
7121 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7122 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7123 close( handle->id[0] );
\r
7124 handle->id[0] = 0;
\r
7125 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7126 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7127 errorText_ = errorStream_.str();
\r
7130 // Check that the number previously set channels is the same.
\r
7131 if ( stream_.nUserChannels[0] != channels ) {
\r
7132 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7133 errorText_ = errorStream_.str();
\r
7139 flags |= O_RDONLY;
\r
7142 // Set exclusive access if specified.
\r
7143 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7145 // Try to open the device.
\r
7147 fd = open( ainfo.devnode, flags, 0 );
\r
7149 if ( errno == EBUSY )
\r
7150 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7152 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7153 errorText_ = errorStream_.str();
\r
7157 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7159 if ( flags | O_RDWR ) {
\r
7160 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7161 if ( result == -1) {
\r
7162 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7163 errorText_ = errorStream_.str();
\r
7169 // Check the device channel support.
\r
7170 stream_.nUserChannels[mode] = channels;
\r
7171 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7173 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7174 errorText_ = errorStream_.str();
\r
7178 // Set the number of channels.
\r
7179 int deviceChannels = channels + firstChannel;
\r
7180 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7181 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7183 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7184 errorText_ = errorStream_.str();
\r
7187 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7189 // Get the data format mask
\r
7191 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7192 if ( result == -1 ) {
\r
7194 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7195 errorText_ = errorStream_.str();
\r
7199 // Determine how to set the device format.
\r
7200 stream_.userFormat = format;
\r
7201 int deviceFormat = -1;
\r
7202 stream_.doByteSwap[mode] = false;
\r
7203 if ( format == RTAUDIO_SINT8 ) {
\r
7204 if ( mask & AFMT_S8 ) {
\r
7205 deviceFormat = AFMT_S8;
\r
7206 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7209 else if ( format == RTAUDIO_SINT16 ) {
\r
7210 if ( mask & AFMT_S16_NE ) {
\r
7211 deviceFormat = AFMT_S16_NE;
\r
7212 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7214 else if ( mask & AFMT_S16_OE ) {
\r
7215 deviceFormat = AFMT_S16_OE;
\r
7216 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7217 stream_.doByteSwap[mode] = true;
\r
7220 else if ( format == RTAUDIO_SINT24 ) {
\r
7221 if ( mask & AFMT_S24_NE ) {
\r
7222 deviceFormat = AFMT_S24_NE;
\r
7223 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7225 else if ( mask & AFMT_S24_OE ) {
\r
7226 deviceFormat = AFMT_S24_OE;
\r
7227 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7228 stream_.doByteSwap[mode] = true;
\r
7231 else if ( format == RTAUDIO_SINT32 ) {
\r
7232 if ( mask & AFMT_S32_NE ) {
\r
7233 deviceFormat = AFMT_S32_NE;
\r
7234 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7236 else if ( mask & AFMT_S32_OE ) {
\r
7237 deviceFormat = AFMT_S32_OE;
\r
7238 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7239 stream_.doByteSwap[mode] = true;
\r
7243 if ( deviceFormat == -1 ) {
\r
7244 // The user requested format is not natively supported by the device.
\r
7245 if ( mask & AFMT_S16_NE ) {
\r
7246 deviceFormat = AFMT_S16_NE;
\r
7247 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7249 else if ( mask & AFMT_S32_NE ) {
\r
7250 deviceFormat = AFMT_S32_NE;
\r
7251 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7253 else if ( mask & AFMT_S24_NE ) {
\r
7254 deviceFormat = AFMT_S24_NE;
\r
7255 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7257 else if ( mask & AFMT_S16_OE ) {
\r
7258 deviceFormat = AFMT_S16_OE;
\r
7259 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7260 stream_.doByteSwap[mode] = true;
\r
7262 else if ( mask & AFMT_S32_OE ) {
\r
7263 deviceFormat = AFMT_S32_OE;
\r
7264 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7265 stream_.doByteSwap[mode] = true;
\r
7267 else if ( mask & AFMT_S24_OE ) {
\r
7268 deviceFormat = AFMT_S24_OE;
\r
7269 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7270 stream_.doByteSwap[mode] = true;
\r
7272 else if ( mask & AFMT_S8) {
\r
7273 deviceFormat = AFMT_S8;
\r
7274 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7278 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7279 // This really shouldn't happen ...
\r
7281 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7282 errorText_ = errorStream_.str();
\r
7286 // Set the data format.
\r
7287 int temp = deviceFormat;
\r
7288 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7289 if ( result == -1 || deviceFormat != temp ) {
\r
7291 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7292 errorText_ = errorStream_.str();
\r
7296 // Attempt to set the buffer size. According to OSS, the minimum
\r
7297 // number of buffers is two. The supposed minimum buffer size is 16
\r
7298 // bytes, so that will be our lower bound. The argument to this
\r
7299 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7300 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7301 // We'll check the actual value used near the end of the setup
\r
7303 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7304 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7306 if ( options ) buffers = options->numberOfBuffers;
\r
7307 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7308 if ( buffers < 2 ) buffers = 3;
\r
7309 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7310 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7311 if ( result == -1 ) {
\r
7313 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7314 errorText_ = errorStream_.str();
\r
7317 stream_.nBuffers = buffers;
\r
7319 // Save buffer size (in sample frames).
\r
7320 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7321 stream_.bufferSize = *bufferSize;
\r
7323 // Set the sample rate.
\r
7324 int srate = sampleRate;
\r
7325 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7326 if ( result == -1 ) {
\r
7328 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7329 errorText_ = errorStream_.str();
\r
7333 // Verify the sample rate setup worked.
\r
7334 if ( abs( srate - sampleRate ) > 100 ) {
\r
7336 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7337 errorText_ = errorStream_.str();
\r
7340 stream_.sampleRate = sampleRate;
\r
7342 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7343 // We're doing duplex setup here.
\r
7344 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7345 stream_.nDeviceChannels[0] = deviceChannels;
\r
7348 // Set interleaving parameters.
\r
7349 stream_.userInterleaved = true;
\r
7350 stream_.deviceInterleaved[mode] = true;
\r
7351 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7352 stream_.userInterleaved = false;
\r
7354 // Set flags for buffer conversion
\r
7355 stream_.doConvertBuffer[mode] = false;
\r
7356 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7357 stream_.doConvertBuffer[mode] = true;
\r
7358 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7359 stream_.doConvertBuffer[mode] = true;
\r
7360 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7361 stream_.nUserChannels[mode] > 1 )
\r
7362 stream_.doConvertBuffer[mode] = true;
\r
7364 // Allocate the stream handles if necessary and then save.
\r
7365 if ( stream_.apiHandle == 0 ) {
\r
7367 handle = new OssHandle;
\r
7369 catch ( std::bad_alloc& ) {
\r
7370 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7374 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7375 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7379 stream_.apiHandle = (void *) handle;
\r
7382 handle = (OssHandle *) stream_.apiHandle;
\r
7384 handle->id[mode] = fd;
\r
7386 // Allocate necessary internal buffers.
\r
7387 unsigned long bufferBytes;
\r
7388 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7389 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7390 if ( stream_.userBuffer[mode] == NULL ) {
\r
7391 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7395 if ( stream_.doConvertBuffer[mode] ) {
\r
7397 bool makeBuffer = true;
\r
7398 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7399 if ( mode == INPUT ) {
\r
7400 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7401 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7402 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7406 if ( makeBuffer ) {
\r
7407 bufferBytes *= *bufferSize;
\r
7408 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7409 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7410 if ( stream_.deviceBuffer == NULL ) {
\r
7411 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7417 stream_.device[mode] = device;
\r
7418 stream_.state = STREAM_STOPPED;
\r
7420 // Setup the buffer conversion information structure.
\r
7421 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7423 // Setup thread if necessary.
\r
7424 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7425 // We had already set up an output stream.
\r
7426 stream_.mode = DUPLEX;
\r
7427 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7430 stream_.mode = mode;
\r
7432 // Setup callback thread.
\r
7433 stream_.callbackInfo.object = (void *) this;
\r
7435 // Set the thread attributes for joinable and realtime scheduling
\r
7436 // priority. The higher priority will only take affect if the
\r
7437 // program is run as root or suid.
\r
7438 pthread_attr_t attr;
\r
7439 pthread_attr_init( &attr );
\r
7440 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7441 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7442 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7443 struct sched_param param;
\r
7444 int priority = options->priority;
\r
7445 int min = sched_get_priority_min( SCHED_RR );
\r
7446 int max = sched_get_priority_max( SCHED_RR );
\r
7447 if ( priority < min ) priority = min;
\r
7448 else if ( priority > max ) priority = max;
\r
7449 param.sched_priority = priority;
\r
7450 pthread_attr_setschedparam( &attr, ¶m );
\r
7451 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7454 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7456 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7459 stream_.callbackInfo.isRunning = true;
\r
7460 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7461 pthread_attr_destroy( &attr );
\r
7463 stream_.callbackInfo.isRunning = false;
\r
7464 errorText_ = "RtApiOss::error creating callback thread!";
\r
7473 pthread_cond_destroy( &handle->runnable );
\r
7474 if ( handle->id[0] ) close( handle->id[0] );
\r
7475 if ( handle->id[1] ) close( handle->id[1] );
\r
7477 stream_.apiHandle = 0;
\r
7480 for ( int i=0; i<2; i++ ) {
\r
7481 if ( stream_.userBuffer[i] ) {
\r
7482 free( stream_.userBuffer[i] );
\r
7483 stream_.userBuffer[i] = 0;
\r
7487 if ( stream_.deviceBuffer ) {
\r
7488 free( stream_.deviceBuffer );
\r
7489 stream_.deviceBuffer = 0;
\r
7495 void RtApiOss :: closeStream()
\r
7497 if ( stream_.state == STREAM_CLOSED ) {
\r
7498 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7499 error( RtAudioError::WARNING );
\r
7503 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7504 stream_.callbackInfo.isRunning = false;
\r
7505 MUTEX_LOCK( &stream_.mutex );
\r
7506 if ( stream_.state == STREAM_STOPPED )
\r
7507 pthread_cond_signal( &handle->runnable );
\r
7508 MUTEX_UNLOCK( &stream_.mutex );
\r
7509 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7511 if ( stream_.state == STREAM_RUNNING ) {
\r
7512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7513 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7515 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7516 stream_.state = STREAM_STOPPED;
\r
7520 pthread_cond_destroy( &handle->runnable );
\r
7521 if ( handle->id[0] ) close( handle->id[0] );
\r
7522 if ( handle->id[1] ) close( handle->id[1] );
\r
7524 stream_.apiHandle = 0;
\r
7527 for ( int i=0; i<2; i++ ) {
\r
7528 if ( stream_.userBuffer[i] ) {
\r
7529 free( stream_.userBuffer[i] );
\r
7530 stream_.userBuffer[i] = 0;
\r
7534 if ( stream_.deviceBuffer ) {
\r
7535 free( stream_.deviceBuffer );
\r
7536 stream_.deviceBuffer = 0;
\r
7539 stream_.mode = UNINITIALIZED;
\r
7540 stream_.state = STREAM_CLOSED;
\r
7543 void RtApiOss :: startStream()
\r
7546 if ( stream_.state == STREAM_RUNNING ) {
\r
7547 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7548 error( RtAudioError::WARNING );
\r
7552 MUTEX_LOCK( &stream_.mutex );
\r
7554 stream_.state = STREAM_RUNNING;
\r
7556 // No need to do anything else here ... OSS automatically starts
\r
7557 // when fed samples.
\r
7559 MUTEX_UNLOCK( &stream_.mutex );
\r
7561 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7562 pthread_cond_signal( &handle->runnable );
\r
7565 void RtApiOss :: stopStream()
\r
7568 if ( stream_.state == STREAM_STOPPED ) {
\r
7569 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7570 error( RtAudioError::WARNING );
\r
7574 MUTEX_LOCK( &stream_.mutex );
\r
7576 // The state might change while waiting on a mutex.
\r
7577 if ( stream_.state == STREAM_STOPPED ) {
\r
7578 MUTEX_UNLOCK( &stream_.mutex );
\r
7583 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7584 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7586 // Flush the output with zeros a few times.
\r
7589 RtAudioFormat format;
\r
7591 if ( stream_.doConvertBuffer[0] ) {
\r
7592 buffer = stream_.deviceBuffer;
\r
7593 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7594 format = stream_.deviceFormat[0];
\r
7597 buffer = stream_.userBuffer[0];
\r
7598 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7599 format = stream_.userFormat;
\r
7602 memset( buffer, 0, samples * formatBytes(format) );
\r
7603 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7604 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7605 if ( result == -1 ) {
\r
7606 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7607 error( RtAudioError::WARNING );
\r
7611 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7612 if ( result == -1 ) {
\r
7613 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7614 errorText_ = errorStream_.str();
\r
7617 handle->triggered = false;
\r
7620 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7621 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7622 if ( result == -1 ) {
\r
7623 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7624 errorText_ = errorStream_.str();
\r
7630 stream_.state = STREAM_STOPPED;
\r
7631 MUTEX_UNLOCK( &stream_.mutex );
\r
7633 if ( result != -1 ) return;
\r
7634 error( RtAudioError::SYSTEM_ERROR );
\r
7637 void RtApiOss :: abortStream()
\r
7640 if ( stream_.state == STREAM_STOPPED ) {
\r
7641 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7642 error( RtAudioError::WARNING );
\r
7646 MUTEX_LOCK( &stream_.mutex );
\r
7648 // The state might change while waiting on a mutex.
\r
7649 if ( stream_.state == STREAM_STOPPED ) {
\r
7650 MUTEX_UNLOCK( &stream_.mutex );
\r
7655 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7656 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7657 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7658 if ( result == -1 ) {
\r
7659 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7660 errorText_ = errorStream_.str();
\r
7663 handle->triggered = false;
\r
7666 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7667 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7668 if ( result == -1 ) {
\r
7669 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7670 errorText_ = errorStream_.str();
\r
7676 stream_.state = STREAM_STOPPED;
\r
7677 MUTEX_UNLOCK( &stream_.mutex );
\r
7679 if ( result != -1 ) return;
\r
7680 error( RtAudioError::SYSTEM_ERROR );
\r
7683 void RtApiOss :: callbackEvent()
\r
7685 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7686 if ( stream_.state == STREAM_STOPPED ) {
\r
7687 MUTEX_LOCK( &stream_.mutex );
\r
7688 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7689 if ( stream_.state != STREAM_RUNNING ) {
\r
7690 MUTEX_UNLOCK( &stream_.mutex );
\r
7693 MUTEX_UNLOCK( &stream_.mutex );
\r
7696 if ( stream_.state == STREAM_CLOSED ) {
\r
7697 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7698 error( RtAudioError::WARNING );
\r
7702 // Invoke user callback to get fresh output data.
\r
7703 int doStopStream = 0;
\r
7704 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7705 double streamTime = getStreamTime();
\r
7706 RtAudioStreamStatus status = 0;
\r
7707 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7708 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7709 handle->xrun[0] = false;
\r
7711 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7712 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7713 handle->xrun[1] = false;
\r
7715 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7716 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7717 if ( doStopStream == 2 ) {
\r
7718 this->abortStream();
\r
7722 MUTEX_LOCK( &stream_.mutex );
\r
7724 // The state might change while waiting on a mutex.
\r
7725 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7730 RtAudioFormat format;
\r
7732 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7734 // Setup parameters and do buffer conversion if necessary.
\r
7735 if ( stream_.doConvertBuffer[0] ) {
\r
7736 buffer = stream_.deviceBuffer;
\r
7737 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7738 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7739 format = stream_.deviceFormat[0];
\r
7742 buffer = stream_.userBuffer[0];
\r
7743 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7744 format = stream_.userFormat;
\r
7747 // Do byte swapping if necessary.
\r
7748 if ( stream_.doByteSwap[0] )
\r
7749 byteSwapBuffer( buffer, samples, format );
\r
7751 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7753 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7754 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7755 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7756 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7757 handle->triggered = true;
\r
7760 // Write samples to device.
\r
7761 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7763 if ( result == -1 ) {
\r
7764 // We'll assume this is an underrun, though there isn't a
\r
7765 // specific means for determining that.
\r
7766 handle->xrun[0] = true;
\r
7767 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7768 error( RtAudioError::WARNING );
\r
7769 // Continue on to input section.
\r
7773 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7775 // Setup parameters.
\r
7776 if ( stream_.doConvertBuffer[1] ) {
\r
7777 buffer = stream_.deviceBuffer;
\r
7778 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7779 format = stream_.deviceFormat[1];
\r
7782 buffer = stream_.userBuffer[1];
\r
7783 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7784 format = stream_.userFormat;
\r
7787 // Read samples from device.
\r
7788 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7790 if ( result == -1 ) {
\r
7791 // We'll assume this is an overrun, though there isn't a
\r
7792 // specific means for determining that.
\r
7793 handle->xrun[1] = true;
\r
7794 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7795 error( RtAudioError::WARNING );
\r
7799 // Do byte swapping if necessary.
\r
7800 if ( stream_.doByteSwap[1] )
\r
7801 byteSwapBuffer( buffer, samples, format );
\r
7803 // Do buffer conversion if necessary.
\r
7804 if ( stream_.doConvertBuffer[1] )
\r
7805 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7809 MUTEX_UNLOCK( &stream_.mutex );
\r
7811 RtApi::tickStreamTime();
\r
7812 if ( doStopStream == 1 ) this->stopStream();
\r
7815 static void *ossCallbackHandler( void *ptr )
\r
7817 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7818 RtApiOss *object = (RtApiOss *) info->object;
\r
7819 bool *isRunning = &info->isRunning;
\r
7821 while ( *isRunning == true ) {
\r
7822 pthread_testcancel();
\r
7823 object->callbackEvent();
\r
7826 pthread_exit( NULL );
\r
7829 //******************** End of __LINUX_OSS__ *********************//
\r
7833 // *************************************************** //
\r
7835 // Protected common (OS-independent) RtAudio methods.
\r
7837 // *************************************************** //
\r
7839 // This method can be modified to control the behavior of error
\r
7840 // message printing.
\r
7841 void RtApi :: error( RtAudioError::Type type )
\r
7843 errorStream_.str(""); // clear the ostringstream
\r
7845 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7846 if ( errorCallback ) {
\r
7847 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7849 if ( firstErrorOccurred )
\r
7852 firstErrorOccurred = true;
\r
7853 const std::string errorMessage = errorText_;
\r
7855 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7856 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7860 errorCallback( type, errorMessage );
\r
7861 firstErrorOccurred = false;
\r
7865 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
7866 std::cerr << '\n' << errorText_ << "\n\n";
\r
7867 else if ( type != RtAudioError::WARNING )
\r
7868 throw( RtAudioError( errorText_, type ) );
\r
7871 void RtApi :: verifyStream()
\r
7873 if ( stream_.state == STREAM_CLOSED ) {
\r
7874 errorText_ = "RtApi:: a stream is not open!";
\r
7875 error( RtAudioError::INVALID_USE );
\r
7879 void RtApi :: clearStreamInfo()
\r
7881 stream_.mode = UNINITIALIZED;
\r
7882 stream_.state = STREAM_CLOSED;
\r
7883 stream_.sampleRate = 0;
\r
7884 stream_.bufferSize = 0;
\r
7885 stream_.nBuffers = 0;
\r
7886 stream_.userFormat = 0;
\r
7887 stream_.userInterleaved = true;
\r
7888 stream_.streamTime = 0.0;
\r
7889 stream_.apiHandle = 0;
\r
7890 stream_.deviceBuffer = 0;
\r
7891 stream_.callbackInfo.callback = 0;
\r
7892 stream_.callbackInfo.userData = 0;
\r
7893 stream_.callbackInfo.isRunning = false;
\r
7894 stream_.callbackInfo.errorCallback = 0;
\r
7895 for ( int i=0; i<2; i++ ) {
\r
7896 stream_.device[i] = 11111;
\r
7897 stream_.doConvertBuffer[i] = false;
\r
7898 stream_.deviceInterleaved[i] = true;
\r
7899 stream_.doByteSwap[i] = false;
\r
7900 stream_.nUserChannels[i] = 0;
\r
7901 stream_.nDeviceChannels[i] = 0;
\r
7902 stream_.channelOffset[i] = 0;
\r
7903 stream_.deviceFormat[i] = 0;
\r
7904 stream_.latency[i] = 0;
\r
7905 stream_.userBuffer[i] = 0;
\r
7906 stream_.convertInfo[i].channels = 0;
\r
7907 stream_.convertInfo[i].inJump = 0;
\r
7908 stream_.convertInfo[i].outJump = 0;
\r
7909 stream_.convertInfo[i].inFormat = 0;
\r
7910 stream_.convertInfo[i].outFormat = 0;
\r
7911 stream_.convertInfo[i].inOffset.clear();
\r
7912 stream_.convertInfo[i].outOffset.clear();
\r
7916 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7918 if ( format == RTAUDIO_SINT16 )
\r
7920 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7922 else if ( format == RTAUDIO_FLOAT64 )
\r
7924 else if ( format == RTAUDIO_SINT24 )
\r
7926 else if ( format == RTAUDIO_SINT8 )
\r
7929 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7930 error( RtAudioError::WARNING );
\r
7935 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7937 if ( mode == INPUT ) { // convert device to user buffer
\r
7938 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7939 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7940 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7941 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7943 else { // convert user to device buffer
\r
7944 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7945 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7946 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7947 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7950 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7951 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7953 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7955 // Set up the interleave/deinterleave offsets.
\r
7956 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7957 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7958 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7959 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7960 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7961 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7962 stream_.convertInfo[mode].inJump = 1;
\r
7966 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7967 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7968 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7969 stream_.convertInfo[mode].outJump = 1;
\r
7973 else { // no (de)interleaving
\r
7974 if ( stream_.userInterleaved ) {
\r
7975 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7976 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7977 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7981 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7982 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7983 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7984 stream_.convertInfo[mode].inJump = 1;
\r
7985 stream_.convertInfo[mode].outJump = 1;
\r
7990 // Add channel offset.
\r
7991 if ( firstChannel > 0 ) {
\r
7992 if ( stream_.deviceInterleaved[mode] ) {
\r
7993 if ( mode == OUTPUT ) {
\r
7994 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7995 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7998 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7999 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
8003 if ( mode == OUTPUT ) {
\r
8004 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8005 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8008 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8009 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8015 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8017 // This function does format conversion, input/output channel compensation, and
\r
8018 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8019 // the lower three bytes of a 32-bit integer.
\r
8021 // Clear our device buffer when in/out duplex device channels are different
\r
8022 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8023 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8024 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8027 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8029 Float64 *out = (Float64 *)outBuffer;
\r
8031 if (info.inFormat == RTAUDIO_SINT8) {
\r
8032 signed char *in = (signed char *)inBuffer;
\r
8033 scale = 1.0 / 127.5;
\r
8034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8035 for (j=0; j<info.channels; j++) {
\r
8036 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8037 out[info.outOffset[j]] += 0.5;
\r
8038 out[info.outOffset[j]] *= scale;
\r
8040 in += info.inJump;
\r
8041 out += info.outJump;
\r
8044 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8045 Int16 *in = (Int16 *)inBuffer;
\r
8046 scale = 1.0 / 32767.5;
\r
8047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8048 for (j=0; j<info.channels; j++) {
\r
8049 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8050 out[info.outOffset[j]] += 0.5;
\r
8051 out[info.outOffset[j]] *= scale;
\r
8053 in += info.inJump;
\r
8054 out += info.outJump;
\r
8057 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8058 Int24 *in = (Int24 *)inBuffer;
\r
8059 scale = 1.0 / 8388607.5;
\r
8060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8061 for (j=0; j<info.channels; j++) {
\r
8062 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8063 out[info.outOffset[j]] += 0.5;
\r
8064 out[info.outOffset[j]] *= scale;
\r
8066 in += info.inJump;
\r
8067 out += info.outJump;
\r
8070 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8071 Int32 *in = (Int32 *)inBuffer;
\r
8072 scale = 1.0 / 2147483647.5;
\r
8073 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8074 for (j=0; j<info.channels; j++) {
\r
8075 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8076 out[info.outOffset[j]] += 0.5;
\r
8077 out[info.outOffset[j]] *= scale;
\r
8079 in += info.inJump;
\r
8080 out += info.outJump;
\r
8083 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8084 Float32 *in = (Float32 *)inBuffer;
\r
8085 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8086 for (j=0; j<info.channels; j++) {
\r
8087 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8089 in += info.inJump;
\r
8090 out += info.outJump;
\r
8093 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8094 // Channel compensation and/or (de)interleaving only.
\r
8095 Float64 *in = (Float64 *)inBuffer;
\r
8096 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8097 for (j=0; j<info.channels; j++) {
\r
8098 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8100 in += info.inJump;
\r
8101 out += info.outJump;
\r
8105 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8107 Float32 *out = (Float32 *)outBuffer;
\r
8109 if (info.inFormat == RTAUDIO_SINT8) {
\r
8110 signed char *in = (signed char *)inBuffer;
\r
8111 scale = (Float32) ( 1.0 / 127.5 );
\r
8112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8113 for (j=0; j<info.channels; j++) {
\r
8114 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8115 out[info.outOffset[j]] += 0.5;
\r
8116 out[info.outOffset[j]] *= scale;
\r
8118 in += info.inJump;
\r
8119 out += info.outJump;
\r
8122 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8123 Int16 *in = (Int16 *)inBuffer;
\r
8124 scale = (Float32) ( 1.0 / 32767.5 );
\r
8125 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8126 for (j=0; j<info.channels; j++) {
\r
8127 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8128 out[info.outOffset[j]] += 0.5;
\r
8129 out[info.outOffset[j]] *= scale;
\r
8131 in += info.inJump;
\r
8132 out += info.outJump;
\r
8135 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8136 Int24 *in = (Int24 *)inBuffer;
\r
8137 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8139 for (j=0; j<info.channels; j++) {
\r
8140 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8141 out[info.outOffset[j]] += 0.5;
\r
8142 out[info.outOffset[j]] *= scale;
\r
8144 in += info.inJump;
\r
8145 out += info.outJump;
\r
8148 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8149 Int32 *in = (Int32 *)inBuffer;
\r
8150 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8152 for (j=0; j<info.channels; j++) {
\r
8153 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8154 out[info.outOffset[j]] += 0.5;
\r
8155 out[info.outOffset[j]] *= scale;
\r
8157 in += info.inJump;
\r
8158 out += info.outJump;
\r
8161 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8162 // Channel compensation and/or (de)interleaving only.
\r
8163 Float32 *in = (Float32 *)inBuffer;
\r
8164 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8165 for (j=0; j<info.channels; j++) {
\r
8166 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8168 in += info.inJump;
\r
8169 out += info.outJump;
\r
8172 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8173 Float64 *in = (Float64 *)inBuffer;
\r
8174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8175 for (j=0; j<info.channels; j++) {
\r
8176 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8178 in += info.inJump;
\r
8179 out += info.outJump;
\r
8183 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8184 Int32 *out = (Int32 *)outBuffer;
\r
8185 if (info.inFormat == RTAUDIO_SINT8) {
\r
8186 signed char *in = (signed char *)inBuffer;
\r
8187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8188 for (j=0; j<info.channels; j++) {
\r
8189 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8190 out[info.outOffset[j]] <<= 24;
\r
8192 in += info.inJump;
\r
8193 out += info.outJump;
\r
8196 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8197 Int16 *in = (Int16 *)inBuffer;
\r
8198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8199 for (j=0; j<info.channels; j++) {
\r
8200 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8201 out[info.outOffset[j]] <<= 16;
\r
8203 in += info.inJump;
\r
8204 out += info.outJump;
\r
8207 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8208 Int24 *in = (Int24 *)inBuffer;
\r
8209 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8210 for (j=0; j<info.channels; j++) {
\r
8211 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8212 out[info.outOffset[j]] <<= 8;
\r
8214 in += info.inJump;
\r
8215 out += info.outJump;
\r
8218 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8219 // Channel compensation and/or (de)interleaving only.
\r
8220 Int32 *in = (Int32 *)inBuffer;
\r
8221 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8222 for (j=0; j<info.channels; j++) {
\r
8223 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8225 in += info.inJump;
\r
8226 out += info.outJump;
\r
8229 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8230 Float32 *in = (Float32 *)inBuffer;
\r
8231 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8232 for (j=0; j<info.channels; j++) {
\r
8233 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8235 in += info.inJump;
\r
8236 out += info.outJump;
\r
8239 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8240 Float64 *in = (Float64 *)inBuffer;
\r
8241 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8242 for (j=0; j<info.channels; j++) {
\r
8243 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8245 in += info.inJump;
\r
8246 out += info.outJump;
\r
8250 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8251 Int24 *out = (Int24 *)outBuffer;
\r
8252 if (info.inFormat == RTAUDIO_SINT8) {
\r
8253 signed char *in = (signed char *)inBuffer;
\r
8254 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8255 for (j=0; j<info.channels; j++) {
\r
8256 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8257 //out[info.outOffset[j]] <<= 16;
\r
8259 in += info.inJump;
\r
8260 out += info.outJump;
\r
8263 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8264 Int16 *in = (Int16 *)inBuffer;
\r
8265 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8266 for (j=0; j<info.channels; j++) {
\r
8267 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8268 //out[info.outOffset[j]] <<= 8;
\r
8270 in += info.inJump;
\r
8271 out += info.outJump;
\r
8274 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8275 // Channel compensation and/or (de)interleaving only.
\r
8276 Int24 *in = (Int24 *)inBuffer;
\r
8277 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8278 for (j=0; j<info.channels; j++) {
\r
8279 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8281 in += info.inJump;
\r
8282 out += info.outJump;
\r
8285 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8286 Int32 *in = (Int32 *)inBuffer;
\r
8287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8288 for (j=0; j<info.channels; j++) {
\r
8289 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8290 //out[info.outOffset[j]] >>= 8;
\r
8292 in += info.inJump;
\r
8293 out += info.outJump;
\r
8296 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8297 Float32 *in = (Float32 *)inBuffer;
\r
8298 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8299 for (j=0; j<info.channels; j++) {
\r
8300 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8302 in += info.inJump;
\r
8303 out += info.outJump;
\r
8306 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8307 Float64 *in = (Float64 *)inBuffer;
\r
8308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8309 for (j=0; j<info.channels; j++) {
\r
8310 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8312 in += info.inJump;
\r
8313 out += info.outJump;
\r
8317 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8318 Int16 *out = (Int16 *)outBuffer;
\r
8319 if (info.inFormat == RTAUDIO_SINT8) {
\r
8320 signed char *in = (signed char *)inBuffer;
\r
8321 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8322 for (j=0; j<info.channels; j++) {
\r
8323 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8324 out[info.outOffset[j]] <<= 8;
\r
8326 in += info.inJump;
\r
8327 out += info.outJump;
\r
8330 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8331 // Channel compensation and/or (de)interleaving only.
\r
8332 Int16 *in = (Int16 *)inBuffer;
\r
8333 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8334 for (j=0; j<info.channels; j++) {
\r
8335 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8337 in += info.inJump;
\r
8338 out += info.outJump;
\r
8341 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8342 Int24 *in = (Int24 *)inBuffer;
\r
8343 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8344 for (j=0; j<info.channels; j++) {
\r
8345 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8347 in += info.inJump;
\r
8348 out += info.outJump;
\r
8351 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8352 Int32 *in = (Int32 *)inBuffer;
\r
8353 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8354 for (j=0; j<info.channels; j++) {
\r
8355 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8357 in += info.inJump;
\r
8358 out += info.outJump;
\r
8361 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8362 Float32 *in = (Float32 *)inBuffer;
\r
8363 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8364 for (j=0; j<info.channels; j++) {
\r
8365 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8367 in += info.inJump;
\r
8368 out += info.outJump;
\r
8371 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8372 Float64 *in = (Float64 *)inBuffer;
\r
8373 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8374 for (j=0; j<info.channels; j++) {
\r
8375 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8377 in += info.inJump;
\r
8378 out += info.outJump;
\r
8382 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8383 signed char *out = (signed char *)outBuffer;
\r
8384 if (info.inFormat == RTAUDIO_SINT8) {
\r
8385 // Channel compensation and/or (de)interleaving only.
\r
8386 signed char *in = (signed char *)inBuffer;
\r
8387 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8388 for (j=0; j<info.channels; j++) {
\r
8389 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8391 in += info.inJump;
\r
8392 out += info.outJump;
\r
8395 if (info.inFormat == RTAUDIO_SINT16) {
\r
8396 Int16 *in = (Int16 *)inBuffer;
\r
8397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8398 for (j=0; j<info.channels; j++) {
\r
8399 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8401 in += info.inJump;
\r
8402 out += info.outJump;
\r
8405 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8406 Int24 *in = (Int24 *)inBuffer;
\r
8407 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8408 for (j=0; j<info.channels; j++) {
\r
8409 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8411 in += info.inJump;
\r
8412 out += info.outJump;
\r
8415 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8416 Int32 *in = (Int32 *)inBuffer;
\r
8417 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8418 for (j=0; j<info.channels; j++) {
\r
8419 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8421 in += info.inJump;
\r
8422 out += info.outJump;
\r
8425 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8426 Float32 *in = (Float32 *)inBuffer;
\r
8427 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8428 for (j=0; j<info.channels; j++) {
\r
8429 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8431 in += info.inJump;
\r
8432 out += info.outJump;
\r
8435 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8436 Float64 *in = (Float64 *)inBuffer;
\r
8437 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8438 for (j=0; j<info.channels; j++) {
\r
8439 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8441 in += info.inJump;
\r
8442 out += info.outJump;
\r
8448 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8449 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8450 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8452 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8454 register char val;
\r
8455 register char *ptr;
\r
8458 if ( format == RTAUDIO_SINT16 ) {
\r
8459 for ( unsigned int i=0; i<samples; i++ ) {
\r
8460 // Swap 1st and 2nd bytes.
\r
8462 *(ptr) = *(ptr+1);
\r
8465 // Increment 2 bytes.
\r
8469 else if ( format == RTAUDIO_SINT32 ||
\r
8470 format == RTAUDIO_FLOAT32 ) {
\r
8471 for ( unsigned int i=0; i<samples; i++ ) {
\r
8472 // Swap 1st and 4th bytes.
\r
8474 *(ptr) = *(ptr+3);
\r
8477 // Swap 2nd and 3rd bytes.
\r
8480 *(ptr) = *(ptr+1);
\r
8483 // Increment 3 more bytes.
\r
8487 else if ( format == RTAUDIO_SINT24 ) {
\r
8488 for ( unsigned int i=0; i<samples; i++ ) {
\r
8489 // Swap 1st and 3rd bytes.
\r
8491 *(ptr) = *(ptr+2);
\r
8494 // Increment 2 more bytes.
\r
8498 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8499 for ( unsigned int i=0; i<samples; i++ ) {
\r
8500 // Swap 1st and 8th bytes
\r
8502 *(ptr) = *(ptr+7);
\r
8505 // Swap 2nd and 7th bytes
\r
8508 *(ptr) = *(ptr+5);
\r
8511 // Swap 3rd and 6th bytes
\r
8514 *(ptr) = *(ptr+3);
\r
8517 // Swap 4th and 5th bytes
\r
8520 *(ptr) = *(ptr+1);
\r
8523 // Increment 5 more bytes.
\r
8529 // Indentation settings for Vim and Emacs
\r
8531 // Local Variables:
\r
8532 // c-basic-offset: 2
\r
8533 // indent-tabs-mode: nil
\r
8536 // vim: et sts=2 sw=2
\r