1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Windows critical sections on the
// Windows APIs, pthread mutexes on the POSIX-based APIs, and harmless
// dummy expansions when no real-time API was compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_DS__)
\r
105 apis.push_back( WINDOWS_DS );
\r
107 #if defined(__MACOSX_CORE__)
\r
108 apis.push_back( MACOSX_CORE );
\r
110 #if defined(__RTAUDIO_DUMMY__)
\r
111 apis.push_back( RTAUDIO_DUMMY );
\r
115 void RtAudio :: openRtApi( RtAudio::Api api )
\r
121 #if defined(__UNIX_JACK__)
\r
122 if ( api == UNIX_JACK )
\r
123 rtapi_ = new RtApiJack();
\r
125 #if defined(__LINUX_ALSA__)
\r
126 if ( api == LINUX_ALSA )
\r
127 rtapi_ = new RtApiAlsa();
\r
129 #if defined(__LINUX_PULSE__)
\r
130 if ( api == LINUX_PULSE )
\r
131 rtapi_ = new RtApiPulse();
\r
133 #if defined(__LINUX_OSS__)
\r
134 if ( api == LINUX_OSS )
\r
135 rtapi_ = new RtApiOss();
\r
137 #if defined(__WINDOWS_ASIO__)
\r
138 if ( api == WINDOWS_ASIO )
\r
139 rtapi_ = new RtApiAsio();
\r
141 #if defined(__WINDOWS_DS__)
\r
142 if ( api == WINDOWS_DS )
\r
143 rtapi_ = new RtApiDs();
\r
145 #if defined(__MACOSX_CORE__)
\r
146 if ( api == MACOSX_CORE )
\r
147 rtapi_ = new RtApiCore();
\r
149 #if defined(__RTAUDIO_DUMMY__)
\r
150 if ( api == RTAUDIO_DUMMY )
\r
151 rtapi_ = new RtApiDummy();
\r
155 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
159 if ( api != UNSPECIFIED ) {
\r
160 // Attempt to open the specified API.
\r
162 if ( rtapi_ ) return;
\r
164 // No compiled support for specified API value. Issue a debug
\r
165 // warning and continue as if no API was specified.
\r
166 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
169 // Iterate through the compiled APIs and return as soon as we find
\r
170 // one with at least one device or we reach the end of the list.
\r
171 std::vector< RtAudio::Api > apis;
\r
172 getCompiledApi( apis );
\r
173 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
174 openRtApi( apis[i] );
\r
175 if ( rtapi_->getDeviceCount() ) break;
\r
178 if ( rtapi_ ) return;
\r
180 // It should not be possible to get here because the preprocessor
\r
181 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
182 // API-specific definitions are passed to the compiler. But just in
\r
183 // case something weird happens, we'll print out an error message.
\r
184 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
185 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
188 RtAudio :: ~RtAudio() throw()
\r
193 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
194 RtAudio::StreamParameters *inputParameters,
\r
195 RtAudioFormat format, unsigned int sampleRate,
\r
196 unsigned int *bufferFrames,
\r
197 RtAudioCallback callback, void *userData,
\r
198 RtAudio::StreamOptions *options,
\r
199 RtAudioErrorCallback errorCallback )
\r
201 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
202 sampleRate, bufferFrames, callback,
\r
203 userData, options, errorCallback );
\r
206 // *************************************************** //
\r
208 // Public RtApi definitions (see end of file for
\r
209 // private or protected utility functions).
\r
211 // *************************************************** //
\r
215 stream_.state = STREAM_CLOSED;
\r
216 stream_.mode = UNINITIALIZED;
\r
217 stream_.apiHandle = 0;
\r
218 stream_.userBuffer[0] = 0;
\r
219 stream_.userBuffer[1] = 0;
\r
220 MUTEX_INITIALIZE( &stream_.mutex );
\r
221 showWarnings_ = true;
\r
222 firstErrorOccurred_ = false;
\r
227 MUTEX_DESTROY( &stream_.mutex );
\r
230 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
231 RtAudio::StreamParameters *iParams,
\r
232 RtAudioFormat format, unsigned int sampleRate,
\r
233 unsigned int *bufferFrames,
\r
234 RtAudioCallback callback, void *userData,
\r
235 RtAudio::StreamOptions *options,
\r
236 RtAudioErrorCallback errorCallback )
\r
238 if ( stream_.state != STREAM_CLOSED ) {
\r
239 errorText_ = "RtApi::openStream: a stream is already open!";
\r
240 error( RtAudioError::INVALID_USE );
\r
244 if ( oParams && oParams->nChannels < 1 ) {
\r
245 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
246 error( RtAudioError::INVALID_USE );
\r
250 if ( iParams && iParams->nChannels < 1 ) {
\r
251 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
252 error( RtAudioError::INVALID_USE );
\r
256 if ( oParams == NULL && iParams == NULL ) {
\r
257 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 if ( formatBytes(format) == 0 ) {
\r
263 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 unsigned int nDevices = getDeviceCount();
\r
269 unsigned int oChannels = 0;
\r
271 oChannels = oParams->nChannels;
\r
272 if ( oParams->deviceId >= nDevices ) {
\r
273 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
274 error( RtAudioError::INVALID_USE );
\r
279 unsigned int iChannels = 0;
\r
281 iChannels = iParams->nChannels;
\r
282 if ( iParams->deviceId >= nDevices ) {
\r
283 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
284 error( RtAudioError::INVALID_USE );
\r
292 if ( oChannels > 0 ) {
\r
294 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
295 sampleRate, format, bufferFrames, options );
\r
296 if ( result == false ) {
\r
297 error( RtAudioError::SYSTEM_ERROR );
\r
302 if ( iChannels > 0 ) {
\r
304 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 if ( oChannels > 0 ) closeStream();
\r
308 error( RtAudioError::SYSTEM_ERROR );
\r
313 stream_.callbackInfo.callback = (void *) callback;
\r
314 stream_.callbackInfo.userData = userData;
\r
315 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
317 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
318 stream_.state = STREAM_STOPPED;
\r
321 unsigned int RtApi :: getDefaultInputDevice( void )
\r
323 // Should be implemented in subclasses if possible.
\r
327 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
329 // Should be implemented in subclasses if possible.
\r
333 void RtApi :: closeStream( void )
\r
335 // MUST be implemented in subclasses!
\r
339 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
340 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
341 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
342 RtAudio::StreamOptions * /*options*/ )
\r
344 // MUST be implemented in subclasses!
\r
348 void RtApi :: tickStreamTime( void )
\r
350 // Subclasses that do not provide their own implementation of
\r
351 // getStreamTime should call this function once per buffer I/O to
\r
352 // provide basic stream time support.
\r
354 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
356 #if defined( HAVE_GETTIMEOFDAY )
\r
357 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
361 long RtApi :: getStreamLatency( void )
\r
365 long totalLatency = 0;
\r
366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
367 totalLatency = stream_.latency[0];
\r
368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
369 totalLatency += stream_.latency[1];
\r
371 return totalLatency;
\r
374 double RtApi :: getStreamTime( void )
\r
378 #if defined( HAVE_GETTIMEOFDAY )
\r
379 // Return a very accurate estimate of the stream time by
\r
380 // adding in the elapsed time since the last tick.
\r
381 struct timeval then;
\r
382 struct timeval now;
\r
384 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
385 return stream_.streamTime;
\r
387 gettimeofday( &now, NULL );
\r
388 then = stream_.lastTickTimestamp;
\r
389 return stream_.streamTime +
\r
390 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
391 (then.tv_sec + 0.000001 * then.tv_usec));
\r
393 return stream_.streamTime;
\r
397 unsigned int RtApi :: getStreamSampleRate( void )
\r
401 return stream_.sampleRate;
\r
405 // *************************************************** //
\r
407 // OS/API-specific methods.
\r
409 // *************************************************** //
\r
411 #if defined(__MACOSX_CORE__)
\r
413 // The OS X CoreAudio API is designed to use a separate callback
\r
414 // procedure for each of its audio devices. A single RtAudio duplex
\r
415 // stream using two different devices is supported here, though it
\r
416 // cannot be guaranteed to always behave correctly because we cannot
\r
417 // synchronize these two callbacks.
\r
419 // A property listener is installed for over/underrun information.
\r
420 // However, no functionality is currently provided to allow property
\r
421 // listeners to trigger user handlers because it is unclear what could
\r
422 // be done if a critical stream parameter (buffer size, sample rate,
\r
423 // device disconnect) notification arrived. The listeners entail
\r
424 // quite a bit of extra code and most likely, a user program wouldn't
\r
425 // be prepared for the result anyway. However, we do provide a flag
\r
426 // to the client callback function to inform of an over/underrun.
\r
428 // A structure to hold various information related to the CoreAudio API
\r
430 struct CoreHandle {
\r
431 AudioDeviceID id[2]; // device ids
\r
432 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
433 AudioDeviceIOProcID procId[2];
\r
435 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
436 UInt32 nStreams[2]; // number of streams to use
\r
438 char *deviceBuffer;
\r
439 pthread_cond_t condition;
\r
440 int drainCounter; // Tracks callback counts when draining
\r
441 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
444 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
447 RtApiCore:: RtApiCore()
\r
449 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
450 // This is a largely undocumented but absolutely necessary
\r
451 // requirement starting with OS-X 10.6. If not called, queries and
\r
452 // updates to various audio device properties are not handled
\r
454 CFRunLoopRef theRunLoop = NULL;
\r
455 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
456 kAudioObjectPropertyScopeGlobal,
\r
457 kAudioObjectPropertyElementMaster };
\r
458 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
459 if ( result != noErr ) {
\r
460 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
461 error( RtAudioError::WARNING );
\r
466 RtApiCore :: ~RtApiCore()
\r
468 // The subclass destructor gets called before the base class
\r
469 // destructor, so close an existing stream before deallocating
\r
470 // apiDeviceId memory.
\r
471 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
474 unsigned int RtApiCore :: getDeviceCount( void )
\r
476 // Find out how many audio devices there are, if any.
\r
478 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
479 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
480 if ( result != noErr ) {
\r
481 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
482 error( RtAudioError::WARNING );
\r
486 return dataSize / sizeof( AudioDeviceID );
\r
489 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
491 unsigned int nDevices = getDeviceCount();
\r
492 if ( nDevices <= 1 ) return 0;
\r
495 UInt32 dataSize = sizeof( AudioDeviceID );
\r
496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
500 error( RtAudioError::WARNING );
\r
504 dataSize *= nDevices;
\r
505 AudioDeviceID deviceList[ nDevices ];
\r
506 property.mSelector = kAudioHardwarePropertyDevices;
\r
507 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
510 error( RtAudioError::WARNING );
\r
514 for ( unsigned int i=0; i<nDevices; i++ )
\r
515 if ( id == deviceList[i] ) return i;
\r
517 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
518 error( RtAudioError::WARNING );
\r
522 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
524 unsigned int nDevices = getDeviceCount();
\r
525 if ( nDevices <= 1 ) return 0;
\r
528 UInt32 dataSize = sizeof( AudioDeviceID );
\r
529 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
530 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
531 if ( result != noErr ) {
\r
532 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
533 error( RtAudioError::WARNING );
\r
537 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
538 AudioDeviceID deviceList[ nDevices ];
\r
539 property.mSelector = kAudioHardwarePropertyDevices;
\r
540 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
543 error( RtAudioError::WARNING );
\r
547 for ( unsigned int i=0; i<nDevices; i++ )
\r
548 if ( id == deviceList[i] ) return i;
\r
550 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
551 error( RtAudioError::WARNING );
\r
555 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
557 RtAudio::DeviceInfo info;
\r
558 info.probed = false;
\r
561 unsigned int nDevices = getDeviceCount();
\r
562 if ( nDevices == 0 ) {
\r
563 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
564 error( RtAudioError::INVALID_USE );
\r
568 if ( device >= nDevices ) {
\r
569 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
570 error( RtAudioError::INVALID_USE );
\r
574 AudioDeviceID deviceList[ nDevices ];
\r
575 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
576 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
577 kAudioObjectPropertyScopeGlobal,
\r
578 kAudioObjectPropertyElementMaster };
\r
579 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
580 0, NULL, &dataSize, (void *) &deviceList );
\r
581 if ( result != noErr ) {
\r
582 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
583 error( RtAudioError::WARNING );
\r
587 AudioDeviceID id = deviceList[ device ];
\r
589 // Get the device name.
\r
591 CFStringRef cfname;
\r
592 dataSize = sizeof( CFStringRef );
\r
593 property.mSelector = kAudioObjectPropertyManufacturer;
\r
594 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
595 if ( result != noErr ) {
\r
596 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
597 errorText_ = errorStream_.str();
\r
598 error( RtAudioError::WARNING );
\r
602 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
603 int length = CFStringGetLength(cfname);
\r
604 char *mname = (char *)malloc(length * 3 + 1);
\r
605 #if defined( UNICODE ) || defined( _UNICODE )
\r
606 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
608 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
610 info.name.append( (const char *)mname, strlen(mname) );
\r
611 info.name.append( ": " );
\r
612 CFRelease( cfname );
\r
615 property.mSelector = kAudioObjectPropertyName;
\r
616 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
617 if ( result != noErr ) {
\r
618 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
619 errorText_ = errorStream_.str();
\r
620 error( RtAudioError::WARNING );
\r
624 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
625 length = CFStringGetLength(cfname);
\r
626 char *name = (char *)malloc(length * 3 + 1);
\r
627 #if defined( UNICODE ) || defined( _UNICODE )
\r
628 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
630 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
632 info.name.append( (const char *)name, strlen(name) );
\r
633 CFRelease( cfname );
\r
636 // Get the output stream "configuration".
\r
637 AudioBufferList *bufferList = nil;
\r
638 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
639 property.mScope = kAudioDevicePropertyScopeOutput;
\r
640 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
642 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
643 if ( result != noErr || dataSize == 0 ) {
\r
644 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
645 errorText_ = errorStream_.str();
\r
646 error( RtAudioError::WARNING );
\r
650 // Allocate the AudioBufferList.
\r
651 bufferList = (AudioBufferList *) malloc( dataSize );
\r
652 if ( bufferList == NULL ) {
\r
653 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
654 error( RtAudioError::WARNING );
\r
658 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
659 if ( result != noErr || dataSize == 0 ) {
\r
660 free( bufferList );
\r
661 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
662 errorText_ = errorStream_.str();
\r
663 error( RtAudioError::WARNING );
\r
667 // Get output channel information.
\r
668 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
669 for ( i=0; i<nStreams; i++ )
\r
670 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
671 free( bufferList );
\r
673 // Get the input stream "configuration".
\r
674 property.mScope = kAudioDevicePropertyScopeInput;
\r
675 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
676 if ( result != noErr || dataSize == 0 ) {
\r
677 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
678 errorText_ = errorStream_.str();
\r
679 error( RtAudioError::WARNING );
\r
683 // Allocate the AudioBufferList.
\r
684 bufferList = (AudioBufferList *) malloc( dataSize );
\r
685 if ( bufferList == NULL ) {
\r
686 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
687 error( RtAudioError::WARNING );
\r
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
692 if (result != noErr || dataSize == 0) {
\r
693 free( bufferList );
\r
694 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
695 errorText_ = errorStream_.str();
\r
696 error( RtAudioError::WARNING );
\r
700 // Get input channel information.
\r
701 nStreams = bufferList->mNumberBuffers;
\r
702 for ( i=0; i<nStreams; i++ )
\r
703 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
704 free( bufferList );
\r
706 // If device opens for both playback and capture, we determine the channels.
\r
707 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
708 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
710 // Probe the device sample rates.
\r
711 bool isInput = false;
\r
712 if ( info.outputChannels == 0 ) isInput = true;
\r
714 // Determine the supported sample rates.
\r
715 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
716 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
717 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
718 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
719 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
720 errorText_ = errorStream_.str();
\r
721 error( RtAudioError::WARNING );
\r
725 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
726 AudioValueRange rangeList[ nRanges ];
\r
727 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
728 if ( result != kAudioHardwareNoError ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // The sample rate reporting mechanism is a bit of a mystery. It
\r
736 // seems that it can either return individual rates or a range of
\r
737 // rates. I assume that if the min / max range values are the same,
\r
738 // then that represents a single supported rate and if the min / max
\r
739 // range values are different, the device supports an arbitrary
\r
740 // range of values (though there might be multiple ranges, so we'll
\r
741 // use the most conservative range).
\r
742 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
743 bool haveValueRange = false;
\r
744 info.sampleRates.clear();
\r
745 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
746 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
747 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
749 haveValueRange = true;
\r
750 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
751 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
755 if ( haveValueRange ) {
\r
756 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
757 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
758 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
762 // Sort and remove any redundant values
\r
763 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
764 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
766 if ( info.sampleRates.size() == 0 ) {
\r
767 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
768 errorText_ = errorStream_.str();
\r
769 error( RtAudioError::WARNING );
\r
773 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
774 // Thus, any other "physical" formats supported by the device are of
\r
775 // no interest to the client.
\r
776 info.nativeFormats = RTAUDIO_FLOAT32;
\r
778 if ( info.outputChannels > 0 )
\r
779 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
780 if ( info.inputChannels > 0 )
\r
781 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
783 info.probed = true;
\r
787 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
788 const AudioTimeStamp* /*inNow*/,
\r
789 const AudioBufferList* inInputData,
\r
790 const AudioTimeStamp* /*inInputTime*/,
\r
791 AudioBufferList* outOutputData,
\r
792 const AudioTimeStamp* /*inOutputTime*/,
\r
793 void* infoPointer )
\r
795 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
797 RtApiCore *object = (RtApiCore *) info->object;
\r
798 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
799 return kAudioHardwareUnspecifiedError;
\r
801 return kAudioHardwareNoError;
\r
804 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
806 const AudioObjectPropertyAddress properties[],
\r
807 void* handlePointer )
\r
809 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
810 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
811 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
812 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
813 handle->xrun[1] = true;
\r
815 handle->xrun[0] = true;
\r
819 return kAudioHardwareNoError;
\r
822 static OSStatus rateListener( AudioObjectID inDevice,
\r
823 UInt32 /*nAddresses*/,
\r
824 const AudioObjectPropertyAddress /*properties*/[],
\r
825 void* ratePointer )
\r
827 Float64 *rate = (Float64 *) ratePointer;
\r
828 UInt32 dataSize = sizeof( Float64 );
\r
829 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
830 kAudioObjectPropertyScopeGlobal,
\r
831 kAudioObjectPropertyElementMaster };
\r
832 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
833 return kAudioHardwareNoError;
\r
836 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
837 unsigned int firstChannel, unsigned int sampleRate,
\r
838 RtAudioFormat format, unsigned int *bufferSize,
\r
839 RtAudio::StreamOptions *options )
\r
842 unsigned int nDevices = getDeviceCount();
\r
843 if ( nDevices == 0 ) {
\r
844 // This should not happen because a check is made before this function is called.
\r
845 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
849 if ( device >= nDevices ) {
\r
850 // This should not happen because a check is made before this function is called.
\r
851 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
855 AudioDeviceID deviceList[ nDevices ];
\r
856 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
857 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
858 kAudioObjectPropertyScopeGlobal,
\r
859 kAudioObjectPropertyElementMaster };
\r
860 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
861 0, NULL, &dataSize, (void *) &deviceList );
\r
862 if ( result != noErr ) {
\r
863 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
867 AudioDeviceID id = deviceList[ device ];
\r
869 // Setup for stream mode.
\r
870 bool isInput = false;
\r
871 if ( mode == INPUT ) {
\r
873 property.mScope = kAudioDevicePropertyScopeInput;
\r
876 property.mScope = kAudioDevicePropertyScopeOutput;
\r
878 // Get the stream "configuration".
\r
879 AudioBufferList *bufferList = nil;
\r
881 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
882 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
883 if ( result != noErr || dataSize == 0 ) {
\r
884 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
885 errorText_ = errorStream_.str();
\r
889 // Allocate the AudioBufferList.
\r
890 bufferList = (AudioBufferList *) malloc( dataSize );
\r
891 if ( bufferList == NULL ) {
\r
892 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
896 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
897 if (result != noErr || dataSize == 0) {
\r
898 free( bufferList );
\r
899 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
900 errorText_ = errorStream_.str();
\r
904 // Search for one or more streams that contain the desired number of
\r
905 // channels. CoreAudio devices can have an arbitrary number of
\r
906 // streams and each stream can have an arbitrary number of channels.
\r
907 // For each stream, a single buffer of interleaved samples is
\r
908 // provided. RtAudio prefers the use of one stream of interleaved
\r
909 // data or multiple consecutive single-channel streams. However, we
\r
910 // now support multiple consecutive multi-channel streams of
\r
911 // interleaved data as well.
\r
912 UInt32 iStream, offsetCounter = firstChannel;
\r
913 UInt32 nStreams = bufferList->mNumberBuffers;
\r
914 bool monoMode = false;
\r
915 bool foundStream = false;
\r
917 // First check that the device supports the requested number of
\r
919 UInt32 deviceChannels = 0;
\r
920 for ( iStream=0; iStream<nStreams; iStream++ )
\r
921 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
923 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
924 free( bufferList );
\r
925 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
926 errorText_ = errorStream_.str();
\r
930 // Look for a single stream meeting our needs.
\r
931 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
932 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
933 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
934 if ( streamChannels >= channels + offsetCounter ) {
\r
935 firstStream = iStream;
\r
936 channelOffset = offsetCounter;
\r
937 foundStream = true;
\r
940 if ( streamChannels > offsetCounter ) break;
\r
941 offsetCounter -= streamChannels;
\r
944 // If we didn't find a single stream above, then we should be able
\r
945 // to meet the channel specification with multiple streams.
\r
946 if ( foundStream == false ) {
\r
948 offsetCounter = firstChannel;
\r
949 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
950 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
951 if ( streamChannels > offsetCounter ) break;
\r
952 offsetCounter -= streamChannels;
\r
955 firstStream = iStream;
\r
956 channelOffset = offsetCounter;
\r
957 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
959 if ( streamChannels > 1 ) monoMode = false;
\r
960 while ( channelCounter > 0 ) {
\r
961 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
962 if ( streamChannels > 1 ) monoMode = false;
\r
963 channelCounter -= streamChannels;
\r
968 free( bufferList );
\r
970 // Determine the buffer size.
\r
971 AudioValueRange bufferRange;
\r
972 dataSize = sizeof( AudioValueRange );
\r
973 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
974 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
976 if ( result != noErr ) {
\r
977 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
978 errorText_ = errorStream_.str();
\r
982 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
983 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
984 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
986 // Set the buffer size. For multiple streams, I'm assuming we only
\r
987 // need to make this setting for the master channel.
\r
988 UInt32 theSize = (UInt32) *bufferSize;
\r
989 dataSize = sizeof( UInt32 );
\r
990 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
991 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
993 if ( result != noErr ) {
\r
994 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
995 errorText_ = errorStream_.str();
\r
999 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1000 // MUST be the same in both directions!
\r
1001 *bufferSize = theSize;
\r
1002 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1004 errorText_ = errorStream_.str();
\r
1008 stream_.bufferSize = *bufferSize;
\r
1009 stream_.nBuffers = 1;
\r
1011 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1012 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1014 dataSize = sizeof( hog_pid );
\r
1015 property.mSelector = kAudioDevicePropertyHogMode;
\r
1016 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1017 if ( result != noErr ) {
\r
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1019 errorText_ = errorStream_.str();
\r
1023 if ( hog_pid != getpid() ) {
\r
1024 hog_pid = getpid();
\r
1025 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1026 if ( result != noErr ) {
\r
1027 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1028 errorText_ = errorStream_.str();
\r
1034 // Check and if necessary, change the sample rate for the device.
\r
1035 Float64 nominalRate;
\r
1036 dataSize = sizeof( Float64 );
\r
1037 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1038 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1039 if ( result != noErr ) {
\r
1040 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1041 errorText_ = errorStream_.str();
\r
1045 // Only change the sample rate if off by more than 1 Hz.
\r
1046 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1048 // Set a property listener for the sample rate change
\r
1049 Float64 reportedRate = 0.0;
\r
1050 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1051 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1052 if ( result != noErr ) {
\r
1053 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1054 errorText_ = errorStream_.str();
\r
1058 nominalRate = (Float64) sampleRate;
\r
1059 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1060 if ( result != noErr ) {
\r
1061 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1063 errorText_ = errorStream_.str();
\r
1067 // Now wait until the reported nominal rate is what we just set.
\r
1068 UInt32 microCounter = 0;
\r
1069 while ( reportedRate != nominalRate ) {
\r
1070 microCounter += 5000;
\r
1071 if ( microCounter > 5000000 ) break;
\r
1075 // Remove the property listener.
\r
1076 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1078 if ( microCounter > 5000000 ) {
\r
1079 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1080 errorText_ = errorStream_.str();
\r
1085 // Now set the stream format for all streams. Also, check the
\r
1086 // physical format of the device and change that if necessary.
\r
1087 AudioStreamBasicDescription description;
\r
1088 dataSize = sizeof( AudioStreamBasicDescription );
\r
1089 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1090 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1091 if ( result != noErr ) {
\r
1092 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1093 errorText_ = errorStream_.str();
\r
1097 // Set the sample rate and data format id. However, only make the
\r
1098 // change if the sample rate is not within 1.0 of the desired
\r
1099 // rate and the format is not linear pcm.
\r
1100 bool updateFormat = false;
\r
1101 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1102 description.mSampleRate = (Float64) sampleRate;
\r
1103 updateFormat = true;
\r
1106 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1107 description.mFormatID = kAudioFormatLinearPCM;
\r
1108 updateFormat = true;
\r
1111 if ( updateFormat ) {
\r
1112 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1113 if ( result != noErr ) {
\r
1114 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1115 errorText_ = errorStream_.str();
\r
1120 // Now check the physical format.
\r
1121 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1122 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1129 //std::cout << "Current physical stream format:" << std::endl;
\r
1130 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1131 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1132 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1133 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1135 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1136 description.mFormatID = kAudioFormatLinearPCM;
\r
1137 //description.mSampleRate = (Float64) sampleRate;
\r
1138 AudioStreamBasicDescription testDescription = description;
\r
1139 UInt32 formatFlags;
\r
1141 // We'll try higher bit rates first and then work our way down.
\r
1142 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1143 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1144 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1145 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1146 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1147 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1148 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1149 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1150 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1151 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1152 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1153 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1156 bool setPhysicalFormat = false;
\r
1157 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1158 testDescription = description;
\r
1159 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1160 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1161 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1162 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1164 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1165 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1167 if ( result == noErr ) {
\r
1168 setPhysicalFormat = true;
\r
1169 //std::cout << "Updated physical stream format:" << std::endl;
\r
1170 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1171 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1172 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1173 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1178 if ( !setPhysicalFormat ) {
\r
1179 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1180 errorText_ = errorStream_.str();
\r
1183 } // done setting virtual/physical formats.
\r
1185 // Get the stream / device latency.
\r
1187 dataSize = sizeof( UInt32 );
\r
1188 property.mSelector = kAudioDevicePropertyLatency;
\r
1189 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1190 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1191 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1193 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1194 errorText_ = errorStream_.str();
\r
1195 error( RtAudioError::WARNING );
\r
1199 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1200 // always be presented in native-endian format, so we should never
\r
1201 // need to byte swap.
\r
1202 stream_.doByteSwap[mode] = false;
\r
1204 // From the CoreAudio documentation, PCM data must be supplied as
\r
1206 stream_.userFormat = format;
\r
1207 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1209 if ( streamCount == 1 )
\r
1210 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1211 else // multiple streams
\r
1212 stream_.nDeviceChannels[mode] = channels;
\r
1213 stream_.nUserChannels[mode] = channels;
\r
1214 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1215 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1216 else stream_.userInterleaved = true;
\r
1217 stream_.deviceInterleaved[mode] = true;
\r
1218 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1220 // Set flags for buffer conversion.
\r
1221 stream_.doConvertBuffer[mode] = false;
\r
1222 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1223 stream_.doConvertBuffer[mode] = true;
\r
1224 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1225 stream_.doConvertBuffer[mode] = true;
\r
1226 if ( streamCount == 1 ) {
\r
1227 if ( stream_.nUserChannels[mode] > 1 &&
\r
1228 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1229 stream_.doConvertBuffer[mode] = true;
\r
1231 else if ( monoMode && stream_.userInterleaved )
\r
1232 stream_.doConvertBuffer[mode] = true;
\r
1234 // Allocate our CoreHandle structure for the stream.
\r
1235 CoreHandle *handle = 0;
\r
1236 if ( stream_.apiHandle == 0 ) {
\r
1238 handle = new CoreHandle;
\r
1240 catch ( std::bad_alloc& ) {
\r
1241 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1245 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1246 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1249 stream_.apiHandle = (void *) handle;
\r
1252 handle = (CoreHandle *) stream_.apiHandle;
\r
1253 handle->iStream[mode] = firstStream;
\r
1254 handle->nStreams[mode] = streamCount;
\r
1255 handle->id[mode] = id;
\r
1257 // Allocate necessary internal buffers.
\r
1258 unsigned long bufferBytes;
\r
1259 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1260 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1261 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1262 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1263 if ( stream_.userBuffer[mode] == NULL ) {
\r
1264 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1268 // If possible, we will make use of the CoreAudio stream buffers as
\r
1269 // "device buffers". However, we can't do this if using multiple
\r
1271 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1273 bool makeBuffer = true;
\r
1274 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1275 if ( mode == INPUT ) {
\r
1276 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1277 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1278 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1282 if ( makeBuffer ) {
\r
1283 bufferBytes *= *bufferSize;
\r
1284 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1285 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1286 if ( stream_.deviceBuffer == NULL ) {
\r
1287 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1293 stream_.sampleRate = sampleRate;
\r
1294 stream_.device[mode] = device;
\r
1295 stream_.state = STREAM_STOPPED;
\r
1296 stream_.callbackInfo.object = (void *) this;
\r
1298 // Setup the buffer conversion information structure.
\r
1299 if ( stream_.doConvertBuffer[mode] ) {
\r
1300 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1301 else setConvertInfo( mode, channelOffset );
\r
1304 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1305 // Only one callback procedure per device.
\r
1306 stream_.mode = DUPLEX;
\r
1308 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1309 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1311 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1312 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1314 if ( result != noErr ) {
\r
1315 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1316 errorText_ = errorStream_.str();
\r
1319 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1320 stream_.mode = DUPLEX;
\r
1322 stream_.mode = mode;
\r
1325 // Setup the device property listener for over/underload.
\r
1326 property.mSelector = kAudioDeviceProcessorOverload;
\r
1327 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1328 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1334 pthread_cond_destroy( &handle->condition );
\r
1336 stream_.apiHandle = 0;
\r
1339 for ( int i=0; i<2; i++ ) {
\r
1340 if ( stream_.userBuffer[i] ) {
\r
1341 free( stream_.userBuffer[i] );
\r
1342 stream_.userBuffer[i] = 0;
\r
1346 if ( stream_.deviceBuffer ) {
\r
1347 free( stream_.deviceBuffer );
\r
1348 stream_.deviceBuffer = 0;
\r
1351 stream_.state = STREAM_CLOSED;
\r
1355 void RtApiCore :: closeStream( void )
\r
1357 if ( stream_.state == STREAM_CLOSED ) {
\r
1358 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1359 error( RtAudioError::WARNING );
\r
1363 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1364 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1365 if ( stream_.state == STREAM_RUNNING )
\r
1366 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1367 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1368 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1370 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1371 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1375 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1376 if ( stream_.state == STREAM_RUNNING )
\r
1377 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1378 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1379 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1381 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1382 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1386 for ( int i=0; i<2; i++ ) {
\r
1387 if ( stream_.userBuffer[i] ) {
\r
1388 free( stream_.userBuffer[i] );
\r
1389 stream_.userBuffer[i] = 0;
\r
1393 if ( stream_.deviceBuffer ) {
\r
1394 free( stream_.deviceBuffer );
\r
1395 stream_.deviceBuffer = 0;
\r
1398 // Destroy pthread condition variable.
\r
1399 pthread_cond_destroy( &handle->condition );
\r
1401 stream_.apiHandle = 0;
\r
1403 stream_.mode = UNINITIALIZED;
\r
1404 stream_.state = STREAM_CLOSED;
\r
1407 void RtApiCore :: startStream( void )
\r
1410 if ( stream_.state == STREAM_RUNNING ) {
\r
1411 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1412 error( RtAudioError::WARNING );
\r
1416 OSStatus result = noErr;
\r
1417 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1418 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1420 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1421 if ( result != noErr ) {
\r
1422 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1423 errorText_ = errorStream_.str();
\r
1428 if ( stream_.mode == INPUT ||
\r
1429 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1431 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1432 if ( result != noErr ) {
\r
1433 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1434 errorText_ = errorStream_.str();
\r
1439 handle->drainCounter = 0;
\r
1440 handle->internalDrain = false;
\r
1441 stream_.state = STREAM_RUNNING;
\r
1444 if ( result == noErr ) return;
\r
1445 error( RtAudioError::SYSTEM_ERROR );
\r
1448 void RtApiCore :: stopStream( void )
\r
1451 if ( stream_.state == STREAM_STOPPED ) {
\r
1452 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1453 error( RtAudioError::WARNING );
\r
1457 OSStatus result = noErr;
\r
1458 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1459 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1461 if ( handle->drainCounter == 0 ) {
\r
1462 handle->drainCounter = 2;
\r
1463 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1466 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1467 if ( result != noErr ) {
\r
1468 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1469 errorText_ = errorStream_.str();
\r
1474 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1476 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 stream_.state = STREAM_STOPPED;
\r
1487 if ( result == noErr ) return;
\r
1488 error( RtAudioError::SYSTEM_ERROR );
\r
1491 void RtApiCore :: abortStream( void )
\r
1494 if ( stream_.state == STREAM_STOPPED ) {
\r
1495 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1496 error( RtAudioError::WARNING );
\r
1500 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1501 handle->drainCounter = 2;
\r
1506 // This function will be called by a spawned thread when the user
\r
1507 // callback function signals that the stream should be stopped or
\r
1508 // aborted. It is better to handle it this way because the
\r
1509 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1510 // function is called.
\r
1511 static void *coreStopStream( void *ptr )
\r
1513 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1514 RtApiCore *object = (RtApiCore *) info->object;
\r
1516 object->stopStream();
\r
1517 pthread_exit( NULL );
\r
1520 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1521 const AudioBufferList *inBufferList,
\r
1522 const AudioBufferList *outBufferList )
\r
1524 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1525 if ( stream_.state == STREAM_CLOSED ) {
\r
1526 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1527 error( RtAudioError::WARNING );
\r
1531 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1532 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1534 // Check if we were draining the stream and signal is finished.
\r
1535 if ( handle->drainCounter > 3 ) {
\r
1536 ThreadHandle threadId;
\r
1538 stream_.state = STREAM_STOPPING;
\r
1539 if ( handle->internalDrain == true )
\r
1540 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1541 else // external call to stopStream()
\r
1542 pthread_cond_signal( &handle->condition );
\r
1546 AudioDeviceID outputDevice = handle->id[0];
\r
1548 // Invoke user callback to get fresh output data UNLESS we are
\r
1549 // draining stream or duplex mode AND the input/output devices are
\r
1550 // different AND this function is called for the input device.
\r
1551 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1552 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1553 double streamTime = getStreamTime();
\r
1554 RtAudioStreamStatus status = 0;
\r
1555 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1556 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1557 handle->xrun[0] = false;
\r
1559 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1560 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1561 handle->xrun[1] = false;
\r
1564 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1565 stream_.bufferSize, streamTime, status, info->userData );
\r
1566 if ( cbReturnValue == 2 ) {
\r
1567 stream_.state = STREAM_STOPPING;
\r
1568 handle->drainCounter = 2;
\r
1572 else if ( cbReturnValue == 1 ) {
\r
1573 handle->drainCounter = 1;
\r
1574 handle->internalDrain = true;
\r
1578 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1580 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1582 if ( handle->nStreams[0] == 1 ) {
\r
1583 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1585 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1587 else { // fill multiple streams with zeros
\r
1588 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1589 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1591 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1595 else if ( handle->nStreams[0] == 1 ) {
\r
1596 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1597 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1598 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1600 else { // copy from user buffer
\r
1601 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1602 stream_.userBuffer[0],
\r
1603 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1606 else { // fill multiple streams
\r
1607 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1608 if ( stream_.doConvertBuffer[0] ) {
\r
1609 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1613 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1614 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1615 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1616 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1617 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1620 else { // fill multiple multi-channel streams with interleaved data
\r
1621 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1622 Float32 *out, *in;
\r
1624 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1625 UInt32 inChannels = stream_.nUserChannels[0];
\r
1626 if ( stream_.doConvertBuffer[0] ) {
\r
1627 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1628 inChannels = stream_.nDeviceChannels[0];
\r
1631 if ( inInterleaved ) inOffset = 1;
\r
1632 else inOffset = stream_.bufferSize;
\r
1634 channelsLeft = inChannels;
\r
1635 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1637 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1638 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1641 // Account for possible channel offset in first stream
\r
1642 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1643 streamChannels -= stream_.channelOffset[0];
\r
1644 outJump = stream_.channelOffset[0];
\r
1648 // Account for possible unfilled channels at end of the last stream
\r
1649 if ( streamChannels > channelsLeft ) {
\r
1650 outJump = streamChannels - channelsLeft;
\r
1651 streamChannels = channelsLeft;
\r
1654 // Determine input buffer offsets and skips
\r
1655 if ( inInterleaved ) {
\r
1656 inJump = inChannels;
\r
1657 in += inChannels - channelsLeft;
\r
1661 in += (inChannels - channelsLeft) * inOffset;
\r
1664 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1665 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1666 *out++ = in[j*inOffset];
\r
1671 channelsLeft -= streamChannels;
\r
1676 if ( handle->drainCounter ) {
\r
1677 handle->drainCounter++;
\r
1682 AudioDeviceID inputDevice;
\r
1683 inputDevice = handle->id[1];
\r
1684 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1686 if ( handle->nStreams[1] == 1 ) {
\r
1687 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1688 convertBuffer( stream_.userBuffer[1],
\r
1689 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1690 stream_.convertInfo[1] );
\r
1692 else { // copy to user buffer
\r
1693 memcpy( stream_.userBuffer[1],
\r
1694 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1695 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1698 else { // read from multiple streams
\r
1699 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1700 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1702 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1703 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1704 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1705 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1706 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1709 else { // read from multiple multi-channel streams
\r
1710 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1711 Float32 *out, *in;
\r
1713 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1714 UInt32 outChannels = stream_.nUserChannels[1];
\r
1715 if ( stream_.doConvertBuffer[1] ) {
\r
1716 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1717 outChannels = stream_.nDeviceChannels[1];
\r
1720 if ( outInterleaved ) outOffset = 1;
\r
1721 else outOffset = stream_.bufferSize;
\r
1723 channelsLeft = outChannels;
\r
1724 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1726 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1727 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1730 // Account for possible channel offset in first stream
\r
1731 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1732 streamChannels -= stream_.channelOffset[1];
\r
1733 inJump = stream_.channelOffset[1];
\r
1737 // Account for possible unread channels at end of the last stream
\r
1738 if ( streamChannels > channelsLeft ) {
\r
1739 inJump = streamChannels - channelsLeft;
\r
1740 streamChannels = channelsLeft;
\r
1743 // Determine output buffer offsets and skips
\r
1744 if ( outInterleaved ) {
\r
1745 outJump = outChannels;
\r
1746 out += outChannels - channelsLeft;
\r
1750 out += (outChannels - channelsLeft) * outOffset;
\r
1753 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1754 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1755 out[j*outOffset] = *in++;
\r
1760 channelsLeft -= streamChannels;
\r
1764 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1765 convertBuffer( stream_.userBuffer[1],
\r
1766 stream_.deviceBuffer,
\r
1767 stream_.convertInfo[1] );
\r
1773 //MUTEX_UNLOCK( &stream_.mutex );
\r
1775 RtApi::tickStreamTime();
\r
1779 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1783 case kAudioHardwareNotRunningError:
\r
1784 return "kAudioHardwareNotRunningError";
\r
1786 case kAudioHardwareUnspecifiedError:
\r
1787 return "kAudioHardwareUnspecifiedError";
\r
1789 case kAudioHardwareUnknownPropertyError:
\r
1790 return "kAudioHardwareUnknownPropertyError";
\r
1792 case kAudioHardwareBadPropertySizeError:
\r
1793 return "kAudioHardwareBadPropertySizeError";
\r
1795 case kAudioHardwareIllegalOperationError:
\r
1796 return "kAudioHardwareIllegalOperationError";
\r
1798 case kAudioHardwareBadObjectError:
\r
1799 return "kAudioHardwareBadObjectError";
\r
1801 case kAudioHardwareBadDeviceError:
\r
1802 return "kAudioHardwareBadDeviceError";
\r
1804 case kAudioHardwareBadStreamError:
\r
1805 return "kAudioHardwareBadStreamError";
\r
1807 case kAudioHardwareUnsupportedOperationError:
\r
1808 return "kAudioHardwareUnsupportedOperationError";
\r
1810 case kAudioDeviceUnsupportedFormatError:
\r
1811 return "kAudioDeviceUnsupportedFormatError";
\r
1813 case kAudioDevicePermissionsError:
\r
1814 return "kAudioDevicePermissionsError";
\r
1817 return "CoreAudio unknown error";
\r
1821 //******************** End of __MACOSX_CORE__ *********************//
\r
1824 #if defined(__UNIX_JACK__)
\r
1826 // JACK is a low-latency audio server, originally written for the
\r
1827 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1828 // connect a number of different applications to an audio device, as
\r
1829 // well as allowing them to share audio between themselves.
\r
1831 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1832 // have ports connected to the server. The JACK server is typically
\r
1833 // started in a terminal as follows:
\r
1835 // .jackd -d alsa -d hw:0
\r
1837 // or through an interface program such as qjackctl. Many of the
\r
1838 // parameters normally set for a stream are fixed by the JACK server
\r
1839 // and can be specified when the JACK server is started. In
\r
1842 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1844 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1845 // frames, and number of buffers = 4. Once the server is running, it
\r
1846 // is not possible to override these values. If the values are not
\r
1847 // specified in the command-line, the JACK server uses default values.
\r
1849 // The JACK server does not have to be running when an instance of
\r
1850 // RtApiJack is created, though the function getDeviceCount() will
\r
1851 // report 0 devices found until JACK has been started. When no
\r
1852 // devices are available (i.e., the JACK server is not running), a
\r
1853 // stream cannot be opened.
\r
1855 #include <jack/jack.h>
\r
1856 #include <unistd.h>
\r
1859 // A structure to hold various information related to the Jack API
\r
1860 // implementation.
\r
1861 struct JackHandle {
\r
1862 jack_client_t *client;
\r
1863 jack_port_t **ports[2];
\r
1864 std::string deviceName[2];
\r
1866 pthread_cond_t condition;
\r
1867 int drainCounter; // Tracks callback counts when draining
\r
1868 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1871 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1874 static void jackSilentError( const char * ) {};
\r
1876 RtApiJack :: RtApiJack()
\r
1878 // Nothing to do here.
\r
1879 #if !defined(__RTAUDIO_DEBUG__)
\r
1880 // Turn off Jack's internal error reporting.
\r
1881 jack_set_error_function( &jackSilentError );
\r
1885 RtApiJack :: ~RtApiJack()
\r
1887 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1890 unsigned int RtApiJack :: getDeviceCount( void )
\r
1892 // See if we can become a jack client.
\r
1893 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1894 jack_status_t *status = NULL;
\r
1895 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1896 if ( client == 0 ) return 0;
\r
1898 const char **ports;
\r
1899 std::string port, previousPort;
\r
1900 unsigned int nChannels = 0, nDevices = 0;
\r
1901 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1903 // Parse the port names up to the first colon (:).
\r
1904 size_t iColon = 0;
\r
1906 port = (char *) ports[ nChannels ];
\r
1907 iColon = port.find(":");
\r
1908 if ( iColon != std::string::npos ) {
\r
1909 port = port.substr( 0, iColon + 1 );
\r
1910 if ( port != previousPort ) {
\r
1912 previousPort = port;
\r
1915 } while ( ports[++nChannels] );
\r
1919 jack_client_close( client );
\r
1923 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1925 RtAudio::DeviceInfo info;
\r
1926 info.probed = false;
\r
1928 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1929 jack_status_t *status = NULL;
\r
1930 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1931 if ( client == 0 ) {
\r
1932 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1933 error( RtAudioError::WARNING );
\r
1937 const char **ports;
\r
1938 std::string port, previousPort;
\r
1939 unsigned int nPorts = 0, nDevices = 0;
\r
1940 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1942 // Parse the port names up to the first colon (:).
\r
1943 size_t iColon = 0;
\r
1945 port = (char *) ports[ nPorts ];
\r
1946 iColon = port.find(":");
\r
1947 if ( iColon != std::string::npos ) {
\r
1948 port = port.substr( 0, iColon );
\r
1949 if ( port != previousPort ) {
\r
1950 if ( nDevices == device ) info.name = port;
\r
1952 previousPort = port;
\r
1955 } while ( ports[++nPorts] );
\r
1959 if ( device >= nDevices ) {
\r
1960 jack_client_close( client );
\r
1961 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1962 error( RtAudioError::INVALID_USE );
\r
1966 // Get the current jack server sample rate.
\r
1967 info.sampleRates.clear();
\r
1968 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1970 // Count the available ports containing the client name as device
\r
1971 // channels. Jack "input ports" equal RtAudio output channels.
\r
1972 unsigned int nChannels = 0;
\r
1973 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1975 while ( ports[ nChannels ] ) nChannels++;
\r
1977 info.outputChannels = nChannels;
\r
1980 // Jack "output ports" equal RtAudio input channels.
\r
1982 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1984 while ( ports[ nChannels ] ) nChannels++;
\r
1986 info.inputChannels = nChannels;
\r
1989 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1990 jack_client_close(client);
\r
1991 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1992 error( RtAudioError::WARNING );
\r
1996 // If device opens for both playback and capture, we determine the channels.
\r
1997 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1998 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2000 // Jack always uses 32-bit floats.
\r
2001 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2003 // Jack doesn't provide default devices so we'll use the first available one.
\r
2004 if ( device == 0 && info.outputChannels > 0 )
\r
2005 info.isDefaultOutput = true;
\r
2006 if ( device == 0 && info.inputChannels > 0 )
\r
2007 info.isDefaultInput = true;
\r
2009 jack_client_close(client);
\r
2010 info.probed = true;
\r
2014 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2016 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2018 RtApiJack *object = (RtApiJack *) info->object;
\r
2019 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2024 // This function will be called by a spawned thread when the Jack
\r
2025 // server signals that it is shutting down. It is necessary to handle
\r
2026 // it this way because the jackShutdown() function must return before
\r
2027 // the jack_deactivate() function (in closeStream()) will return.
\r
2028 static void *jackCloseStream( void *ptr )
\r
2030 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2031 RtApiJack *object = (RtApiJack *) info->object;
\r
2033 object->closeStream();
\r
2035 pthread_exit( NULL );
\r
2037 static void jackShutdown( void *infoPointer )
\r
2039 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2040 RtApiJack *object = (RtApiJack *) info->object;
\r
2042 // Check current stream state. If stopped, then we'll assume this
\r
2043 // was called as a result of a call to RtApiJack::stopStream (the
\r
2044 // deactivation of a client handle causes this function to be called).
\r
2045 // If not, we'll assume the Jack server is shutting down or some
\r
2046 // other problem occurred and we should close the stream.
\r
2047 if ( object->isStreamRunning() == false ) return;
\r
2049 ThreadHandle threadId;
\r
2050 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2051 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2054 static int jackXrun( void *infoPointer )
\r
2056 JackHandle *handle = (JackHandle *) infoPointer;
\r
2058 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2059 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2064 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2065 unsigned int firstChannel, unsigned int sampleRate,
\r
2066 RtAudioFormat format, unsigned int *bufferSize,
\r
2067 RtAudio::StreamOptions *options )
\r
2069 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2071 // Look for jack server and try to become a client (only do once per stream).
\r
2072 jack_client_t *client = 0;
\r
2073 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2074 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2075 jack_status_t *status = NULL;
\r
2076 if ( options && !options->streamName.empty() )
\r
2077 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2079 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2080 if ( client == 0 ) {
\r
2081 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2082 error( RtAudioError::WARNING );
\r
2087 // The handle must have been created on an earlier pass.
\r
2088 client = handle->client;
\r
2091 const char **ports;
\r
2092 std::string port, previousPort, deviceName;
\r
2093 unsigned int nPorts = 0, nDevices = 0;
\r
2094 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2096 // Parse the port names up to the first colon (:).
\r
2097 size_t iColon = 0;
\r
2099 port = (char *) ports[ nPorts ];
\r
2100 iColon = port.find(":");
\r
2101 if ( iColon != std::string::npos ) {
\r
2102 port = port.substr( 0, iColon );
\r
2103 if ( port != previousPort ) {
\r
2104 if ( nDevices == device ) deviceName = port;
\r
2106 previousPort = port;
\r
2109 } while ( ports[++nPorts] );
\r
2113 if ( device >= nDevices ) {
\r
2114 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2118 // Count the available ports containing the client name as device
\r
2119 // channels. Jack "input ports" equal RtAudio output channels.
\r
2120 unsigned int nChannels = 0;
\r
2121 unsigned long flag = JackPortIsInput;
\r
2122 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2123 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2125 while ( ports[ nChannels ] ) nChannels++;
\r
2129 // Compare the jack ports for specified client to the requested number of channels.
\r
2130 if ( nChannels < (channels + firstChannel) ) {
\r
2131 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2132 errorText_ = errorStream_.str();
\r
2136 // Check the jack server sample rate.
\r
2137 unsigned int jackRate = jack_get_sample_rate( client );
\r
2138 if ( sampleRate != jackRate ) {
\r
2139 jack_client_close( client );
\r
2140 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2141 errorText_ = errorStream_.str();
\r
2144 stream_.sampleRate = jackRate;
\r
2146 // Get the latency of the JACK port.
\r
2147 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2148 if ( ports[ firstChannel ] ) {
\r
2149 // Added by Ge Wang
\r
2150 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2151 // the range (usually the min and max are equal)
\r
2152 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2153 // get the latency range
\r
2154 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2155 // be optimistic, use the min!
\r
2156 stream_.latency[mode] = latrange.min;
\r
2157 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2161 // The jack server always uses 32-bit floating-point data.
\r
2162 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2163 stream_.userFormat = format;
\r
2165 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2166 else stream_.userInterleaved = true;
\r
2168 // Jack always uses non-interleaved buffers.
\r
2169 stream_.deviceInterleaved[mode] = false;
\r
2171 // Jack always provides host byte-ordered data.
\r
2172 stream_.doByteSwap[mode] = false;
\r
2174 // Get the buffer size. The buffer size and number of buffers
\r
2175 // (periods) is set when the jack server is started.
\r
2176 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2177 *bufferSize = stream_.bufferSize;
\r
2179 stream_.nDeviceChannels[mode] = channels;
\r
2180 stream_.nUserChannels[mode] = channels;
\r
2182 // Set flags for buffer conversion.
\r
2183 stream_.doConvertBuffer[mode] = false;
\r
2184 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2185 stream_.doConvertBuffer[mode] = true;
\r
2186 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2187 stream_.nUserChannels[mode] > 1 )
\r
2188 stream_.doConvertBuffer[mode] = true;
\r
2190 // Allocate our JackHandle structure for the stream.
\r
2191 if ( handle == 0 ) {
\r
2193 handle = new JackHandle;
\r
2195 catch ( std::bad_alloc& ) {
\r
2196 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2200 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2201 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2204 stream_.apiHandle = (void *) handle;
\r
2205 handle->client = client;
\r
2207 handle->deviceName[mode] = deviceName;
\r
2209 // Allocate necessary internal buffers.
\r
2210 unsigned long bufferBytes;
\r
2211 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2212 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2213 if ( stream_.userBuffer[mode] == NULL ) {
\r
2214 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2218 if ( stream_.doConvertBuffer[mode] ) {
\r
2220 bool makeBuffer = true;
\r
2221 if ( mode == OUTPUT )
\r
2222 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2223 else { // mode == INPUT
\r
2224 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2225 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2226 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2227 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2231 if ( makeBuffer ) {
\r
2232 bufferBytes *= *bufferSize;
\r
2233 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2234 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2235 if ( stream_.deviceBuffer == NULL ) {
\r
2236 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2242 // Allocate memory for the Jack ports (channels) identifiers.
\r
2243 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2244 if ( handle->ports[mode] == NULL ) {
\r
2245 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2249 stream_.device[mode] = device;
\r
2250 stream_.channelOffset[mode] = firstChannel;
\r
2251 stream_.state = STREAM_STOPPED;
\r
2252 stream_.callbackInfo.object = (void *) this;
\r
2254 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2255 // We had already set up the stream for output.
\r
2256 stream_.mode = DUPLEX;
\r
2258 stream_.mode = mode;
\r
2259 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2260 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2261 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2264 // Register our ports.
\r
2266 if ( mode == OUTPUT ) {
\r
2267 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2268 snprintf( label, 64, "outport %d", i );
\r
2269 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2270 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2274 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2275 snprintf( label, 64, "inport %d", i );
\r
2276 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2277 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2281 // Setup the buffer conversion information structure. We don't use
\r
2282 // buffers to do channel offsets, so we override that parameter
\r
2284 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2290 pthread_cond_destroy( &handle->condition );
\r
2291 jack_client_close( handle->client );
\r
2293 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2294 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2297 stream_.apiHandle = 0;
\r
2300 for ( int i=0; i<2; i++ ) {
\r
2301 if ( stream_.userBuffer[i] ) {
\r
2302 free( stream_.userBuffer[i] );
\r
2303 stream_.userBuffer[i] = 0;
\r
2307 if ( stream_.deviceBuffer ) {
\r
2308 free( stream_.deviceBuffer );
\r
2309 stream_.deviceBuffer = 0;
\r
2315 void RtApiJack :: closeStream( void )
\r
2317 if ( stream_.state == STREAM_CLOSED ) {
\r
2318 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2319 error( RtAudioError::WARNING );
\r
2323 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2326 if ( stream_.state == STREAM_RUNNING )
\r
2327 jack_deactivate( handle->client );
\r
2329 jack_client_close( handle->client );
\r
2333 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2334 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2335 pthread_cond_destroy( &handle->condition );
\r
2337 stream_.apiHandle = 0;
\r
2340 for ( int i=0; i<2; i++ ) {
\r
2341 if ( stream_.userBuffer[i] ) {
\r
2342 free( stream_.userBuffer[i] );
\r
2343 stream_.userBuffer[i] = 0;
\r
2347 if ( stream_.deviceBuffer ) {
\r
2348 free( stream_.deviceBuffer );
\r
2349 stream_.deviceBuffer = 0;
\r
2352 stream_.mode = UNINITIALIZED;
\r
2353 stream_.state = STREAM_CLOSED;
\r
2356 void RtApiJack :: startStream( void )
\r
2359 if ( stream_.state == STREAM_RUNNING ) {
\r
2360 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2361 error( RtAudioError::WARNING );
\r
2365 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2366 int result = jack_activate( handle->client );
\r
2368 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2372 const char **ports;
\r
2374 // Get the list of available ports.
\r
2375 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2377 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2378 if ( ports == NULL) {
\r
2379 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2383 // Now make the port connections. Since RtAudio wasn't designed to
\r
2384 // allow the user to select particular channels of a device, we'll
\r
2385 // just open the first "nChannels" ports with offset.
\r
2386 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2388 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2389 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2392 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2399 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2401 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2402 if ( ports == NULL) {
\r
2403 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2407 // Now make the port connections. See note above.
\r
2408 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2410 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2411 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2414 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2421 handle->drainCounter = 0;
\r
2422 handle->internalDrain = false;
\r
2423 stream_.state = STREAM_RUNNING;
\r
2426 if ( result == 0 ) return;
\r
2427 error( RtAudioError::SYSTEM_ERROR );
\r
2430 void RtApiJack :: stopStream( void )
\r
2433 if ( stream_.state == STREAM_STOPPED ) {
\r
2434 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2435 error( RtAudioError::WARNING );
\r
2439 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2440 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2442 if ( handle->drainCounter == 0 ) {
\r
2443 handle->drainCounter = 2;
\r
2444 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2448 jack_deactivate( handle->client );
\r
2449 stream_.state = STREAM_STOPPED;
\r
2452 void RtApiJack :: abortStream( void )
\r
2455 if ( stream_.state == STREAM_STOPPED ) {
\r
2456 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2457 error( RtAudioError::WARNING );
\r
2461 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2462 handle->drainCounter = 2;
\r
2467 // This function will be called by a spawned thread when the user
\r
2468 // callback function signals that the stream should be stopped or
\r
2469 // aborted. It is necessary to handle it this way because the
\r
2470 // callbackEvent() function must return before the jack_deactivate()
\r
2471 // function will return.
\r
2472 static void *jackStopStream( void *ptr )
\r
2474 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2475 RtApiJack *object = (RtApiJack *) info->object;
\r
2477 object->stopStream();
\r
2478 pthread_exit( NULL );
\r
2481 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2483 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2484 if ( stream_.state == STREAM_CLOSED ) {
\r
2485 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2486 error( RtAudioError::WARNING );
\r
2489 if ( stream_.bufferSize != nframes ) {
\r
2490 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2491 error( RtAudioError::WARNING );
\r
2495 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2496 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2498 // Check if we were draining the stream and signal is finished.
\r
2499 if ( handle->drainCounter > 3 ) {
\r
2500 ThreadHandle threadId;
\r
2502 stream_.state = STREAM_STOPPING;
\r
2503 if ( handle->internalDrain == true )
\r
2504 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2506 pthread_cond_signal( &handle->condition );
\r
2510 // Invoke user callback first, to get fresh output data.
\r
2511 if ( handle->drainCounter == 0 ) {
\r
2512 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2513 double streamTime = getStreamTime();
\r
2514 RtAudioStreamStatus status = 0;
\r
2515 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2516 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2517 handle->xrun[0] = false;
\r
2519 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2520 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2521 handle->xrun[1] = false;
\r
2523 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2524 stream_.bufferSize, streamTime, status, info->userData );
\r
2525 if ( cbReturnValue == 2 ) {
\r
2526 stream_.state = STREAM_STOPPING;
\r
2527 handle->drainCounter = 2;
\r
2529 pthread_create( &id, NULL, jackStopStream, info );
\r
2532 else if ( cbReturnValue == 1 ) {
\r
2533 handle->drainCounter = 1;
\r
2534 handle->internalDrain = true;
\r
2538 jack_default_audio_sample_t *jackbuffer;
\r
2539 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2540 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2542 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2544 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2545 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2546 memset( jackbuffer, 0, bufferBytes );
\r
2550 else if ( stream_.doConvertBuffer[0] ) {
\r
2552 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2554 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2555 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2556 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2559 else { // no buffer conversion
\r
2560 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2561 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2562 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2566 if ( handle->drainCounter ) {
\r
2567 handle->drainCounter++;
\r
2572 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2574 if ( stream_.doConvertBuffer[1] ) {
\r
2575 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2576 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2577 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2579 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2581 else { // no buffer conversion
\r
2582 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2583 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2584 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2590 RtApi::tickStreamTime();
\r
2593 //******************** End of __UNIX_JACK__ *********************//
\r
2596 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2598 // The ASIO API is designed around a callback scheme, so this
\r
2599 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2600 // Jack. The primary constraint with ASIO is that it only allows
\r
2601 // access to a single driver at a time. Thus, it is not possible to
\r
2602 // have more than one simultaneous RtAudio stream.
\r
2604 // This implementation also requires a number of external ASIO files
\r
2605 // and a few global variables. The ASIO callback scheme does not
\r
2606 // allow for the passing of user data, so we must create a global
\r
2607 // pointer to our callbackInfo structure.
\r
2609 // On unix systems, we make use of a pthread condition variable.
\r
2610 // Since there is no equivalent in Windows, I hacked something based
\r
2611 // on information found in
\r
2612 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2614 #include "asiosys.h"
\r
2616 #include "iasiothiscallresolver.h"
\r
2617 #include "asiodrivers.h"
\r
2620 static AsioDrivers drivers;
\r
2621 static ASIOCallbacks asioCallbacks;
\r
2622 static ASIODriverInfo driverInfo;
\r
2623 static CallbackInfo *asioCallbackInfo;
\r
2624 static bool asioXRun;
\r
2626 struct AsioHandle {
\r
2627 int drainCounter; // Tracks callback counts when draining
\r
2628 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2629 ASIOBufferInfo *bufferInfos;
\r
2633 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2636 // Function declarations (definitions at end of section)
\r
2637 static const char* getAsioErrorString( ASIOError result );
\r
2638 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2639 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2641 RtApiAsio :: RtApiAsio()
\r
2643   // ASIO cannot run on a multi-threaded apartment. You can call

2644   // CoInitialize beforehand, but it must be for apartment threading

2645   // (in which case, CoInitialize will return S_FALSE here).
\r
2646 coInitialized_ = false;
\r
2647 HRESULT hr = CoInitialize( NULL );
\r
2648 if ( FAILED(hr) ) {
\r
2649 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2650 error( RtAudioError::WARNING );
\r
2652 coInitialized_ = true;
\r
2654 drivers.removeCurrentDriver();
\r
2655 driverInfo.asioVersion = 2;
\r
2657 // See note in DirectSound implementation about GetDesktopWindow().
\r
2658 driverInfo.sysRef = GetForegroundWindow();
\r
2661 RtApiAsio :: ~RtApiAsio()
\r
2663 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2664 if ( coInitialized_ ) CoUninitialize();
\r
2667 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2669 return (unsigned int) drivers.asioGetNumDev();
\r
// Probe a single ASIO device: load its driver, query channel counts,
// supported sample rates, and the native data format, then unload the
// driver.  If a stream is already open, ASIO cannot load another driver,
// so the results cached by saveDeviceInfo() are returned instead.
// NOTE(review): this listing elides several lines (early `return info;`
// statements on the WARNING paths and closing braces); comments below
// describe only what the surviving lines show.
2672 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )

2674 RtAudio::DeviceInfo info;

2675 info.probed = false;

// Validate the device index before touching any driver.
2678 unsigned int nDevices = getDeviceCount();

2679 if ( nDevices == 0 ) {

2680 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";

2681 error( RtAudioError::INVALID_USE );

2685 if ( device >= nDevices ) {

2686 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";

2687 error( RtAudioError::INVALID_USE );

2691 // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.

2692 if ( stream_.state != STREAM_CLOSED ) {

2693 if ( device >= devices_.size() ) {

2694 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";

2695 error( RtAudioError::WARNING );

2698 return devices_[ device ];

2701 char driverName[32];

2702 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );

2703 if ( result != ASE_OK ) {

2704 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";

2705 errorText_ = errorStream_.str();

2706 error( RtAudioError::WARNING );

2710 info.name = driverName;

// Load and initialize the driver so it can be queried.
2712 if ( !drivers.loadDriver( driverName ) ) {

2713 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";

2714 errorText_ = errorStream_.str();

2715 error( RtAudioError::WARNING );

2719 result = ASIOInit( &driverInfo );

2720 if ( result != ASE_OK ) {

2721 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";

2722 errorText_ = errorStream_.str();

2723 error( RtAudioError::WARNING );

2727 // Determine the device channel information.

2728 long inputChannels, outputChannels;

2729 result = ASIOGetChannels( &inputChannels, &outputChannels );

2730 if ( result != ASE_OK ) {

2731 drivers.removeCurrentDriver();

2732 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";

2733 errorText_ = errorStream_.str();

2734 error( RtAudioError::WARNING );

2738 info.outputChannels = outputChannels;

2739 info.inputChannels = inputChannels;

// Duplex capability is limited by the smaller of the two channel counts.
2740 if ( info.outputChannels > 0 && info.inputChannels > 0 )

2741 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

2743 // Determine the supported sample rates.

2744 info.sampleRates.clear();

2745 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {

2746 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );

2747 if ( result == ASE_OK )

2748 info.sampleRates.push_back( SAMPLE_RATES[i] );

2751 // Determine supported data types ... just check first channel and assume rest are the same.

2752 ASIOChannelInfo channelInfo;

2753 channelInfo.channel = 0;

2754 channelInfo.isInput = true;

2755 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;

2756 result = ASIOGetChannelInfo( &channelInfo );

2757 if ( result != ASE_OK ) {

2758 drivers.removeCurrentDriver();

2759 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";

2760 errorText_ = errorStream_.str();

2761 error( RtAudioError::WARNING );

// Translate the driver's ASIO sample type into an RtAudioFormat bit.
// Both endiannesses of each width map to the same RtAudio format; byte
// swapping is handled separately at stream-open time.
2765 info.nativeFormats = 0;

2766 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )

2767 info.nativeFormats |= RTAUDIO_SINT16;

2768 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )

2769 info.nativeFormats |= RTAUDIO_SINT32;

2770 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )

2771 info.nativeFormats |= RTAUDIO_FLOAT32;

2772 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )

2773 info.nativeFormats |= RTAUDIO_FLOAT64;

2774 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )

2775 info.nativeFormats |= RTAUDIO_SINT24;

2777 if ( info.outputChannels > 0 )

2778 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;

2779 if ( info.inputChannels > 0 )

2780 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

2782 info.probed = true;

// Unload the driver so other devices (or a subsequent open) can load theirs.
2783 drivers.removeCurrentDriver();
\r
2787 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2789 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2790 object->callbackEvent( index );
\r
2793 void RtApiAsio :: saveDeviceInfo( void )
\r
2797 unsigned int nDevices = getDeviceCount();
\r
2798 devices_.resize( nDevices );
\r
2799 for ( unsigned int i=0; i<nDevices; i++ )
\r
2800 devices_[i] = getDeviceInfo( i );
\r
// Open (or extend to duplex) an ASIO stream on `device`: load the driver,
// validate channel counts and sample rate, negotiate a buffer size within
// the driver's min/max/granularity constraints, create the ASIO buffers,
// and allocate RtAudio's user/device conversion buffers.
// NOTE(review): this listing elides many physical lines (the FAILURE-path
// returns, `goto` targets, closing braces, and the error-unwind label near
// the end); comments below annotate only the surviving lines.
2803 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

2804 unsigned int firstChannel, unsigned int sampleRate,

2805 RtAudioFormat format, unsigned int *bufferSize,

2806 RtAudio::StreamOptions *options )

2808 // For ASIO, a duplex stream MUST use the same driver.

2809 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {

2810 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

2814 char driverName[32];

2815 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );

2816 if ( result != ASE_OK ) {

2817 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";

2818 errorText_ = errorStream_.str();

2822 // Only load the driver once for duplex stream.

2823 if ( mode != INPUT || stream_.mode != OUTPUT ) {

2824 // The getDeviceInfo() function will not work when a stream is open

2825 // because ASIO does not allow multiple devices to run at the same

2826 // time.  Thus, we'll probe the system before opening a stream and

2827 // save the results for use by getDeviceInfo().

2828 this->saveDeviceInfo();

2830 if ( !drivers.loadDriver( driverName ) ) {

2831 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";

2832 errorText_ = errorStream_.str();

2836 result = ASIOInit( &driverInfo );

2837 if ( result != ASE_OK ) {

2838 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";

2839 errorText_ = errorStream_.str();

2844 // Check the device channel count.

2845 long inputChannels, outputChannels;

2846 result = ASIOGetChannels( &inputChannels, &outputChannels );

2847 if ( result != ASE_OK ) {

2848 drivers.removeCurrentDriver();

2849 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";

2850 errorText_ = errorStream_.str();

// The requested channel range [firstChannel, firstChannel+channels) must
// fit inside what the driver reports for the chosen direction.
2854 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||

2855 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {

2856 drivers.removeCurrentDriver();

2857 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";

2858 errorText_ = errorStream_.str();

2861 stream_.nDeviceChannels[mode] = channels;

2862 stream_.nUserChannels[mode] = channels;

2863 stream_.channelOffset[mode] = firstChannel;

2865 // Verify the sample rate is supported.

2866 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );

2867 if ( result != ASE_OK ) {

2868 drivers.removeCurrentDriver();

2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";

2870 errorText_ = errorStream_.str();

2874 // Get the current sample rate

2875 ASIOSampleRate currentRate;

// NOTE(review): "¤tRate" below is an HTML-entity mangling of
// "&currentRate" (the "&curr" run was decoded as &curren;).  The upstream
// source passes the address of `currentRate` declared just above; restore
// `&currentRate` when de-mangling this listing.
2876 result = ASIOGetSampleRate( ¤tRate );

2877 if ( result != ASE_OK ) {

2878 drivers.removeCurrentDriver();

2879 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";

2880 errorText_ = errorStream_.str();

2884 // Set the sample rate only if necessary

2885 if ( currentRate != sampleRate ) {

2886 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );

2887 if ( result != ASE_OK ) {

2888 drivers.removeCurrentDriver();

2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";

2890 errorText_ = errorStream_.str();

2895 // Determine the driver data type.

2896 ASIOChannelInfo channelInfo;

2897 channelInfo.channel = 0;

2898 if ( mode == OUTPUT ) channelInfo.isInput = false;

2899 else channelInfo.isInput = true;

2900 result = ASIOGetChannelInfo( &channelInfo );

2901 if ( result != ASE_OK ) {

2902 drivers.removeCurrentDriver();

2903 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";

2904 errorText_ = errorStream_.str();

2908 // Assuming WINDOWS host is always little-endian.

// Map the driver's sample type to an RtAudio device format; the MSB
// variants additionally flag a byte swap since the host is little-endian.
2909 stream_.doByteSwap[mode] = false;

2910 stream_.userFormat = format;

2911 stream_.deviceFormat[mode] = 0;

2912 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {

2913 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

2914 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;

2916 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {

2917 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

2918 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;

2920 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {

2921 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

2922 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;

2924 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {

2925 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;

2926 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;

2928 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {

2929 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

2930 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;

2933 if ( stream_.deviceFormat[mode] == 0 ) {

2934 drivers.removeCurrentDriver();

2935 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";

2936 errorText_ = errorStream_.str();

2940 // Set the buffer size.  For a duplex stream, this will end up

2941 // setting the buffer size based on the input constraints, which

2943 long minSize, maxSize, preferSize, granularity;

2944 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );

2945 if ( result != ASE_OK ) {

2946 drivers.removeCurrentDriver();

2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";

2948 errorText_ = errorStream_.str();

// Clamp the requested buffer size into [minSize, maxSize]; a granularity
// of -1 means the driver only accepts power-of-two sizes.
2952 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;

2953 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

2954 else if ( granularity == -1 ) {

2955 // Make sure bufferSize is a power of two.

2956 int log2_of_min_size = 0;

2957 int log2_of_max_size = 0;

// Find the positions of the highest set bits of minSize and maxSize.
2959 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {

2960 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;

2961 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

// Choose the power of two closest to the requested size.
2964 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );

2965 int min_delta_num = log2_of_min_size;

2967 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {

2968 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );

2969 if (current_delta < min_delta) {

2970 min_delta = current_delta;

2971 min_delta_num = i;

2975 *bufferSize = ( (unsigned int)1 << min_delta_num );

2976 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;

2977 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

2979 else if ( granularity != 0 ) {

2980 // Set to an even multiple of granularity, rounding up.

2981 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

// The input half of a duplex stream must reuse the output half's size.
2984 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {

2985 drivers.removeCurrentDriver();

2986 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

2990 stream_.bufferSize = *bufferSize;

2991 stream_.nBuffers = 2;

2993 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

2994 else stream_.userInterleaved = true;

2996 // ASIO always uses non-interleaved buffers.

2997 stream_.deviceInterleaved[mode] = false;

2999 // Allocate, if necessary, our AsioHandle structure for the stream.

3000 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3001 if ( handle == 0 ) {

3003 handle = new AsioHandle;

3005 catch ( std::bad_alloc& ) {

3006 //if ( handle == NULL ) {

3007 drivers.removeCurrentDriver();

3008 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";

3011 handle->bufferInfos = 0;

3013 // Create a manual-reset event.

3014 handle->condition = CreateEvent( NULL,  // no security

3015 TRUE,  // manual-reset

3016 FALSE, // non-signaled initially

3017 NULL ); // unnamed

3018 stream_.apiHandle = (void *) handle;

3021 // Create the ASIO internal buffers.  Since RtAudio sets up input

3022 // and output separately, we'll have to dispose of previously

3023 // created output buffers for a duplex stream.

3024 long inputLatency, outputLatency;

3025 if ( mode == INPUT && stream_.mode == OUTPUT ) {

3026 ASIODisposeBuffers();

3027 if ( handle->bufferInfos ) free( handle->bufferInfos );

3030 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.

3031 bool buffersAllocated = false;

3032 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];

3033 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );

3034 if ( handle->bufferInfos == NULL ) {

3035 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";

3036 errorText_ = errorStream_.str();

// Output channel descriptors first, then input, each offset by the
// requested firstChannel for its direction.
3040 ASIOBufferInfo *infos;

3041 infos = handle->bufferInfos;

3042 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {

3043 infos->isInput = ASIOFalse;

3044 infos->channelNum = i + stream_.channelOffset[0];

3045 infos->buffers[0] = infos->buffers[1] = 0;

3047 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {

3048 infos->isInput = ASIOTrue;

3049 infos->channelNum = i + stream_.channelOffset[1];

3050 infos->buffers[0] = infos->buffers[1] = 0;

3053 // Set up the ASIO callback structure and create the ASIO data buffers.

3054 asioCallbacks.bufferSwitch = &bufferSwitch;

3055 asioCallbacks.sampleRateDidChange = &sampleRateChanged;

3056 asioCallbacks.asioMessage = &asioMessages;

3057 asioCallbacks.bufferSwitchTimeInfo = NULL;

3058 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );

3059 if ( result != ASE_OK ) {

3060 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";

3061 errorText_ = errorStream_.str();

3064 buffersAllocated = true;

3066 // Set flags for buffer conversion.

3067 stream_.doConvertBuffer[mode] = false;

3068 if ( stream_.userFormat != stream_.deviceFormat[mode] )

3069 stream_.doConvertBuffer[mode] = true;

3070 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

3071 stream_.nUserChannels[mode] > 1 )

3072 stream_.doConvertBuffer[mode] = true;

3074 // Allocate necessary internal buffers

3075 unsigned long bufferBytes;

3076 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

3077 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

3078 if ( stream_.userBuffer[mode] == NULL ) {

3079 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

3083 if ( stream_.doConvertBuffer[mode] ) {

3085 bool makeBuffer = true;

3086 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex input, reuse the output device buffer when it is already
// large enough for the input side.
3087 if ( mode == INPUT ) {

3088 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

3089 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

3090 if ( bufferBytes <= bytesOut ) makeBuffer = false;

3094 if ( makeBuffer ) {

3095 bufferBytes *= *bufferSize;

3096 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

3097 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

3098 if ( stream_.deviceBuffer == NULL ) {

3099 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

3105 stream_.sampleRate = sampleRate;

3106 stream_.device[mode] = device;

3107 stream_.state = STREAM_STOPPED;

3108 asioCallbackInfo = &stream_.callbackInfo;

3109 stream_.callbackInfo.object = (void *) this;

3110 if ( stream_.mode == OUTPUT && mode == INPUT )

3111 // We had already set up an output stream.

3112 stream_.mode = DUPLEX;

3114 stream_.mode = mode;

3116 // Determine device latencies

3117 result = ASIOGetLatencies( &inputLatency, &outputLatency );

3118 if ( result != ASE_OK ) {

3119 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";

3120 errorText_ = errorStream_.str();

3121 error( RtAudioError::WARNING); // warn but don't fail

3124 stream_.latency[0] = outputLatency;

3125 stream_.latency[1] = inputLatency;

3128 // Setup the buffer conversion information structure.  We don't use

3129 // buffers to do channel offsets, so we override that parameter

3131 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

// Error-unwind path (the label introducing it is elided in this listing):
// release everything allocated above, in reverse order.
3136 if ( buffersAllocated )

3137 ASIODisposeBuffers();

3138 drivers.removeCurrentDriver();

3141 CloseHandle( handle->condition );

3142 if ( handle->bufferInfos )

3143 free( handle->bufferInfos );

3145 stream_.apiHandle = 0;

3148 for ( int i=0; i<2; i++ ) {

3149 if ( stream_.userBuffer[i] ) {

3150 free( stream_.userBuffer[i] );

3151 stream_.userBuffer[i] = 0;

3155 if ( stream_.deviceBuffer ) {

3156 free( stream_.deviceBuffer );

3157 stream_.deviceBuffer = 0;
\r
// Stop the stream if running, dispose the ASIO buffers, unload the driver,
// and free every resource probeDeviceOpen() allocated (event handle,
// bufferInfos, user and device buffers).
3163 void RtApiAsio :: closeStream()

3165 if ( stream_.state == STREAM_CLOSED ) {

3166 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";

3167 error( RtAudioError::WARNING );

3171 if ( stream_.state == STREAM_RUNNING ) {

3172 stream_.state = STREAM_STOPPED;

3175 ASIODisposeBuffers();

3176 drivers.removeCurrentDriver();

3178 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3180 CloseHandle( handle->condition );

3181 if ( handle->bufferInfos )

3182 free( handle->bufferInfos );

3184 stream_.apiHandle = 0;

3187 for ( int i=0; i<2; i++ ) {

3188 if ( stream_.userBuffer[i] ) {

3189 free( stream_.userBuffer[i] );

3190 stream_.userBuffer[i] = 0;

3194 if ( stream_.deviceBuffer ) {

3195 free( stream_.deviceBuffer );

3196 stream_.deviceBuffer = 0;

3199 stream_.mode = UNINITIALIZED;

3200 stream_.state = STREAM_CLOSED;
\r
// Flag shared with the auto-stop path; presumably prevents stopStream()
// from being invoked twice when the callback spawns asioStopStream() —
// its consumers are elided in this listing, TODO confirm upstream.
3203 bool stopThreadCalled = false;
\r
// Start the open stream: reset the drain bookkeeping and the manual-reset
// event, call ASIOStart(), and mark the stream running.  Raises
// SYSTEM_ERROR if the driver refuses to start.
3205 void RtApiAsio :: startStream()

3208 if ( stream_.state == STREAM_RUNNING ) {

3209 errorText_ = "RtApiAsio::startStream(): the stream is already running!";

3210 error( RtAudioError::WARNING );

3214 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3215 ASIOError result = ASIOStart();

3216 if ( result != ASE_OK ) {

3217 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";

3218 errorText_ = errorStream_.str();

// Fresh start: not draining, and the stop-signal event is unsignaled.
3222 handle->drainCounter = 0;

3223 handle->internalDrain = false;

3224 ResetEvent( handle->condition );

3225 stream_.state = STREAM_RUNNING;

3229 stopThreadCalled = false;

3231 if ( result == ASE_OK ) return;

3232 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream.  For an output-capable stream that is not already
// draining, request a drain (drainCounter = 2) and block on the handle's
// event until callbackEvent() signals that the output has been flushed,
// then call ASIOStop().
3235 void RtApiAsio :: stopStream()

3238 if ( stream_.state == STREAM_STOPPED ) {

3239 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";

3240 error( RtAudioError::WARNING );

3244 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3245 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

3246 if ( handle->drainCounter == 0 ) {

3247 handle->drainCounter = 2;

3248 WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

3252 stream_.state = STREAM_STOPPED;

3254 ASIOError result = ASIOStop();

3255 if ( result != ASE_OK ) {

3256 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";

3257 errorText_ = errorStream_.str();

3260 if ( result == ASE_OK ) return;

3261 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream.  Deliberately identical to stopStream() (see the
// comment below): immediate disposal was observed to leave residual sound
// in some drivers' device buffers, so the drain path is used instead.
// NOTE(review): the delegating call to stopStream() is elided in this
// listing.
3264 void RtApiAsio :: abortStream()

3267 if ( stream_.state == STREAM_STOPPED ) {

3268 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";

3269 error( RtAudioError::WARNING );

3273 // The following lines were commented-out because some behavior was

3274 // noted where the device buffers need to be zeroed to avoid

3275 // continuing sound, even when the device buffers are completely

3276 // disposed.  So now, calling abort is the same as calling stop.

3277 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3278 // handle->drainCounter = 2;
\r
3282 // This function will be called by a spawned thread when the user
\r
3283 // callback function signals that the stream should be stopped or
\r
3284 // aborted. It is necessary to handle it this way because the
\r
3285 // callbackEvent() function must return before the ASIOStop()
\r
3286 // function will return.
\r
3287 static unsigned __stdcall asioStopStream( void *ptr )
\r
3289 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3290 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3292 object->stopStream();
\r
3293 _endthreadex( 0 );
\r
// Per-buffer ASIO callback body (invoked via bufferSwitch()).  Runs the
// user callback, converts/byte-swaps between the user buffer layout and
// ASIO's per-channel non-interleaved device buffers for half
// `bufferIndex`, and manages the drain/auto-stop protocol via
// handle->drainCounter.
// NOTE(review): several closing braces and short lines (e.g. `return
// SUCCESS;` paths and one memcpy size argument near line 3413) are elided
// in this listing.
3297 bool RtApiAsio :: callbackEvent( long bufferIndex )

3299 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;

3300 if ( stream_.state == STREAM_CLOSED ) {

3301 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";

3302 error( RtAudioError::WARNING );

3306 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

3307 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3309 // Check if we were draining the stream and signal if finished.

// drainCounter > 3 means enough zero-filled buffers have been played out:
// either wake the blocked stopStream() (external drain) or spawn a thread
// to call stopStream() ourselves (internal drain requested by the user
// callback's return value).
3310 if ( handle->drainCounter > 3 ) {

3312 stream_.state = STREAM_STOPPING;

3313 if ( handle->internalDrain == false )

3314 SetEvent( handle->condition );

3315 else { // spawn a thread to stop the stream

3316 unsigned threadId;

3317 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,

3318 &stream_.callbackInfo, 0, &threadId );

3323 // Invoke user callback to get fresh output data UNLESS we are

3324 // draining stream.

3325 if ( handle->drainCounter == 0 ) {

3326 RtAudioCallback callback = (RtAudioCallback) info->callback;

3327 double streamTime = getStreamTime();

3328 RtAudioStreamStatus status = 0;

3329 if ( stream_.mode != INPUT && asioXRun == true ) {

3330 status |= RTAUDIO_OUTPUT_UNDERFLOW;

3333 if ( stream_.mode != OUTPUT && asioXRun == true ) {

3334 status |= RTAUDIO_INPUT_OVERFLOW;

3337 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],

3338 stream_.bufferSize, streamTime, status, info->userData );

// Callback return 2 = abort now (drain two zero buffers, then stop);
// return 1 = stop after the data just provided has played out.
3339 if ( cbReturnValue == 2 ) {

3340 stream_.state = STREAM_STOPPING;

3341 handle->drainCounter = 2;

3342 unsigned threadId;

3343 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,

3344 &stream_.callbackInfo, 0, &threadId );

3347 else if ( cbReturnValue == 1 ) {

3348 handle->drainCounter = 1;

3349 handle->internalDrain = true;

3353 unsigned int nChannels, bufferBytes, i, j;

3354 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];

// ---- Output side: fill the driver's output channel buffers. ----
3355 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

3357 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

3359 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

3361 for ( i=0, j=0; i<nChannels; i++ ) {

3362 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3363 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

3367 else if ( stream_.doConvertBuffer[0] ) {

3369 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

3370 if ( stream_.doByteSwap[0] )

3371 byteSwapBuffer( stream_.deviceBuffer,

3372 stream_.bufferSize * stream_.nDeviceChannels[0],

3373 stream_.deviceFormat[0] );

3375 for ( i=0, j=0; i<nChannels; i++ ) {

3376 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3377 memcpy( handle->bufferInfos[i].buffers[bufferIndex],

3378 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

// No conversion needed: copy straight from the user buffer.
3384 if ( stream_.doByteSwap[0] )

3385 byteSwapBuffer( stream_.userBuffer[0],

3386 stream_.bufferSize * stream_.nUserChannels[0],

3387 stream_.userFormat );

3389 for ( i=0, j=0; i<nChannels; i++ ) {

3390 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3391 memcpy( handle->bufferInfos[i].buffers[bufferIndex],

3392 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

// A non-zero drainCounter is advanced once per callback; when it passes 3
// the drain-complete branch at the top fires.
3397 if ( handle->drainCounter ) {

3398 handle->drainCounter++;

// ---- Input side: harvest the driver's input channel buffers. ----
3403 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

3405 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

3407 if (stream_.doConvertBuffer[1]) {

3409 // Always interleave ASIO input data.

3410 for ( i=0, j=0; i<nChannels; i++ ) {

3411 if ( handle->bufferInfos[i].isInput == ASIOTrue )

3412 memcpy( &stream_.deviceBuffer[j++*bufferBytes],

3413 handle->bufferInfos[i].buffers[bufferIndex],

3417 if ( stream_.doByteSwap[1] )

3418 byteSwapBuffer( stream_.deviceBuffer,

3419 stream_.bufferSize * stream_.nDeviceChannels[1],

3420 stream_.deviceFormat[1] );

3421 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

3425 for ( i=0, j=0; i<nChannels; i++ ) {

3426 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {

3427 memcpy( &stream_.userBuffer[1][bufferBytes*j++],

3428 handle->bufferInfos[i].buffers[bufferIndex],

3433 if ( stream_.doByteSwap[1] )

3434 byteSwapBuffer( stream_.userBuffer[1],

3435 stream_.bufferSize * stream_.nUserChannels[1],

3436 stream_.userFormat );

3441 // The following call was suggested by Malte Clasen.  While the API

3442 // documentation indicates it should not be required, some device

3443 // drivers apparently do not function correctly without it.

3444 ASIOOutputReady();

3446 RtApi::tickStreamTime();
\r
// ASIO driver callback: the driver reports a sample-rate change (typically
// under external sync).  RtAudio cannot follow a rate change mid-stream,
// so the stream is stopped and the user is informed on stderr.
3450 static void sampleRateChanged( ASIOSampleRate sRate )

3452 // The ASIO documentation says that this usually only happens during

3453 // external sync.  Audio processing is not stopped by the driver,

3454 // actual sample rate might not have even changed, maybe only the

3455 // sample rate status of an AES/EBU or S/PDIF digital input at the

3458 RtApi *object = (RtApi *) asioCallbackInfo->object;

3460 object->stopStream();

3462 catch ( RtAudioError &exception ) {

3463 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

3467 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO driver message callback: answers the driver's capability queries
// and reacts to its notifications.  Returns non-zero for selectors the
// host handles/supports, 0 otherwise.
// NOTE(review): the `ret = ...;` assignments, `break;` statements, and the
// final `return ret;` are elided in this listing; only the selector cases
// and their explanatory comments survive.
3470 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )

3474 switch( selector ) {

3475 case kAsioSelectorSupported:

// Report which of the selectors below this host implements.
3476 if ( value == kAsioResetRequest

3477 || value == kAsioEngineVersion

3478 || value == kAsioResyncRequest

3479 || value == kAsioLatenciesChanged

3480 // The following three were added for ASIO 2.0, you don't

3481 // necessarily have to support them.

3482 || value == kAsioSupportsTimeInfo

3483 || value == kAsioSupportsTimeCode

3484 || value == kAsioSupportsInputMonitor)

3487 case kAsioResetRequest:

3488 // Defer the task and perform the reset of the driver during the

3489 // next "safe" situation.  You cannot reset the driver right now,

3490 // as this code is called from the driver.  Reset the driver is

3491 // done by completely destruct is. I.e. ASIOStop(),

3492 // ASIODisposeBuffers(), Destruction Afterwards you initialize the

3494 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;

3497 case kAsioResyncRequest:

3498 // This informs the application that the driver encountered some

3499 // non-fatal data loss.  It is used for synchronization purposes

3500 // of different media.  Added mainly to work around the Win16Mutex

3501 // problems in Windows 95/98 with the Windows Multimedia system,

3502 // which could lose data because the Mutex was held too long by

3503 // another thread.  However a driver can issue it in other

3504 // situations, too.

3505 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;

3509 case kAsioLatenciesChanged:

3510 // This will inform the host application that the drivers were

3511 // latencies changed.  Beware, it this does not mean that the

3512 // buffer sizes have changed!  You might need to update internal

3514 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;

3517 case kAsioEngineVersion:

3518 // Return the supported ASIO version of the host application.  If

3519 // a host application does not implement this selector, ASIO 1.0

3520 // is assumed by the driver.

3523 case kAsioSupportsTimeInfo:

3524 // Informs the driver whether the

3525 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.

3526 // For compatibility with ASIO 1.0 drivers the host application

3527 // should always support the "old" bufferSwitch method, too.

3530 case kAsioSupportsTimeCode:

3531 // Informs the driver whether application is interested in time

3532 // code info.  If an application does not need to know about time

3533 // code, the driver has less work to do.
\r
3540 static const char* getAsioErrorString( ASIOError result )
\r
3545 const char*message;
\r
3548 static const Messages m[] =
\r
3550 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3551 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3552 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3553 { ASE_InvalidMode, "Invalid mode." },
\r
3554 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3555 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3556 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3559 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3560 if ( m[i].value == result ) return m[i].message;
\r
3562 return "Unknown error.";
\r
3564 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3568 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3570 // Modified by Robin Davies, October 2005
\r
3571 // - Improvements to DirectX pointer chasing.
\r
3572 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3573 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3574 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3575 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3577 #include <dsound.h>
\r
3578 #include <assert.h>
\r
3579 #include <algorithm>
\r
3581 #if defined(__MINGW32__)
\r
3582 // missing from latest mingw winapi
\r
3583 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3584 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3585 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3586 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3589 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3591 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3592 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3595 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3597 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3598 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3599 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3600 return pointer >= earlierPointer && pointer < laterPointer;
\r
3603 // A structure to hold various information related to the DirectSound
\r
3604 // API implementation.
\r
3606 unsigned int drainCounter; // Tracks callback counts when draining
\r
3607 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3611 UINT bufferPointer[2];
\r
3612 DWORD dsBufferSize[2];
\r
3613 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3617 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3620 // Declarations for utility functions, callbacks, and structures
\r
3621 // specific to the DirectSound implementation.
\r
3622 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3623 LPCTSTR description,
\r
3625 LPVOID lpContext );
\r
3627 static const char* getErrorString( int code );
\r
3629 static unsigned __stdcall callbackHandler( void *ptr );
\r
3638 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context handed to deviceQueryCallback() during device enumeration.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // device list to populate
};
\r
3646 RtApiDs :: RtApiDs()
\r
3648 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3649 // accept whatever the mainline chose for a threading model.
\r
3650 coInitialized_ = false;
\r
3651 HRESULT hr = CoInitialize( NULL );
\r
3652 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3655 RtApiDs :: ~RtApiDs()
\r
3657 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3658 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3661 // The DirectSound default output is always the first device.
\r
3662 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3667 // The DirectSound default input is always the first input device,
\r
3668 // which is the first capture device enumerated.
\r
3669 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3674 unsigned int RtApiDs :: getDeviceCount( void )
\r
3676 // Set query flag for previously found devices to false, so that we
\r
3677 // can check for any devices that have disappeared.
\r
3678 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3679 dsDevices[i].found = false;
\r
3681 // Query DirectSound devices.
\r
3682 struct DsProbeData probeInfo;
\r
3683 probeInfo.isInput = false;
\r
3684 probeInfo.dsDevices = &dsDevices;
\r
3685 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3686 if ( FAILED( result ) ) {
\r
3687 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3688 errorText_ = errorStream_.str();
\r
3689 error( RtAudioError::WARNING );
\r
3692 // Query DirectSoundCapture devices.
\r
3693 probeInfo.isInput = true;
\r
3694 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3695 if ( FAILED( result ) ) {
\r
3696 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3697 errorText_ = errorStream_.str();
\r
3698 error( RtAudioError::WARNING );
\r
3701 // Clean out any devices that may have disappeared.
\r
3702 std::vector< int > indices;
\r
3703 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3704 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3705 unsigned int nErased = 0;
\r
3706 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3707 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3709 return static_cast<unsigned int>(dsDevices.size());
\r
3712 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3714 RtAudio::DeviceInfo info;
\r
3715 info.probed = false;
\r
3717 if ( dsDevices.size() == 0 ) {
\r
3718 // Force a query of all devices
\r
3720 if ( dsDevices.size() == 0 ) {
\r
3721 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3722 error( RtAudioError::INVALID_USE );
\r
3727 if ( device >= dsDevices.size() ) {
\r
3728 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3729 error( RtAudioError::INVALID_USE );
\r
3734 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3736 LPDIRECTSOUND output;
\r
3738 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3739 if ( FAILED( result ) ) {
\r
3740 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3741 errorText_ = errorStream_.str();
\r
3742 error( RtAudioError::WARNING );
\r
3746 outCaps.dwSize = sizeof( outCaps );
\r
3747 result = output->GetCaps( &outCaps );
\r
3748 if ( FAILED( result ) ) {
\r
3749 output->Release();
\r
3750 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3751 errorText_ = errorStream_.str();
\r
3752 error( RtAudioError::WARNING );
\r
3756 // Get output channel information.
\r
3757 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3759 // Get sample rate information.
\r
3760 info.sampleRates.clear();
\r
3761 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3762 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3763 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3764 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3767 // Get format information.
\r
3768 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3769 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3771 output->Release();
\r
3773 if ( getDefaultOutputDevice() == device )
\r
3774 info.isDefaultOutput = true;
\r
3776 if ( dsDevices[ device ].validId[1] == false ) {
\r
3777 info.name = dsDevices[ device ].name;
\r
3778 info.probed = true;
\r
3784 LPDIRECTSOUNDCAPTURE input;
\r
3785 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3786 if ( FAILED( result ) ) {
\r
3787 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3788 errorText_ = errorStream_.str();
\r
3789 error( RtAudioError::WARNING );
\r
3794 inCaps.dwSize = sizeof( inCaps );
\r
3795 result = input->GetCaps( &inCaps );
\r
3796 if ( FAILED( result ) ) {
\r
3798 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3799 errorText_ = errorStream_.str();
\r
3800 error( RtAudioError::WARNING );
\r
3804 // Get input channel information.
\r
3805 info.inputChannels = inCaps.dwChannels;
\r
3807 // Get sample rate and format information.
\r
3808 std::vector<unsigned int> rates;
\r
3809 if ( inCaps.dwChannels >= 2 ) {
\r
3810 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3811 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3812 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3813 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3814 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3815 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3816 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3817 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3819 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3820 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3821 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3822 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3823 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3825 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3826 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3827 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3828 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3829 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3832 else if ( inCaps.dwChannels == 1 ) {
\r
3833 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3834 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3835 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3836 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3837 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3838 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3839 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3840 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3842 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3843 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3844 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3845 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3846 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3848 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3849 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3850 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3851 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3852 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3855 else info.inputChannels = 0; // technically, this would be an error
\r
3859 if ( info.inputChannels == 0 ) return info;
\r
3861 // Copy the supported rates to the info structure but avoid duplication.
\r
3863 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3865 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3866 if ( rates[i] == info.sampleRates[j] ) {
\r
3871 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3873 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3875 // If device opens for both playback and capture, we determine the channels.
\r
3876 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3877 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3879 if ( device == 0 ) info.isDefaultInput = true;
\r
3881 // Copy name and return.
\r
3882 info.name = dsDevices[ device ].name;
\r
3883 info.probed = true;
\r
3887 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3888 unsigned int firstChannel, unsigned int sampleRate,
\r
3889 RtAudioFormat format, unsigned int *bufferSize,
\r
3890 RtAudio::StreamOptions *options )
\r
3892 if ( channels + firstChannel > 2 ) {
\r
3893 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3897 size_t nDevices = dsDevices.size();
\r
3898 if ( nDevices == 0 ) {
\r
3899 // This should not happen because a check is made before this function is called.
\r
3900 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3904 if ( device >= nDevices ) {
\r
3905 // This should not happen because a check is made before this function is called.
\r
3906 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3910 if ( mode == OUTPUT ) {
\r
3911 if ( dsDevices[ device ].validId[0] == false ) {
\r
3912 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3913 errorText_ = errorStream_.str();
\r
3917 else { // mode == INPUT
\r
3918 if ( dsDevices[ device ].validId[1] == false ) {
\r
3919 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3920 errorText_ = errorStream_.str();
\r
3925 // According to a note in PortAudio, using GetDesktopWindow()
\r
3926 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3927 // that occur when the application's window is not the foreground
\r
3928 // window. Also, if the application window closes before the
\r
3929 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3930 // problems when using GetDesktopWindow() but it seems fine now
\r
3931 // (January 2010). I'll leave it commented here.
\r
3932 // HWND hWnd = GetForegroundWindow();
\r
3933 HWND hWnd = GetDesktopWindow();
\r
3935 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3936 // two. This is a judgement call and a value of two is probably too
\r
3937 // low for capture, but it should work for playback.
\r
3939 if ( options ) nBuffers = options->numberOfBuffers;
\r
3940 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3941 if ( nBuffers < 2 ) nBuffers = 3;
\r
3943 // Check the lower range of the user-specified buffer size and set
\r
3944 // (arbitrarily) to a lower bound of 32.
\r
3945 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3947 // Create the wave format structure. The data format setting will
\r
3948 // be determined later.
\r
3949 WAVEFORMATEX waveFormat;
\r
3950 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3951 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3952 waveFormat.nChannels = channels + firstChannel;
\r
3953 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3955 // Determine the device buffer size. By default, we'll use the value
\r
3956 // defined above (32K), but we will grow it to make allowances for
\r
3957 // very large software buffer sizes.
\r
3958 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
3959 DWORD dsPointerLeadTime = 0;
\r
3961 void *ohandle = 0, *bhandle = 0;
\r
3963 if ( mode == OUTPUT ) {
\r
3965 LPDIRECTSOUND output;
\r
3966 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3967 if ( FAILED( result ) ) {
\r
3968 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3969 errorText_ = errorStream_.str();
\r
3974 outCaps.dwSize = sizeof( outCaps );
\r
3975 result = output->GetCaps( &outCaps );
\r
3976 if ( FAILED( result ) ) {
\r
3977 output->Release();
\r
3978 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3979 errorText_ = errorStream_.str();
\r
3983 // Check channel information.
\r
3984 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3985 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3986 errorText_ = errorStream_.str();
\r
3990 // Check format information. Use 16-bit format unless not
\r
3991 // supported or user requests 8-bit.
\r
3992 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3993 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3994 waveFormat.wBitsPerSample = 16;
\r
3995 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3998 waveFormat.wBitsPerSample = 8;
\r
3999 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4001 stream_.userFormat = format;
\r
4003 // Update wave format structure and buffer information.
\r
4004 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4005 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4006 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4008 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4009 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4010 dsBufferSize *= 2;
\r
4012 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
4013 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
4014 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
4015 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
4016 if ( FAILED( result ) ) {
\r
4017 output->Release();
\r
4018 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
4019 errorText_ = errorStream_.str();
\r
4023 // Even though we will write to the secondary buffer, we need to
\r
4024 // access the primary buffer to set the correct output format
\r
4025 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
4026 // buffer description.
\r
4027 DSBUFFERDESC bufferDescription;
\r
4028 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4029 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4030 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
4032 // Obtain the primary buffer
\r
4033 LPDIRECTSOUNDBUFFER buffer;
\r
4034 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4035 if ( FAILED( result ) ) {
\r
4036 output->Release();
\r
4037 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
4038 errorText_ = errorStream_.str();
\r
4042 // Set the primary DS buffer sound format.
\r
4043 result = buffer->SetFormat( &waveFormat );
\r
4044 if ( FAILED( result ) ) {
\r
4045 output->Release();
\r
4046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
4047 errorText_ = errorStream_.str();
\r
4051 // Setup the secondary DS buffer description.
\r
4052 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4053 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4054 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4055 DSBCAPS_GLOBALFOCUS |
\r
4056 DSBCAPS_GETCURRENTPOSITION2 |
\r
4057 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4058 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4059 bufferDescription.lpwfxFormat = &waveFormat;
\r
4061 // Try to create the secondary DS buffer. If that doesn't work,
\r
4062 // try to use software mixing. Otherwise, there's a problem.
\r
4063 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4064 if ( FAILED( result ) ) {
\r
4065 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4066 DSBCAPS_GLOBALFOCUS |
\r
4067 DSBCAPS_GETCURRENTPOSITION2 |
\r
4068 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4069 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4070 if ( FAILED( result ) ) {
\r
4071 output->Release();
\r
4072 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4073 errorText_ = errorStream_.str();
\r
4078 // Get the buffer size ... might be different from what we specified.
\r
4080 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4081 result = buffer->GetCaps( &dsbcaps );
\r
4082 if ( FAILED( result ) ) {
\r
4083 output->Release();
\r
4084 buffer->Release();
\r
4085 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4086 errorText_ = errorStream_.str();
\r
4090 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4092 // Lock the DS buffer
\r
4095 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4096 if ( FAILED( result ) ) {
\r
4097 output->Release();
\r
4098 buffer->Release();
\r
4099 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4100 errorText_ = errorStream_.str();
\r
4104 // Zero the DS buffer
\r
4105 ZeroMemory( audioPtr, dataLen );
\r
4107 // Unlock the DS buffer
\r
4108 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4109 if ( FAILED( result ) ) {
\r
4110 output->Release();
\r
4111 buffer->Release();
\r
4112 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4113 errorText_ = errorStream_.str();
\r
4117 ohandle = (void *) output;
\r
4118 bhandle = (void *) buffer;
\r
4121 if ( mode == INPUT ) {
\r
4123 LPDIRECTSOUNDCAPTURE input;
\r
4124 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4125 if ( FAILED( result ) ) {
\r
4126 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4127 errorText_ = errorStream_.str();
\r
4132 inCaps.dwSize = sizeof( inCaps );
\r
4133 result = input->GetCaps( &inCaps );
\r
4134 if ( FAILED( result ) ) {
\r
4136 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4137 errorText_ = errorStream_.str();
\r
4141 // Check channel information.
\r
4142 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4143 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4147 // Check format information. Use 16-bit format unless user
\r
4148 // requests 8-bit.
\r
4149 DWORD deviceFormats;
\r
4150 if ( channels + firstChannel == 2 ) {
\r
4151 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4152 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4153 waveFormat.wBitsPerSample = 8;
\r
4154 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4156 else { // assume 16-bit is supported
\r
4157 waveFormat.wBitsPerSample = 16;
\r
4158 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4161 else { // channel == 1
\r
4162 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4163 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4164 waveFormat.wBitsPerSample = 8;
\r
4165 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4167 else { // assume 16-bit is supported
\r
4168 waveFormat.wBitsPerSample = 16;
\r
4169 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4172 stream_.userFormat = format;
\r
4174 // Update wave format structure and buffer information.
\r
4175 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4176 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4177 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4179 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4180 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4181 dsBufferSize *= 2;
\r
4183 // Setup the secondary DS buffer description.
\r
4184 DSCBUFFERDESC bufferDescription;
\r
4185 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4186 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4187 bufferDescription.dwFlags = 0;
\r
4188 bufferDescription.dwReserved = 0;
\r
4189 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4190 bufferDescription.lpwfxFormat = &waveFormat;
\r
4192 // Create the capture buffer.
\r
4193 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4194 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4195 if ( FAILED( result ) ) {
\r
4197 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4198 errorText_ = errorStream_.str();
\r
4202 // Get the buffer size ... might be different from what we specified.
\r
4203 DSCBCAPS dscbcaps;
\r
4204 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4205 result = buffer->GetCaps( &dscbcaps );
\r
4206 if ( FAILED( result ) ) {
\r
4208 buffer->Release();
\r
4209 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4210 errorText_ = errorStream_.str();
\r
4214 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4216 // NOTE: We could have a problem here if this is a duplex stream
\r
4217 // and the play and capture hardware buffer sizes are different
\r
4218 // (I'm actually not sure if that is a problem or not).
\r
4219 // Currently, we are not verifying that.
\r
4221 // Lock the capture buffer
\r
4224 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4225 if ( FAILED( result ) ) {
\r
4227 buffer->Release();
\r
4228 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4229 errorText_ = errorStream_.str();
\r
4233 // Zero the buffer
\r
4234 ZeroMemory( audioPtr, dataLen );
\r
4236 // Unlock the buffer
\r
4237 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4238 if ( FAILED( result ) ) {
\r
4240 buffer->Release();
\r
4241 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4242 errorText_ = errorStream_.str();
\r
4246 ohandle = (void *) input;
\r
4247 bhandle = (void *) buffer;
\r
4250 // Set various stream parameters
\r
4251 DsHandle *handle = 0;
\r
4252 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4253 stream_.nUserChannels[mode] = channels;
\r
4254 stream_.bufferSize = *bufferSize;
\r
4255 stream_.channelOffset[mode] = firstChannel;
\r
4256 stream_.deviceInterleaved[mode] = true;
\r
4257 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4258 else stream_.userInterleaved = true;
\r
4260 // Set flag for buffer conversion
\r
4261 stream_.doConvertBuffer[mode] = false;
\r
4262 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4263 stream_.doConvertBuffer[mode] = true;
\r
4264 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4265 stream_.doConvertBuffer[mode] = true;
\r
4266 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4267 stream_.nUserChannels[mode] > 1 )
\r
4268 stream_.doConvertBuffer[mode] = true;
\r
4270 // Allocate necessary internal buffers
\r
4271 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4272 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4273 if ( stream_.userBuffer[mode] == NULL ) {
\r
4274 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4278 if ( stream_.doConvertBuffer[mode] ) {
\r
4280 bool makeBuffer = true;
\r
4281 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4282 if ( mode == INPUT ) {
\r
4283 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4284 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4285 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4289 if ( makeBuffer ) {
\r
4290 bufferBytes *= *bufferSize;
\r
4291 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4292 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4293 if ( stream_.deviceBuffer == NULL ) {
\r
4294 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4300 // Allocate our DsHandle structures for the stream.
\r
4301 if ( stream_.apiHandle == 0 ) {
\r
4303 handle = new DsHandle;
\r
4305 catch ( std::bad_alloc& ) {
\r
4306 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4310 // Create a manual-reset event.
\r
4311 handle->condition = CreateEvent( NULL, // no security
\r
4312 TRUE, // manual-reset
\r
4313 FALSE, // non-signaled initially
\r
4314 NULL ); // unnamed
\r
4315 stream_.apiHandle = (void *) handle;
\r
4318 handle = (DsHandle *) stream_.apiHandle;
\r
4319 handle->id[mode] = ohandle;
\r
4320 handle->buffer[mode] = bhandle;
\r
4321 handle->dsBufferSize[mode] = dsBufferSize;
\r
4322 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4324 stream_.device[mode] = device;
\r
4325 stream_.state = STREAM_STOPPED;
\r
4326 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4327 // We had already set up an output stream.
\r
4328 stream_.mode = DUPLEX;
\r
4330 stream_.mode = mode;
\r
4331 stream_.nBuffers = nBuffers;
\r
4332 stream_.sampleRate = sampleRate;
\r
4334 // Setup the buffer conversion information structure.
\r
4335 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4337 // Setup the callback thread.
\r
4338 if ( stream_.callbackInfo.isRunning == false ) {
\r
4339 unsigned threadId;
\r
4340 stream_.callbackInfo.isRunning = true;
\r
4341 stream_.callbackInfo.object = (void *) this;
\r
4342 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4343 &stream_.callbackInfo, 0, &threadId );
\r
4344 if ( stream_.callbackInfo.thread == 0 ) {
\r
4345 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4349 // Boost DS thread priority
\r
4350 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4356 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4357 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4358 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4359 if ( buffer ) buffer->Release();
\r
4360 object->Release();
\r
4362 if ( handle->buffer[1] ) {
\r
4363 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4364 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4365 if ( buffer ) buffer->Release();
\r
4366 object->Release();
\r
4368 CloseHandle( handle->condition );
\r
4370 stream_.apiHandle = 0;
\r
4373 for ( int i=0; i<2; i++ ) {
\r
4374 if ( stream_.userBuffer[i] ) {
\r
4375 free( stream_.userBuffer[i] );
\r
4376 stream_.userBuffer[i] = 0;
\r
4380 if ( stream_.deviceBuffer ) {
\r
4381 free( stream_.deviceBuffer );
\r
4382 stream_.deviceBuffer = 0;
\r
4385 stream_.state = STREAM_CLOSED;
\r
4389 void RtApiDs :: closeStream()
\r
4391 if ( stream_.state == STREAM_CLOSED ) {
\r
4392 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4393 error( RtAudioError::WARNING );
\r
4397 // Stop the callback thread.
\r
4398 stream_.callbackInfo.isRunning = false;
\r
4399 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4400 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4402 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4404 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4405 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4406 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4409 buffer->Release();
\r
4411 object->Release();
\r
4413 if ( handle->buffer[1] ) {
\r
4414 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4415 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4418 buffer->Release();
\r
4420 object->Release();
\r
4422 CloseHandle( handle->condition );
\r
4424 stream_.apiHandle = 0;
\r
4427 for ( int i=0; i<2; i++ ) {
\r
4428 if ( stream_.userBuffer[i] ) {
\r
4429 free( stream_.userBuffer[i] );
\r
4430 stream_.userBuffer[i] = 0;
\r
4434 if ( stream_.deviceBuffer ) {
\r
4435 free( stream_.deviceBuffer );
\r
4436 stream_.deviceBuffer = 0;
\r
4439 stream_.mode = UNINITIALIZED;
\r
4440 stream_.state = STREAM_CLOSED;
\r
4443 void RtApiDs :: startStream()
\r
4446 if ( stream_.state == STREAM_RUNNING ) {
\r
4447 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4448 error( RtAudioError::WARNING );
\r
4452 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4454 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4455 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4456 // this is already in effect.
\r
4457 timeBeginPeriod( 1 );
\r
4459 buffersRolling = false;
\r
4460 duplexPrerollBytes = 0;
\r
4462 if ( stream_.mode == DUPLEX ) {
\r
4463 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4464 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4467 HRESULT result = 0;
\r
4468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4470 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4471 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4472 if ( FAILED( result ) ) {
\r
4473 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4474 errorText_ = errorStream_.str();
\r
4479 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4481 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4482 result = buffer->Start( DSCBSTART_LOOPING );
\r
4483 if ( FAILED( result ) ) {
\r
4484 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4485 errorText_ = errorStream_.str();
\r
4490 handle->drainCounter = 0;
\r
4491 handle->internalDrain = false;
\r
4492 ResetEvent( handle->condition );
\r
4493 stream_.state = STREAM_RUNNING;
\r
4496 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4499 void RtApiDs :: stopStream()
\r
4502 if ( stream_.state == STREAM_STOPPED ) {
\r
4503 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4504 error( RtAudioError::WARNING );
\r
4508 HRESULT result = 0;
\r
4511 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4513 if ( handle->drainCounter == 0 ) {
\r
4514 handle->drainCounter = 2;
\r
4515 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4518 stream_.state = STREAM_STOPPED;
\r
4520 // Stop the buffer and clear memory
\r
4521 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4522 result = buffer->Stop();
\r
4523 if ( FAILED( result ) ) {
\r
4524 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4525 errorText_ = errorStream_.str();
\r
4529 // Lock the buffer and clear it so that if we start to play again,
\r
4530 // we won't have old data playing.
\r
4531 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4532 if ( FAILED( result ) ) {
\r
4533 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4534 errorText_ = errorStream_.str();
\r
4538 // Zero the DS buffer
\r
4539 ZeroMemory( audioPtr, dataLen );
\r
4541 // Unlock the DS buffer
\r
4542 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4543 if ( FAILED( result ) ) {
\r
4544 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4545 errorText_ = errorStream_.str();
\r
4549 // If we start playing again, we must begin at beginning of buffer.
\r
4550 handle->bufferPointer[0] = 0;
\r
4553 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4554 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4558 stream_.state = STREAM_STOPPED;
\r
4560 result = buffer->Stop();
\r
4561 if ( FAILED( result ) ) {
\r
4562 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4563 errorText_ = errorStream_.str();
\r
4567 // Lock the buffer and clear it so that if we start to play again,
\r
4568 // we won't have old data playing.
\r
4569 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4570 if ( FAILED( result ) ) {
\r
4571 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4572 errorText_ = errorStream_.str();
\r
4576 // Zero the DS buffer
\r
4577 ZeroMemory( audioPtr, dataLen );
\r
4579 // Unlock the DS buffer
\r
4580 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4581 if ( FAILED( result ) ) {
\r
4582 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4583 errorText_ = errorStream_.str();
\r
4587 // If we start recording again, we must begin at beginning of buffer.
\r
4588 handle->bufferPointer[1] = 0;
\r
4592 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4593 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4596 void RtApiDs :: abortStream()
\r
4599 if ( stream_.state == STREAM_STOPPED ) {
\r
4600 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4601 error( RtAudioError::WARNING );
\r
4605 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4606 handle->drainCounter = 2;
\r
4611 void RtApiDs :: callbackEvent()
\r
4613 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4614 Sleep( 50 ); // sleep 50 milliseconds
\r
4618 if ( stream_.state == STREAM_CLOSED ) {
\r
4619 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4620 error( RtAudioError::WARNING );
\r
4624 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4625 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4627 // Check if we were draining the stream and signal is finished.
\r
4628 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4630 stream_.state = STREAM_STOPPING;
\r
4631 if ( handle->internalDrain == false )
\r
4632 SetEvent( handle->condition );
\r
4638 // Invoke user callback to get fresh output data UNLESS we are
\r
4639 // draining stream.
\r
4640 if ( handle->drainCounter == 0 ) {
\r
4641 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4642 double streamTime = getStreamTime();
\r
4643 RtAudioStreamStatus status = 0;
\r
4644 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4645 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4646 handle->xrun[0] = false;
\r
4648 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4649 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4650 handle->xrun[1] = false;
\r
4652 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4653 stream_.bufferSize, streamTime, status, info->userData );
\r
4654 if ( cbReturnValue == 2 ) {
\r
4655 stream_.state = STREAM_STOPPING;
\r
4656 handle->drainCounter = 2;
\r
4660 else if ( cbReturnValue == 1 ) {
\r
4661 handle->drainCounter = 1;
\r
4662 handle->internalDrain = true;
\r
4667 DWORD currentWritePointer, safeWritePointer;
\r
4668 DWORD currentReadPointer, safeReadPointer;
\r
4669 UINT nextWritePointer;
\r
4671 LPVOID buffer1 = NULL;
\r
4672 LPVOID buffer2 = NULL;
\r
4673 DWORD bufferSize1 = 0;
\r
4674 DWORD bufferSize2 = 0;
\r
4679 if ( buffersRolling == false ) {
\r
4680 if ( stream_.mode == DUPLEX ) {
\r
4681 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4683 // It takes a while for the devices to get rolling. As a result,
\r
4684 // there's no guarantee that the capture and write device pointers
\r
4685 // will move in lockstep. Wait here for both devices to start
\r
4686 // rolling, and then set our buffer pointers accordingly.
\r
4687 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4688 // bytes later than the write buffer.
\r
4690 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4691 // take place between the two GetCurrentPosition calls... but I'm
\r
4692 // really not sure how to solve the problem. Temporarily boost to
\r
4693 // Realtime priority, maybe; but I'm not sure what priority the
\r
4694 // DirectSound service threads run at. We *should* be roughly
\r
4695 // within a ms or so of correct.
\r
4697 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4698 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4700 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4702 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4703 if ( FAILED( result ) ) {
\r
4704 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4705 errorText_ = errorStream_.str();
\r
4706 error( RtAudioError::SYSTEM_ERROR );
\r
4709 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4710 if ( FAILED( result ) ) {
\r
4711 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4712 errorText_ = errorStream_.str();
\r
4713 error( RtAudioError::SYSTEM_ERROR );
\r
4717 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4718 if ( FAILED( result ) ) {
\r
4719 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4720 errorText_ = errorStream_.str();
\r
4721 error( RtAudioError::SYSTEM_ERROR );
\r
4724 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4725 if ( FAILED( result ) ) {
\r
4726 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4727 errorText_ = errorStream_.str();
\r
4728 error( RtAudioError::SYSTEM_ERROR );
\r
4731 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4735 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4737 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4738 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4739 handle->bufferPointer[1] = safeReadPointer;
\r
4741 else if ( stream_.mode == OUTPUT ) {
\r
4743 // Set the proper nextWritePosition after initial startup.
\r
4744 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4745 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4746 if ( FAILED( result ) ) {
\r
4747 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4748 errorText_ = errorStream_.str();
\r
4749 error( RtAudioError::SYSTEM_ERROR );
\r
4752 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4753 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4756 buffersRolling = true;
\r
4759 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4761 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4763 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4764 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4765 bufferBytes *= formatBytes( stream_.userFormat );
\r
4766 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4769 // Setup parameters and do buffer conversion if necessary.
\r
4770 if ( stream_.doConvertBuffer[0] ) {
\r
4771 buffer = stream_.deviceBuffer;
\r
4772 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4773 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4774 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4777 buffer = stream_.userBuffer[0];
\r
4778 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4779 bufferBytes *= formatBytes( stream_.userFormat );
\r
4782 // No byte swapping necessary in DirectSound implementation.
\r
4784 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4785 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4787 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4788 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4790 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4791 nextWritePointer = handle->bufferPointer[0];
\r
4793 DWORD endWrite, leadPointer;
\r
4795 // Find out where the read and "safe write" pointers are.
\r
4796 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4797 if ( FAILED( result ) ) {
\r
4798 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4799 errorText_ = errorStream_.str();
\r
4800 error( RtAudioError::SYSTEM_ERROR );
\r
4804 // We will copy our output buffer into the region between
\r
4805 // safeWritePointer and leadPointer. If leadPointer is not
\r
4806 // beyond the next endWrite position, wait until it is.
\r
4807 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4808 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4809 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4810 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4811 endWrite = nextWritePointer + bufferBytes;
\r
4813 // Check whether the entire write region is behind the play pointer.
\r
4814 if ( leadPointer >= endWrite ) break;
\r
4816 // If we are here, then we must wait until the leadPointer advances
\r
4817 // beyond the end of our next write region. We use the
\r
4818 // Sleep() function to suspend operation until that happens.
\r
4819 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4820 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4821 if ( millis < 1.0 ) millis = 1.0;
\r
4822 Sleep( (DWORD) millis );
\r
4825 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4826 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4827 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4828 handle->xrun[0] = true;
\r
4829 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4830 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4831 handle->bufferPointer[0] = nextWritePointer;
\r
4832 endWrite = nextWritePointer + bufferBytes;
\r
4835 // Lock free space in the buffer
\r
4836 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4837 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4838 if ( FAILED( result ) ) {
\r
4839 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4840 errorText_ = errorStream_.str();
\r
4841 error( RtAudioError::SYSTEM_ERROR );
\r
4845 // Copy our buffer into the DS buffer
\r
4846 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4847 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4849 // Update our buffer offset and unlock sound buffer
\r
4850 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4851 if ( FAILED( result ) ) {
\r
4852 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4853 errorText_ = errorStream_.str();
\r
4854 error( RtAudioError::SYSTEM_ERROR );
\r
4857 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4858 handle->bufferPointer[0] = nextWritePointer;
\r
4860 if ( handle->drainCounter ) {
\r
4861 handle->drainCounter++;
\r
4866 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4868 // Setup parameters.
\r
4869 if ( stream_.doConvertBuffer[1] ) {
\r
4870 buffer = stream_.deviceBuffer;
\r
4871 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4872 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4875 buffer = stream_.userBuffer[1];
\r
4876 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4877 bufferBytes *= formatBytes( stream_.userFormat );
\r
4880 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4881 long nextReadPointer = handle->bufferPointer[1];
\r
4882 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4884 // Find out where the write and "safe read" pointers are.
\r
4885 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4886 if ( FAILED( result ) ) {
\r
4887 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4888 errorText_ = errorStream_.str();
\r
4889 error( RtAudioError::SYSTEM_ERROR );
\r
4893 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4894 DWORD endRead = nextReadPointer + bufferBytes;
\r
4896 // Handling depends on whether we are INPUT or DUPLEX.
\r
4897 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4898 // then a wait here will drag the write pointers into the forbidden zone.
\r
4900 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4901 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4902 // practical way to sync up the read and write pointers reliably, given the
\r
4903 // the very complex relationship between phase and increment of the read and write
\r
4906 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4907 // provide a pre-roll period of 0.5 seconds in which we return
\r
4908 // zeros from the read buffer while the pointers sync up.
\r
4910 if ( stream_.mode == DUPLEX ) {
\r
4911 if ( safeReadPointer < endRead ) {
\r
4912 if ( duplexPrerollBytes <= 0 ) {
\r
4913 // Pre-roll time over. Be more agressive.
\r
4914 int adjustment = endRead-safeReadPointer;
\r
4916 handle->xrun[1] = true;
\r
4918 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4919 // and perform fine adjustments later.
\r
4920 // - small adjustments: back off by twice as much.
\r
4921 if ( adjustment >= 2*bufferBytes )
\r
4922 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4924 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4926 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4930 // In pre=roll time. Just do it.
\r
4931 nextReadPointer = safeReadPointer - bufferBytes;
\r
4932 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4934 endRead = nextReadPointer + bufferBytes;
\r
4937 else { // mode == INPUT
\r
4938 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4939 // See comments for playback.
\r
4940 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4941 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4942 if ( millis < 1.0 ) millis = 1.0;
\r
4943 Sleep( (DWORD) millis );
\r
4945 // Wake up and find out where we are now.
\r
4946 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4947 if ( FAILED( result ) ) {
\r
4948 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4949 errorText_ = errorStream_.str();
\r
4950 error( RtAudioError::SYSTEM_ERROR );
\r
4954 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4958 // Lock free space in the buffer
\r
4959 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4960 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4961 if ( FAILED( result ) ) {
\r
4962 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4963 errorText_ = errorStream_.str();
\r
4964 error( RtAudioError::SYSTEM_ERROR );
\r
4968 if ( duplexPrerollBytes <= 0 ) {
\r
4969 // Copy our buffer into the DS buffer
\r
4970 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4971 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4974 memset( buffer, 0, bufferSize1 );
\r
4975 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4976 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4979 // Update our buffer offset and unlock sound buffer
\r
4980 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4981 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4982 if ( FAILED( result ) ) {
\r
4983 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4984 errorText_ = errorStream_.str();
\r
4985 error( RtAudioError::SYSTEM_ERROR );
\r
4988 handle->bufferPointer[1] = nextReadPointer;
\r
4990 // No byte swapping necessary in DirectSound implementation.
\r
4992 // If necessary, convert 8-bit data from unsigned to signed.
\r
4993 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4994 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4996 // Do buffer conversion if necessary.
\r
4997 if ( stream_.doConvertBuffer[1] )
\r
4998 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
5002 RtApi::tickStreamTime();
\r
5005 // Definitions for utility functions and callbacks
\r
5006 // specific to the DirectSound implementation.
\r
5008 static unsigned __stdcall callbackHandler( void *ptr )
\r
5010 CallbackInfo *info = (CallbackInfo *) ptr;
\r
5011 RtApiDs *object = (RtApiDs *) info->object;
\r
5012 bool* isRunning = &info->isRunning;
\r
5014 while ( *isRunning == true ) {
\r
5015 object->callbackEvent();
\r
5018 _endthreadex( 0 );
\r
5022 #include "tchar.h"
\r
5024 static std::string convertTChar( LPCTSTR name )
\r
5026 #if defined( UNICODE ) || defined( _UNICODE )
\r
5027 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5028 std::string s( length-1, '\0' );
\r
5029 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
5031 std::string s( name );
\r
5037 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5038 LPCTSTR description,
\r
5039 LPCTSTR /*module*/,
\r
5040 LPVOID lpContext )
\r
5042 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5043 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5046 bool validDevice = false;
\r
5047 if ( probeInfo.isInput == true ) {
\r
5049 LPDIRECTSOUNDCAPTURE object;
\r
5051 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5052 if ( hr != DS_OK ) return TRUE;
\r
5054 caps.dwSize = sizeof(caps);
\r
5055 hr = object->GetCaps( &caps );
\r
5056 if ( hr == DS_OK ) {
\r
5057 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5058 validDevice = true;
\r
5060 object->Release();
\r
5064 LPDIRECTSOUND object;
\r
5065 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5066 if ( hr != DS_OK ) return TRUE;
\r
5068 caps.dwSize = sizeof(caps);
\r
5069 hr = object->GetCaps( &caps );
\r
5070 if ( hr == DS_OK ) {
\r
5071 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5072 validDevice = true;
\r
5074 object->Release();
\r
5077 // If good device, then save its name and guid.
\r
5078 std::string name = convertTChar( description );
\r
5079 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5080 if ( lpguid == NULL )
\r
5081 name = "Default Device";
\r
5082 if ( validDevice ) {
\r
5083 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5084 if ( dsDevices[i].name == name ) {
\r
5085 dsDevices[i].found = true;
\r
5086 if ( probeInfo.isInput ) {
\r
5087 dsDevices[i].id[1] = lpguid;
\r
5088 dsDevices[i].validId[1] = true;
\r
5091 dsDevices[i].id[0] = lpguid;
\r
5092 dsDevices[i].validId[0] = true;
\r
5099 device.name = name;
\r
5100 device.found = true;
\r
5101 if ( probeInfo.isInput ) {
\r
5102 device.id[1] = lpguid;
\r
5103 device.validId[1] = true;
\r
5106 device.id[0] = lpguid;
\r
5107 device.validId[0] = true;
\r
5109 dsDevices.push_back( device );
\r
5115 static const char* getErrorString( int code )
\r
5119 case DSERR_ALLOCATED:
\r
5120 return "Already allocated";
\r
5122 case DSERR_CONTROLUNAVAIL:
\r
5123 return "Control unavailable";
\r
5125 case DSERR_INVALIDPARAM:
\r
5126 return "Invalid parameter";
\r
5128 case DSERR_INVALIDCALL:
\r
5129 return "Invalid call";
\r
5131 case DSERR_GENERIC:
\r
5132 return "Generic error";
\r
5134 case DSERR_PRIOLEVELNEEDED:
\r
5135 return "Priority level needed";
\r
5137 case DSERR_OUTOFMEMORY:
\r
5138 return "Out of memory";
\r
5140 case DSERR_BADFORMAT:
\r
5141 return "The sample rate or the channel format is not supported";
\r
5143 case DSERR_UNSUPPORTED:
\r
5144 return "Not supported";
\r
5146 case DSERR_NODRIVER:
\r
5147 return "No driver";
\r
5149 case DSERR_ALREADYINITIALIZED:
\r
5150 return "Already initialized";
\r
5152 case DSERR_NOAGGREGATION:
\r
5153 return "No aggregation";
\r
5155 case DSERR_BUFFERLOST:
\r
5156 return "Buffer lost";
\r
5158 case DSERR_OTHERAPPHASPRIO:
\r
5159 return "Another application already has priority";
\r
5161 case DSERR_UNINITIALIZED:
\r
5162 return "Uninitialized";
\r
5165 return "DirectSound unknown error";
\r
5168 //******************** End of __WINDOWS_DS__ *********************//
\r
5172 #if defined(__LINUX_ALSA__)
\r
5174 #include <alsa/asoundlib.h>
\r
5175 #include <unistd.h>
\r
5177 // A structure to hold various information related to the ALSA API
\r
5178 // implementation.
\r
5179 struct AlsaHandle {
\r
5180 snd_pcm_t *handles[2];
\r
5181 bool synchronized;
\r
5183 pthread_cond_t runnable_cv;
\r
5187 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5190 static void *alsaCallbackHandler( void * ptr );
\r
5192 RtApiAlsa :: RtApiAlsa()
\r
5194 // Nothing to do here.
\r
5197 RtApiAlsa :: ~RtApiAlsa()
\r
5199 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5202 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5204 unsigned nDevices = 0;
\r
5205 int result, subdevice, card;
\r
5207 snd_ctl_t *handle;
\r
5209 // Count cards and devices
\r
5211 snd_card_next( &card );
\r
5212 while ( card >= 0 ) {
\r
5213 sprintf( name, "hw:%d", card );
\r
5214 result = snd_ctl_open( &handle, name, 0 );
\r
5215 if ( result < 0 ) {
\r
5216 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5217 errorText_ = errorStream_.str();
\r
5218 error( RtAudioError::WARNING );
\r
5223 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5224 if ( result < 0 ) {
\r
5225 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5226 errorText_ = errorStream_.str();
\r
5227 error( RtAudioError::WARNING );
\r
5230 if ( subdevice < 0 )
\r
5235 snd_ctl_close( handle );
\r
5236 snd_card_next( &card );
\r
5239 result = snd_ctl_open( &handle, "default", 0 );
\r
5240 if (result == 0) {
\r
5242 snd_ctl_close( handle );
\r
5248 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5250 RtAudio::DeviceInfo info;
\r
5251 info.probed = false;
\r
5253 unsigned nDevices = 0;
\r
5254 int result, subdevice, card;
\r
5256 snd_ctl_t *chandle;
\r
5258 // Count cards and devices
\r
5260 snd_card_next( &card );
\r
5261 while ( card >= 0 ) {
\r
5262 sprintf( name, "hw:%d", card );
\r
5263 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5264 if ( result < 0 ) {
\r
5265 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5266 errorText_ = errorStream_.str();
\r
5267 error( RtAudioError::WARNING );
\r
5272 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5273 if ( result < 0 ) {
\r
5274 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5275 errorText_ = errorStream_.str();
\r
5276 error( RtAudioError::WARNING );
\r
5279 if ( subdevice < 0 ) break;
\r
5280 if ( nDevices == device ) {
\r
5281 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5287 snd_ctl_close( chandle );
\r
5288 snd_card_next( &card );
\r
5291 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5292 if ( result == 0 ) {
\r
5293 if ( nDevices == device ) {
\r
5294 strcpy( name, "default" );
\r
5300 if ( nDevices == 0 ) {
\r
5301 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5302 error( RtAudioError::INVALID_USE );
\r
5306 if ( device >= nDevices ) {
\r
5307 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5308 error( RtAudioError::INVALID_USE );
\r
5314 // If a stream is already open, we cannot probe the stream devices.
\r
5315 // Thus, use the saved results.
\r
5316 if ( stream_.state != STREAM_CLOSED &&
\r
5317 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5318 snd_ctl_close( chandle );
\r
5319 if ( device >= devices_.size() ) {
\r
5320 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5321 error( RtAudioError::WARNING );
\r
5324 return devices_[ device ];
\r
5327 int openMode = SND_PCM_ASYNC;
\r
5328 snd_pcm_stream_t stream;
\r
5329 snd_pcm_info_t *pcminfo;
\r
5330 snd_pcm_info_alloca( &pcminfo );
\r
5331 snd_pcm_t *phandle;
\r
5332 snd_pcm_hw_params_t *params;
\r
5333 snd_pcm_hw_params_alloca( ¶ms );
\r
5335 // First try for playback unless default device (which has subdev -1)
\r
5336 stream = SND_PCM_STREAM_PLAYBACK;
\r
5337 snd_pcm_info_set_stream( pcminfo, stream );
\r
5338 if ( subdevice != -1 ) {
\r
5339 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5340 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5342 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5343 if ( result < 0 ) {
\r
5344 // Device probably doesn't support playback.
\r
5345 goto captureProbe;
\r
5349 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5350 if ( result < 0 ) {
\r
5351 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5352 errorText_ = errorStream_.str();
\r
5353 error( RtAudioError::WARNING );
\r
5354 goto captureProbe;
\r
5357 // The device is open ... fill the parameter structure.
\r
5358 result = snd_pcm_hw_params_any( phandle, params );
\r
5359 if ( result < 0 ) {
\r
5360 snd_pcm_close( phandle );
\r
5361 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5362 errorText_ = errorStream_.str();
\r
5363 error( RtAudioError::WARNING );
\r
5364 goto captureProbe;
\r
5367 // Get output channel information.
\r
5368 unsigned int value;
\r
5369 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5370 if ( result < 0 ) {
\r
5371 snd_pcm_close( phandle );
\r
5372 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5373 errorText_ = errorStream_.str();
\r
5374 error( RtAudioError::WARNING );
\r
5375 goto captureProbe;
\r
5377 info.outputChannels = value;
\r
5378 snd_pcm_close( phandle );
\r
5381 stream = SND_PCM_STREAM_CAPTURE;
\r
5382 snd_pcm_info_set_stream( pcminfo, stream );
\r
5384 // Now try for capture unless default device (with subdev = -1)
\r
5385 if ( subdevice != -1 ) {
\r
5386 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5387 snd_ctl_close( chandle );
\r
5388 if ( result < 0 ) {
\r
5389 // Device probably doesn't support capture.
\r
5390 if ( info.outputChannels == 0 ) return info;
\r
5391 goto probeParameters;
\r
5395 snd_ctl_close( chandle );
\r
5397 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5398 if ( result < 0 ) {
\r
5399 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5400 errorText_ = errorStream_.str();
\r
5401 error( RtAudioError::WARNING );
\r
5402 if ( info.outputChannels == 0 ) return info;
\r
5403 goto probeParameters;
\r
5406 // The device is open ... fill the parameter structure.
\r
5407 result = snd_pcm_hw_params_any( phandle, params );
\r
5408 if ( result < 0 ) {
\r
5409 snd_pcm_close( phandle );
\r
5410 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5411 errorText_ = errorStream_.str();
\r
5412 error( RtAudioError::WARNING );
\r
5413 if ( info.outputChannels == 0 ) return info;
\r
5414 goto probeParameters;
\r
5417 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5418 if ( result < 0 ) {
\r
5419 snd_pcm_close( phandle );
\r
5420 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5421 errorText_ = errorStream_.str();
\r
5422 error( RtAudioError::WARNING );
\r
5423 if ( info.outputChannels == 0 ) return info;
\r
5424 goto probeParameters;
\r
5426 info.inputChannels = value;
\r
5427 snd_pcm_close( phandle );
\r
5429 // If device opens for both playback and capture, we determine the channels.
\r
5430 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5431 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5433 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5434 if ( device == 0 && info.outputChannels > 0 )
\r
5435 info.isDefaultOutput = true;
\r
5436 if ( device == 0 && info.inputChannels > 0 )
\r
5437 info.isDefaultInput = true;
\r
5440 // At this point, we just need to figure out the supported data
\r
5441 // formats and sample rates. We'll proceed by opening the device in
\r
5442 // the direction with the maximum number of channels, or playback if
\r
5443 // they are equal. This might limit our sample rate options, but so
\r
5446 if ( info.outputChannels >= info.inputChannels )
\r
5447 stream = SND_PCM_STREAM_PLAYBACK;
\r
5449 stream = SND_PCM_STREAM_CAPTURE;
\r
5450 snd_pcm_info_set_stream( pcminfo, stream );
\r
5452 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5453 if ( result < 0 ) {
\r
5454 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5455 errorText_ = errorStream_.str();
\r
5456 error( RtAudioError::WARNING );
\r
5460 // The device is open ... fill the parameter structure.
\r
5461 result = snd_pcm_hw_params_any( phandle, params );
\r
5462 if ( result < 0 ) {
\r
5463 snd_pcm_close( phandle );
\r
5464 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5465 errorText_ = errorStream_.str();
\r
5466 error( RtAudioError::WARNING );
\r
5470 // Test our discrete set of sample rate values.
\r
5471 info.sampleRates.clear();
\r
5472 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5473 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5474 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5476 if ( info.sampleRates.size() == 0 ) {
\r
5477 snd_pcm_close( phandle );
\r
5478 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5479 errorText_ = errorStream_.str();
\r
5480 error( RtAudioError::WARNING );
\r
5484 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5485 snd_pcm_format_t format;
\r
5486 info.nativeFormats = 0;
\r
5487 format = SND_PCM_FORMAT_S8;
\r
5488 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5489 info.nativeFormats |= RTAUDIO_SINT8;
\r
5490 format = SND_PCM_FORMAT_S16;
\r
5491 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5492 info.nativeFormats |= RTAUDIO_SINT16;
\r
5493 format = SND_PCM_FORMAT_S24;
\r
5494 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5495 info.nativeFormats |= RTAUDIO_SINT24;
\r
5496 format = SND_PCM_FORMAT_S32;
\r
5497 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5498 info.nativeFormats |= RTAUDIO_SINT32;
\r
5499 format = SND_PCM_FORMAT_FLOAT;
\r
5500 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5501 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5502 format = SND_PCM_FORMAT_FLOAT64;
\r
5503 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5504 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5506 // Check that we have at least one supported format
\r
5507 if ( info.nativeFormats == 0 ) {
\r
5508 snd_pcm_close( phandle );
\r
5509 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5510 errorText_ = errorStream_.str();
\r
5511 error( RtAudioError::WARNING );
\r
5515 // Get the device name
\r
5517 result = snd_card_get_name( card, &cardname );
\r
5518 if ( result >= 0 ) {
\r
5519 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5524 // That's all ... close the device and return
\r
5525 snd_pcm_close( phandle );
\r
5526 info.probed = true;
\r
5530 void RtApiAlsa :: saveDeviceInfo( void )
\r
5534 unsigned int nDevices = getDeviceCount();
\r
5535 devices_.resize( nDevices );
\r
5536 for ( unsigned int i=0; i<nDevices; i++ )
\r
5537 devices_[i] = getDeviceInfo( i );
\r
5540 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5541 unsigned int firstChannel, unsigned int sampleRate,
\r
5542 RtAudioFormat format, unsigned int *bufferSize,
\r
5543 RtAudio::StreamOptions *options )
\r
5546 #if defined(__RTAUDIO_DEBUG__)
\r
5547 snd_output_t *out;
\r
5548 snd_output_stdio_attach(&out, stderr, 0);
\r
5551 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5553 unsigned nDevices = 0;
\r
5554 int result, subdevice, card;
\r
5556 snd_ctl_t *chandle;
\r
5558 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5559 snprintf(name, sizeof(name), "%s", "default");
\r
5561 // Count cards and devices
\r
5563 snd_card_next( &card );
\r
5564 while ( card >= 0 ) {
\r
5565 sprintf( name, "hw:%d", card );
\r
5566 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5567 if ( result < 0 ) {
\r
5568 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5569 errorText_ = errorStream_.str();
\r
5574 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5575 if ( result < 0 ) break;
\r
5576 if ( subdevice < 0 ) break;
\r
5577 if ( nDevices == device ) {
\r
5578 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5579 snd_ctl_close( chandle );
\r
5584 snd_ctl_close( chandle );
\r
5585 snd_card_next( &card );
\r
5588 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5589 if ( result == 0 ) {
\r
5590 if ( nDevices == device ) {
\r
5591 strcpy( name, "default" );
\r
5597 if ( nDevices == 0 ) {
\r
5598 // This should not happen because a check is made before this function is called.
\r
5599 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5603 if ( device >= nDevices ) {
\r
5604 // This should not happen because a check is made before this function is called.
\r
5605 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5612 // The getDeviceInfo() function will not work for a device that is
\r
5613 // already open. Thus, we'll probe the system before opening a
\r
5614 // stream and save the results for use by getDeviceInfo().
\r
5615 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5616 this->saveDeviceInfo();
\r
5618 snd_pcm_stream_t stream;
\r
5619 if ( mode == OUTPUT )
\r
5620 stream = SND_PCM_STREAM_PLAYBACK;
\r
5622 stream = SND_PCM_STREAM_CAPTURE;
\r
5624 snd_pcm_t *phandle;
\r
5625 int openMode = SND_PCM_ASYNC;
\r
5626 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5627 if ( result < 0 ) {
\r
5628 if ( mode == OUTPUT )
\r
5629 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5631 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5632 errorText_ = errorStream_.str();
\r
5636 // Fill the parameter structure.
\r
5637 snd_pcm_hw_params_t *hw_params;
\r
5638 snd_pcm_hw_params_alloca( &hw_params );
\r
5639 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5640 if ( result < 0 ) {
\r
5641 snd_pcm_close( phandle );
\r
5642 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5643 errorText_ = errorStream_.str();
\r
5647 #if defined(__RTAUDIO_DEBUG__)
\r
5648 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5649 snd_pcm_hw_params_dump( hw_params, out );
\r
5652 // Set access ... check user preference.
\r
5653 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5654 stream_.userInterleaved = false;
\r
5655 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5656 if ( result < 0 ) {
\r
5657 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5658 stream_.deviceInterleaved[mode] = true;
\r
5661 stream_.deviceInterleaved[mode] = false;
\r
5664 stream_.userInterleaved = true;
\r
5665 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5666 if ( result < 0 ) {
\r
5667 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5668 stream_.deviceInterleaved[mode] = false;
\r
5671 stream_.deviceInterleaved[mode] = true;
\r
5674 if ( result < 0 ) {
\r
5675 snd_pcm_close( phandle );
\r
5676 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5677 errorText_ = errorStream_.str();
\r
5681 // Determine how to set the device format.
\r
5682 stream_.userFormat = format;
\r
5683 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5685 if ( format == RTAUDIO_SINT8 )
\r
5686 deviceFormat = SND_PCM_FORMAT_S8;
\r
5687 else if ( format == RTAUDIO_SINT16 )
\r
5688 deviceFormat = SND_PCM_FORMAT_S16;
\r
5689 else if ( format == RTAUDIO_SINT24 )
\r
5690 deviceFormat = SND_PCM_FORMAT_S24;
\r
5691 else if ( format == RTAUDIO_SINT32 )
\r
5692 deviceFormat = SND_PCM_FORMAT_S32;
\r
5693 else if ( format == RTAUDIO_FLOAT32 )
\r
5694 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5695 else if ( format == RTAUDIO_FLOAT64 )
\r
5696 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5698 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5699 stream_.deviceFormat[mode] = format;
\r
5703 // The user requested format is not natively supported by the device.
\r
5704 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5705 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5706 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5710 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5711 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5712 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5716 deviceFormat = SND_PCM_FORMAT_S32;
\r
5717 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5722 deviceFormat = SND_PCM_FORMAT_S24;
\r
5723 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5724 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5728 deviceFormat = SND_PCM_FORMAT_S16;
\r
5729 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5730 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5734 deviceFormat = SND_PCM_FORMAT_S8;
\r
5735 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5736 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5740 // If we get here, no supported format was found.
\r
5741 snd_pcm_close( phandle );
\r
5742 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5743 errorText_ = errorStream_.str();
\r
5747 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5748 if ( result < 0 ) {
\r
5749 snd_pcm_close( phandle );
\r
5750 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5751 errorText_ = errorStream_.str();
\r
5755 // Determine whether byte-swaping is necessary.
\r
5756 stream_.doByteSwap[mode] = false;
\r
5757 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5758 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5759 if ( result == 0 )
\r
5760 stream_.doByteSwap[mode] = true;
\r
5761 else if (result < 0) {
\r
5762 snd_pcm_close( phandle );
\r
5763 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5764 errorText_ = errorStream_.str();
\r
5769 // Set the sample rate.
\r
5770 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5771 if ( result < 0 ) {
\r
5772 snd_pcm_close( phandle );
\r
5773 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5774 errorText_ = errorStream_.str();
\r
5778 // Determine the number of channels for this device. We support a possible
\r
5779 // minimum device channel number > than the value requested by the user.
\r
5780 stream_.nUserChannels[mode] = channels;
\r
5781 unsigned int value;
\r
5782 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5783 unsigned int deviceChannels = value;
\r
5784 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5785 snd_pcm_close( phandle );
\r
5786 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5787 errorText_ = errorStream_.str();
\r
5791 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5792 if ( result < 0 ) {
\r
5793 snd_pcm_close( phandle );
\r
5794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5795 errorText_ = errorStream_.str();
\r
5798 deviceChannels = value;
\r
5799 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5800 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5802 // Set the device channels.
\r
5803 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5804 if ( result < 0 ) {
\r
5805 snd_pcm_close( phandle );
\r
5806 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5807 errorText_ = errorStream_.str();
\r
5811 // Set the buffer (or period) size.
\r
5813 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5814 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5815 if ( result < 0 ) {
\r
5816 snd_pcm_close( phandle );
\r
5817 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5818 errorText_ = errorStream_.str();
\r
5821 *bufferSize = periodSize;
\r
5823 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5824 unsigned int periods = 0;
\r
5825 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5826 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5827 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5828 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5829 if ( result < 0 ) {
\r
5830 snd_pcm_close( phandle );
\r
5831 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5832 errorText_ = errorStream_.str();
\r
5836 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5837 // MUST be the same in both directions!
\r
5838 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5839 snd_pcm_close( phandle );
\r
5840 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5841 errorText_ = errorStream_.str();
\r
5845 stream_.bufferSize = *bufferSize;
\r
5847 // Install the hardware configuration
\r
5848 result = snd_pcm_hw_params( phandle, hw_params );
\r
5849 if ( result < 0 ) {
\r
5850 snd_pcm_close( phandle );
\r
5851 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5852 errorText_ = errorStream_.str();
\r
5856 #if defined(__RTAUDIO_DEBUG__)
\r
5857 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5858 snd_pcm_hw_params_dump( hw_params, out );
\r
5861 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5862 snd_pcm_sw_params_t *sw_params = NULL;
\r
5863 snd_pcm_sw_params_alloca( &sw_params );
\r
5864 snd_pcm_sw_params_current( phandle, sw_params );
\r
5865 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5866 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5867 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5869 // The following two settings were suggested by Theo Veenker
\r
5870 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5871 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5873 // here are two options for a fix
\r
5874 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5875 snd_pcm_uframes_t val;
\r
5876 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5877 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5879 result = snd_pcm_sw_params( phandle, sw_params );
\r
5880 if ( result < 0 ) {
\r
5881 snd_pcm_close( phandle );
\r
5882 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5883 errorText_ = errorStream_.str();
\r
5887 #if defined(__RTAUDIO_DEBUG__)
\r
5888 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5889 snd_pcm_sw_params_dump( sw_params, out );
\r
5892 // Set flags for buffer conversion
\r
5893 stream_.doConvertBuffer[mode] = false;
\r
5894 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5895 stream_.doConvertBuffer[mode] = true;
\r
5896 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5897 stream_.doConvertBuffer[mode] = true;
\r
5898 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5899 stream_.nUserChannels[mode] > 1 )
\r
5900 stream_.doConvertBuffer[mode] = true;
\r
5902 // Allocate the ApiHandle if necessary and then save.
\r
5903 AlsaHandle *apiInfo = 0;
\r
5904 if ( stream_.apiHandle == 0 ) {
\r
5906 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5908 catch ( std::bad_alloc& ) {
\r
5909 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5913 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5914 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5918 stream_.apiHandle = (void *) apiInfo;
\r
5919 apiInfo->handles[0] = 0;
\r
5920 apiInfo->handles[1] = 0;
\r
5923 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5925 apiInfo->handles[mode] = phandle;
\r
5928 // Allocate necessary internal buffers.
\r
5929 unsigned long bufferBytes;
\r
5930 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5931 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5932 if ( stream_.userBuffer[mode] == NULL ) {
\r
5933 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5937 if ( stream_.doConvertBuffer[mode] ) {
\r
5939 bool makeBuffer = true;
\r
5940 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5941 if ( mode == INPUT ) {
\r
5942 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5943 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5944 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5948 if ( makeBuffer ) {
\r
5949 bufferBytes *= *bufferSize;
\r
5950 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5951 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5952 if ( stream_.deviceBuffer == NULL ) {
\r
5953 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5959 stream_.sampleRate = sampleRate;
\r
5960 stream_.nBuffers = periods;
\r
5961 stream_.device[mode] = device;
\r
5962 stream_.state = STREAM_STOPPED;
\r
5964 // Setup the buffer conversion information structure.
\r
5965 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5967 // Setup thread if necessary.
\r
5968 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5969 // We had already set up an output stream.
\r
5970 stream_.mode = DUPLEX;
\r
5971 // Link the streams if possible.
\r
5972 apiInfo->synchronized = false;
\r
5973 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5974 apiInfo->synchronized = true;
\r
5976 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5977 error( RtAudioError::WARNING );
\r
5981 stream_.mode = mode;
\r
5983 // Setup callback thread.
\r
5984 stream_.callbackInfo.object = (void *) this;
\r
5986 // Set the thread attributes for joinable and realtime scheduling
\r
5987 // priority (optional). The higher priority will only take affect
\r
5988 // if the program is run as root or suid. Note, under Linux
\r
5989 // processes with CAP_SYS_NICE privilege, a user can change
\r
5990 // scheduling policy and priority (thus need not be root). See
\r
5991 // POSIX "capabilities".
\r
5992 pthread_attr_t attr;
\r
5993 pthread_attr_init( &attr );
\r
5994 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5996 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5997 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5998 // We previously attempted to increase the audio callback priority
\r
5999 // to SCHED_RR here via the attributes. However, while no errors
\r
6000 // were reported in doing so, it did not work. So, now this is
\r
6001 // done in the alsaCallbackHandler function.
\r
6002 stream_.callbackInfo.doRealtime = true;
\r
6003 int priority = options->priority;
\r
6004 int min = sched_get_priority_min( SCHED_RR );
\r
6005 int max = sched_get_priority_max( SCHED_RR );
\r
6006 if ( priority < min ) priority = min;
\r
6007 else if ( priority > max ) priority = max;
\r
6008 stream_.callbackInfo.priority = priority;
\r
6012 stream_.callbackInfo.isRunning = true;
\r
6013 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
6014 pthread_attr_destroy( &attr );
\r
6016 stream_.callbackInfo.isRunning = false;
\r
6017 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
6026 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6027 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6028 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6030 stream_.apiHandle = 0;
\r
6033 if ( phandle) snd_pcm_close( phandle );
\r
6035 for ( int i=0; i<2; i++ ) {
\r
6036 if ( stream_.userBuffer[i] ) {
\r
6037 free( stream_.userBuffer[i] );
\r
6038 stream_.userBuffer[i] = 0;
\r
6042 if ( stream_.deviceBuffer ) {
\r
6043 free( stream_.deviceBuffer );
\r
6044 stream_.deviceBuffer = 0;
\r
6047 stream_.state = STREAM_CLOSED;
\r
6051 void RtApiAlsa :: closeStream()
\r
6053 if ( stream_.state == STREAM_CLOSED ) {
\r
6054 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6055 error( RtAudioError::WARNING );
\r
6059 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6060 stream_.callbackInfo.isRunning = false;
\r
6061 MUTEX_LOCK( &stream_.mutex );
\r
6062 if ( stream_.state == STREAM_STOPPED ) {
\r
6063 apiInfo->runnable = true;
\r
6064 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6066 MUTEX_UNLOCK( &stream_.mutex );
\r
6067 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6069 if ( stream_.state == STREAM_RUNNING ) {
\r
6070 stream_.state = STREAM_STOPPED;
\r
6071 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6072 snd_pcm_drop( apiInfo->handles[0] );
\r
6073 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6074 snd_pcm_drop( apiInfo->handles[1] );
\r
6078 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6079 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6080 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6082 stream_.apiHandle = 0;
\r
6085 for ( int i=0; i<2; i++ ) {
\r
6086 if ( stream_.userBuffer[i] ) {
\r
6087 free( stream_.userBuffer[i] );
\r
6088 stream_.userBuffer[i] = 0;
\r
6092 if ( stream_.deviceBuffer ) {
\r
6093 free( stream_.deviceBuffer );
\r
6094 stream_.deviceBuffer = 0;
\r
6097 stream_.mode = UNINITIALIZED;
\r
6098 stream_.state = STREAM_CLOSED;
\r
6101 void RtApiAlsa :: startStream()
\r
6103 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6106 if ( stream_.state == STREAM_RUNNING ) {
\r
6107 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6108 error( RtAudioError::WARNING );
\r
6112 MUTEX_LOCK( &stream_.mutex );
\r
6115 snd_pcm_state_t state;
\r
6116 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6117 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6118 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6119 state = snd_pcm_state( handle[0] );
\r
6120 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6121 result = snd_pcm_prepare( handle[0] );
\r
6122 if ( result < 0 ) {
\r
6123 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6124 errorText_ = errorStream_.str();
\r
6130 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6131 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
6132 state = snd_pcm_state( handle[1] );
\r
6133 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6134 result = snd_pcm_prepare( handle[1] );
\r
6135 if ( result < 0 ) {
\r
6136 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6137 errorText_ = errorStream_.str();
\r
6143 stream_.state = STREAM_RUNNING;
\r
6146 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6147 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6148 MUTEX_UNLOCK( &stream_.mutex );
\r
6150 if ( result >= 0 ) return;
\r
6151 error( RtAudioError::SYSTEM_ERROR );
\r
6154 void RtApiAlsa :: stopStream()
\r
6157 if ( stream_.state == STREAM_STOPPED ) {
\r
6158 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6159 error( RtAudioError::WARNING );
\r
6163 stream_.state = STREAM_STOPPED;
\r
6164 MUTEX_LOCK( &stream_.mutex );
\r
6167 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6168 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6169 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6170 if ( apiInfo->synchronized )
\r
6171 result = snd_pcm_drop( handle[0] );
\r
6173 result = snd_pcm_drain( handle[0] );
\r
6174 if ( result < 0 ) {
\r
6175 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6176 errorText_ = errorStream_.str();
\r
6181 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6182 result = snd_pcm_drop( handle[1] );
\r
6183 if ( result < 0 ) {
\r
6184 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6185 errorText_ = errorStream_.str();
\r
6191 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6192 MUTEX_UNLOCK( &stream_.mutex );
\r
6194 if ( result >= 0 ) return;
\r
6195 error( RtAudioError::SYSTEM_ERROR );
\r
6198 void RtApiAlsa :: abortStream()
\r
6201 if ( stream_.state == STREAM_STOPPED ) {
\r
6202 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6203 error( RtAudioError::WARNING );
\r
6207 stream_.state = STREAM_STOPPED;
\r
6208 MUTEX_LOCK( &stream_.mutex );
\r
6211 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6212 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6213 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6214 result = snd_pcm_drop( handle[0] );
\r
6215 if ( result < 0 ) {
\r
6216 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6217 errorText_ = errorStream_.str();
\r
6222 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6223 result = snd_pcm_drop( handle[1] );
\r
6224 if ( result < 0 ) {
\r
6225 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6226 errorText_ = errorStream_.str();
\r
6232 MUTEX_UNLOCK( &stream_.mutex );
\r
6234 if ( result >= 0 ) return;
\r
6235 error( RtAudioError::SYSTEM_ERROR );
\r
// Per-period service routine for the ALSA stream.  Order of work:
//  1) if STOPPED, park on runnable_cv until startStream()/closeStream();
//  2) run the user callback, reporting any pending xruns in `status`;
//  3) read capture samples, then write playback samples, recovering from
//     -EPIPE (xrun) via snd_pcm_prepare();
//  4) refresh the latency estimate from snd_pcm_delay() and tick stream time.
// NOTE(review): this region is a damaged extraction -- the embedded original
// line numbers are non-contiguous (6244->6246, 6274->6279, ...), so closing
// braces, early returns and the `unlock:` label are missing from this text.
6238 void RtApiAlsa :: callbackEvent()

6240 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

6241 if ( stream_.state == STREAM_STOPPED ) {

6242 MUTEX_LOCK( &stream_.mutex );

// Wait until startStream() flips `runnable` and signals the condvar.
6243 while ( !apiInfo->runnable )

6244 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

// Re-check under the lock: closeStream() also signals runnable_cv.
6246 if ( stream_.state != STREAM_RUNNING ) {

6247 MUTEX_UNLOCK( &stream_.mutex );

6250 MUTEX_UNLOCK( &stream_.mutex );

6253 if ( stream_.state == STREAM_CLOSED ) {

6254 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";

6255 error( RtAudioError::WARNING );

6259 int doStopStream = 0;

6260 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

6261 double streamTime = getStreamTime();

6262 RtAudioStreamStatus status = 0;

// xrun[0]/xrun[1] are latched by the I/O code below on -EPIPE and
// reported to the user exactly once, then cleared.
6263 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {

6264 status |= RTAUDIO_OUTPUT_UNDERFLOW;

6265 apiInfo->xrun[0] = false;

6267 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {

6268 status |= RTAUDIO_INPUT_OVERFLOW;

6269 apiInfo->xrun[1] = false;

6271 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],

6272 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

// Callback return of 2 requests an immediate abort (handled on missing lines).
6274 if ( doStopStream == 2 ) {

6279 MUTEX_LOCK( &stream_.mutex );

6281 // The state might change while waiting on a mutex.

6282 if ( stream_.state == STREAM_STOPPED ) goto unlock;

// NOTE(review): declarations of `buffer`, `channels` and `result` fell on
// lines missing from this extraction; they are used but not declared here.
6287 snd_pcm_t **handle;

6288 snd_pcm_sframes_t frames;

6289 RtAudioFormat format;

6290 handle = (snd_pcm_t **) apiInfo->handles;

// ---- Capture half: handle[1] is the input pcm. ----
6292 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6294 // Setup parameters.

6295 if ( stream_.doConvertBuffer[1] ) {

6296 buffer = stream_.deviceBuffer;

6297 channels = stream_.nDeviceChannels[1];

6298 format = stream_.deviceFormat[1];

6301 buffer = stream_.userBuffer[1];

6302 channels = stream_.nUserChannels[1];

6303 format = stream_.userFormat;

6306 // Read samples from device in interleaved/non-interleaved format.

6307 if ( stream_.deviceInterleaved[1] )

6308 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );

// Non-interleaved: build one pointer per channel into the single buffer.
6310 void *bufs[channels];

6311 size_t offset = stream_.bufferSize * formatBytes( format );

6312 for ( int i=0; i<channels; i++ )

6313 bufs[i] = (void *) (buffer + (i * offset));

6314 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );

6317 if ( result < (int) stream_.bufferSize ) {

6318 // Either an error or overrun occured.

6319 if ( result == -EPIPE ) {

6320 snd_pcm_state_t state = snd_pcm_state( handle[1] );

6321 if ( state == SND_PCM_STATE_XRUN ) {

// Latch the overflow for the *next* callback's status flags, then
// re-prepare the device so capture can continue.
6322 apiInfo->xrun[1] = true;

6323 result = snd_pcm_prepare( handle[1] );

6324 if ( result < 0 ) {

6325 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";

6326 errorText_ = errorStream_.str();

6330 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

6331 errorText_ = errorStream_.str();

6335 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";

6336 errorText_ = errorStream_.str();

6338 error( RtAudioError::WARNING );

6342 // Do byte swapping if necessary.

6343 if ( stream_.doByteSwap[1] )

6344 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

6346 // Do buffer conversion if necessary.

6347 if ( stream_.doConvertBuffer[1] )

6348 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

6350 // Check stream latency

6351 result = snd_pcm_delay( handle[1], &frames );

6352 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;

// ---- Playback half: handle[0] is the output pcm (mirror of the above). ----
6357 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6359 // Setup parameters and do buffer conversion if necessary.

6360 if ( stream_.doConvertBuffer[0] ) {

6361 buffer = stream_.deviceBuffer;

6362 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

6363 channels = stream_.nDeviceChannels[0];

6364 format = stream_.deviceFormat[0];

6367 buffer = stream_.userBuffer[0];

6368 channels = stream_.nUserChannels[0];

6369 format = stream_.userFormat;

6372 // Do byte swapping if necessary.

6373 if ( stream_.doByteSwap[0] )

6374 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

6376 // Write samples to device in interleaved/non-interleaved format.

6377 if ( stream_.deviceInterleaved[0] )

6378 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );

6380 void *bufs[channels];

6381 size_t offset = stream_.bufferSize * formatBytes( format );

6382 for ( int i=0; i<channels; i++ )

6383 bufs[i] = (void *) (buffer + (i * offset));

6384 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );

6387 if ( result < (int) stream_.bufferSize ) {

6388 // Either an error or underrun occured.

6389 if ( result == -EPIPE ) {

6390 snd_pcm_state_t state = snd_pcm_state( handle[0] );

6391 if ( state == SND_PCM_STATE_XRUN ) {

6392 apiInfo->xrun[0] = true;

6393 result = snd_pcm_prepare( handle[0] );

6394 if ( result < 0 ) {

6395 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";

6396 errorText_ = errorStream_.str();

6400 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

6401 errorText_ = errorStream_.str();

6405 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";

6406 errorText_ = errorStream_.str();

6408 error( RtAudioError::WARNING );

6412 // Check stream latency

6413 result = snd_pcm_delay( handle[0], &frames );

6414 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;

6418 MUTEX_UNLOCK( &stream_.mutex );

6420 RtApi::tickStreamTime();

// Callback return of 1 requests a drained stop after this period.
6421 if ( doStopStream == 1 ) this->stopStream();
\r
6424 static void *alsaCallbackHandler( void *ptr )
\r
6426 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6427 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6428 bool *isRunning = &info->isRunning;
\r
6430 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6431 if ( &info->doRealtime ) {
\r
6432 pthread_t tID = pthread_self(); // ID of this thread
\r
6433 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6434 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6438 while ( *isRunning == true ) {
\r
6439 pthread_testcancel();
\r
6440 object->callbackEvent();
\r
6443 pthread_exit( NULL );
\r
6446 //******************** End of __LINUX_ALSA__ *********************//
\r
6449 #if defined(__LINUX_PULSE__)
\r
6451 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6452 // and Tristan Matthews.
\r
6454 #include <pulse/error.h>
\r
6455 #include <pulse/simple.h>
\r
// Sample rates offered by the PulseAudio backend (zero-terminated list).
6458 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,

6459 44100, 48000, 96000, 0};

// Maps an RtAudio sample format to the corresponding PulseAudio format.
6461 struct rtaudio_pa_format_mapping_t {

6462 RtAudioFormat rtaudio_format;

6463 pa_sample_format_t pa_format;

// Format pairs supported by this backend; terminated by {0, PA_SAMPLE_INVALID}.
6466 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {

6467 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},

6468 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},

6469 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},

6470 {0, PA_SAMPLE_INVALID}};

// Per-stream PulseAudio state: playback/record simple-API connections, the
// callback thread, and the condvar used to park a stopped stream.
// NOTE(review): declarations of `s_rec`, `thread` and `runnable` fall on
// lines missing from this extraction (gap 6473->6476); the constructor
// below initializes s_rec and runnable, confirming they exist upstream.
6472 struct PulseAudioHandle {

6473 pa_simple *s_play;

6476 pthread_cond_t runnable_cv;

6478 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: closes the stream if it is still open.
// NOTE(review): the body (presumably a closeStream() call on the missing
// line 6484) is absent from this extraction.
6481 RtApiPulse::~RtApiPulse()

6483 if ( stream_.state != STREAM_CLOSED )
\r
// Device count for the PulseAudio backend.  Body not visible in this
// extraction (gap after 6487); getDeviceInfo() below treats PulseAudio
// as a single virtual device.
6487 unsigned int RtApiPulse::getDeviceCount( void )
\r
6492 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6494 RtAudio::DeviceInfo info;
\r
6495 info.probed = true;
\r
6496 info.name = "PulseAudio";
\r
6497 info.outputChannels = 2;
\r
6498 info.inputChannels = 2;
\r
6499 info.duplexChannels = 2;
\r
6500 info.isDefaultOutput = true;
\r
6501 info.isDefaultInput = true;
\r
6503 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6504 info.sampleRates.push_back( *sr );
\r
6506 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6511 static void *pulseaudio_callback( void * user )
\r
6513 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6514 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6515 volatile bool *isRunning = &cbi->isRunning;
\r
6517 while ( *isRunning ) {
\r
6518 pthread_testcancel();
\r
6519 context->callbackEvent();
\r
6522 pthread_exit( NULL );
\r
// Tears down the PulseAudio stream: clears isRunning, wakes a parked
// callback thread so it can observe the flag, joins the thread, frees
// both pa_simple connections and the handle, releases user buffers, and
// resets state to CLOSED / UNINITIALIZED.
// NOTE(review): line-number gaps (6542-6543, 6545, 6547) hide the s_rec
// branch header and the handle delete in this extraction.
6525 void RtApiPulse::closeStream( void )

6527 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

6529 stream_.callbackInfo.isRunning = false;

6531 MUTEX_LOCK( &stream_.mutex );

// A STOPPED stream's thread is blocked on runnable_cv -- release it.
6532 if ( stream_.state == STREAM_STOPPED ) {

6533 pah->runnable = true;

6534 pthread_cond_signal( &pah->runnable_cv );

6536 MUTEX_UNLOCK( &stream_.mutex );

6538 pthread_join( pah->thread, 0 );

// Flush pending playback data before freeing the connection.
6539 if ( pah->s_play ) {

6540 pa_simple_flush( pah->s_play, NULL );

6541 pa_simple_free( pah->s_play );

6544 pa_simple_free( pah->s_rec );

6546 pthread_cond_destroy( &pah->runnable_cv );

6548 stream_.apiHandle = 0;

6551 if ( stream_.userBuffer[0] ) {

6552 free( stream_.userBuffer[0] );

6553 stream_.userBuffer[0] = 0;

6555 if ( stream_.userBuffer[1] ) {

6556 free( stream_.userBuffer[1] );

6557 stream_.userBuffer[1] = 0;

6560 stream_.state = STREAM_CLOSED;

6561 stream_.mode = UNINITIALIZED;
\r
// Per-period service routine for the PulseAudio stream: park while
// STOPPED, run the user callback, then push playback bytes with
// pa_simple_write() and pull capture bytes with pa_simple_read(),
// converting between user and device layouts as configured.
// NOTE(review): damaged extraction -- declarations of `bytes` and
// `pa_error` and several closing braces fall on missing lines.
6564 void RtApiPulse::callbackEvent( void )

6566 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

6568 if ( stream_.state == STREAM_STOPPED ) {

6569 MUTEX_LOCK( &stream_.mutex );

// Wait until startStream() (or closeStream()) signals runnable_cv.
6570 while ( !pah->runnable )

6571 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

6573 if ( stream_.state != STREAM_RUNNING ) {

6574 MUTEX_UNLOCK( &stream_.mutex );

6577 MUTEX_UNLOCK( &stream_.mutex );

6580 if ( stream_.state == STREAM_CLOSED ) {

6581 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "

6582 "this shouldn't happen!";

6583 error( RtAudioError::WARNING );

6587 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

6588 double streamTime = getStreamTime();

6589 RtAudioStreamStatus status = 0;

6590 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],

6591 stream_.bufferSize, streamTime, status,

6592 stream_.callbackInfo.userData );

// Callback return of 2 requests an immediate abort (handled on missing lines).
6594 if ( doStopStream == 2 ) {

6599 MUTEX_LOCK( &stream_.mutex );

// Pick the staging buffer for each direction: the conversion buffer when
// a layout/format conversion is configured, the user buffer otherwise.
6600 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];

6601 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

6603 if ( stream_.state != STREAM_RUNNING )

// ---- Playback: convert (if needed), then blocking write. ----
6608 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6609 if ( stream_.doConvertBuffer[OUTPUT] ) {

6610 convertBuffer( stream_.deviceBuffer,

6611 stream_.userBuffer[OUTPUT],

6612 stream_.convertInfo[OUTPUT] );

6613 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *

6614 formatBytes( stream_.deviceFormat[OUTPUT] );

6616 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *

6617 formatBytes( stream_.userFormat );

6619 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {

6620 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<

6621 pa_strerror( pa_error ) << ".";

6622 errorText_ = errorStream_.str();

6623 error( RtAudioError::WARNING );

// ---- Capture: blocking read, then convert (if needed). ----
6627 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {

6628 if ( stream_.doConvertBuffer[INPUT] )

6629 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *

6630 formatBytes( stream_.deviceFormat[INPUT] );

6632 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *

6633 formatBytes( stream_.userFormat );

6635 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {

6636 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<

6637 pa_strerror( pa_error ) << ".";

6638 errorText_ = errorStream_.str();

6639 error( RtAudioError::WARNING );

6641 if ( stream_.doConvertBuffer[INPUT] ) {

6642 convertBuffer( stream_.userBuffer[INPUT],

6643 stream_.deviceBuffer,

6644 stream_.convertInfo[INPUT] );

6649 MUTEX_UNLOCK( &stream_.mutex );

6650 RtApi::tickStreamTime();

// Callback return of 1 requests a drained stop after this period.
6652 if ( doStopStream == 1 )
\r
// Transitions the stream to RUNNING and wakes the callback thread,
// which is parked on runnable_cv while the stream is stopped.
// Errors: INVALID_USE if the stream is closed; WARNING (and early
// return on a missing line) if it is already running.
6656 void RtApiPulse::startStream( void )

6658 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

6660 if ( stream_.state == STREAM_CLOSED ) {

6661 errorText_ = "RtApiPulse::startStream(): the stream is not open!";

6662 error( RtAudioError::INVALID_USE );

6665 if ( stream_.state == STREAM_RUNNING ) {

6666 errorText_ = "RtApiPulse::startStream(): the stream is already running!";

6667 error( RtAudioError::WARNING );

6671 MUTEX_LOCK( &stream_.mutex );

6673 stream_.state = STREAM_RUNNING;

// Flip the flag and signal under the mutex so the callback thread's
// pthread_cond_wait() in callbackEvent() cannot miss the wakeup.
6675 pah->runnable = true;

6676 pthread_cond_signal( &pah->runnable_cv );

6677 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stops the stream, draining any buffered playback data with
// pa_simple_drain() before returning.
// NOTE(review): `pa_error` is declared on a missing line (gap 6697-6699).
// Also note stream_.state is set to STOPPED *before* the mutex is taken
// (visible at 6695) -- presumably intentional so the callback thread
// observes the stop promptly; confirm against upstream.
6680 void RtApiPulse::stopStream( void )

6682 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

6684 if ( stream_.state == STREAM_CLOSED ) {

6685 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";

6686 error( RtAudioError::INVALID_USE );

6689 if ( stream_.state == STREAM_STOPPED ) {

6690 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";

6691 error( RtAudioError::WARNING );

6695 stream_.state = STREAM_STOPPED;

6696 MUTEX_LOCK( &stream_.mutex );

6698 if ( pah && pah->s_play ) {

// Drain: block until the server has played everything we wrote.
6700 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {

6701 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<

6702 pa_strerror( pa_error ) << ".";

6703 errorText_ = errorStream_.str();

6704 MUTEX_UNLOCK( &stream_.mutex );

6705 error( RtAudioError::SYSTEM_ERROR );

6710 stream_.state = STREAM_STOPPED;

6711 MUTEX_UNLOCK( &stream_.mutex );
\r
// Aborts the stream: identical to stopStream() except buffered playback
// data is *discarded* via pa_simple_flush() instead of drained.
// NOTE(review): `pa_error` is declared on a missing line (gap 6731-6733).
6714 void RtApiPulse::abortStream( void )

6716 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

6718 if ( stream_.state == STREAM_CLOSED ) {

6719 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";

6720 error( RtAudioError::INVALID_USE );

6723 if ( stream_.state == STREAM_STOPPED ) {

6724 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";

6725 error( RtAudioError::WARNING );

6729 stream_.state = STREAM_STOPPED;

6730 MUTEX_LOCK( &stream_.mutex );

6732 if ( pah && pah->s_play ) {

// Flush: throw away anything not yet played.
6734 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {

6735 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<

6736 pa_strerror( pa_error ) << ".";

6737 errorText_ = errorStream_.str();

6738 MUTEX_UNLOCK( &stream_.mutex );

6739 error( RtAudioError::SYSTEM_ERROR );

6744 stream_.state = STREAM_STOPPED;

6745 MUTEX_UNLOCK( &stream_.mutex );
\r
// Opens one direction (INPUT or OUTPUT) of the single PulseAudio device.
// Validates device index / channel count / rate / format against the
// backend tables, fills the stream_ bookkeeping, allocates user and
// (optionally) conversion buffers, creates the pa_simple connection for
// the requested direction, and spawns the callback thread on first open.
// Returns true on success, false on any validation or setup failure.
// NOTE(review): damaged extraction -- the error/goto cleanup labels and
// several `return false;` lines fall in the line-number gaps.
6748 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,

6749 unsigned int channels, unsigned int firstChannel,

6750 unsigned int sampleRate, RtAudioFormat format,

6751 unsigned int *bufferSize, RtAudio::StreamOptions *options )

6753 PulseAudioHandle *pah = 0;

6754 unsigned long bufferBytes = 0;

6755 pa_sample_spec ss;

// Only one virtual device; duplex is achieved by two successive opens.
6757 if ( device != 0 ) return false;

6758 if ( mode != INPUT && mode != OUTPUT ) return false;

6759 if ( channels != 1 && channels != 2 ) {

6760 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";

6763 ss.channels = channels;

6765 if ( firstChannel != 0 ) return false;

// Rate must appear in the zero-terminated SUPPORTED_SAMPLERATES table.
6767 bool sr_found = false;

6768 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {

6769 if ( sampleRate == *sr ) {

6771 stream_.sampleRate = sampleRate;

6772 ss.rate = sampleRate;

6776 if ( !sr_found ) {

6777 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";

// Format must appear in the supported_sampleformats mapping table.
6781 bool sf_found = 0;

6782 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;

6783 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {

6784 if ( format == sf->rtaudio_format ) {

6786 stream_.userFormat = sf->rtaudio_format;

6787 ss.format = sf->pa_format;

6791 if ( !sf_found ) {

6792 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";

6796 // Set interleaving parameters.

6797 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

6798 else stream_.userInterleaved = true;

6799 stream_.deviceInterleaved[mode] = true;

6800 stream_.nBuffers = 1;

6801 stream_.doByteSwap[mode] = false;

// Conversion is only needed to interleave multi-channel data for Pulse.
6802 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;

6803 stream_.deviceFormat[mode] = stream_.userFormat;

6804 stream_.nUserChannels[mode] = channels;

6805 stream_.nDeviceChannels[mode] = channels + firstChannel;

6806 stream_.channelOffset[mode] = 0;

6808 // Allocate necessary internal buffers.

6809 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

6810 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

6811 if ( stream_.userBuffer[mode] == NULL ) {

6812 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";

6815 stream_.bufferSize = *bufferSize;

6817 if ( stream_.doConvertBuffer[mode] ) {

6819 bool makeBuffer = true;

6820 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse the output conversion buffer when it is big enough.
6821 if ( mode == INPUT ) {

6822 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

6823 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

6824 if ( bufferBytes <= bytesOut ) makeBuffer = false;

6828 if ( makeBuffer ) {

6829 bufferBytes *= *bufferSize;

6830 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

6831 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

6832 if ( stream_.deviceBuffer == NULL ) {

6833 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";

6839 stream_.device[mode] = device;

6841 // Setup the buffer conversion information structure.

6842 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

// First open for this stream: allocate and initialize the handle.
6844 if ( !stream_.apiHandle ) {

6845 PulseAudioHandle *pah = new PulseAudioHandle;

6847 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";

6851 stream_.apiHandle = pah;

6852 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {

6853 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";

6857 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

6860 std::string streamName = "RtAudio";

// NOTE(review): `options` is dereferenced here with no null check visible;
// a guard may fall on the missing line 6859 -- confirm against upstream.
6861 if ( !options->streamName.empty() ) streamName = options->streamName;

6864 pa_buffer_attr buffer_attr;

6865 buffer_attr.fragsize = bufferBytes;

6866 buffer_attr.maxlength = -1;

6868 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );

6869 if ( !pah->s_rec ) {

6870 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";

// NOTE(review): inconsistency -- playback passes the literal "RtAudio"
// while the record stream above honors the user-selected streamName.
6875 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );

6876 if ( !pah->s_play ) {

6877 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";

// Second successful open of the other direction promotes the stream to DUPLEX.
6885 if ( stream_.mode == UNINITIALIZED )

6886 stream_.mode = mode;

6887 else if ( stream_.mode == mode )

6890 stream_.mode = DUPLEX;

6892 if ( !stream_.callbackInfo.isRunning ) {

6893 stream_.callbackInfo.object = this;

6894 stream_.callbackInfo.isRunning = true;

6895 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {

6896 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";

6901 stream_.state = STREAM_STOPPED;

// ---- Error cleanup path (label on a missing line): undo allocations. ----
6905 if ( pah && stream_.callbackInfo.isRunning ) {

6906 pthread_cond_destroy( &pah->runnable_cv );

6908 stream_.apiHandle = 0;

6911 for ( int i=0; i<2; i++ ) {

6912 if ( stream_.userBuffer[i] ) {

6913 free( stream_.userBuffer[i] );

6914 stream_.userBuffer[i] = 0;

6918 if ( stream_.deviceBuffer ) {

6919 free( stream_.deviceBuffer );

6920 stream_.deviceBuffer = 0;
\r
6926 //******************** End of __LINUX_PULSE__ *********************//
\r
6929 #if defined(__LINUX_OSS__)
\r
6931 #include <unistd.h>
\r
6932 #include <sys/ioctl.h>
\r
6933 #include <unistd.h>
\r
6934 #include <fcntl.h>
\r
6935 #include <sys/soundcard.h>
\r
6936 #include <errno.h>
\r
6939 static void *ossCallbackHandler(void * ptr);
\r
6941 // A structure to hold various information related to the OSS API
\r
6942 // implementation.
\r
// Per-stream OSS state: file descriptors for the playback/capture
// devices and a condvar used to park a stopped stream.
// NOTE(review): declarations of `xrun`, `triggered` and the constructor
// header fall on lines missing from this extraction (gaps 6945-6946,
// 6948-6949); the initializer list below references them.
6943 struct OssHandle {

6944 int id[2]; // device ids

6947 pthread_cond_t runnable;

6950 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor -- no backend state to initialize here.
6953 RtApiOss :: RtApiOss()

6955 // Nothing to do here.
\r
// Destructor: ensures the stream is closed before the object goes away.
6958 RtApiOss :: ~RtApiOss()

6960 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Counts OSS audio devices by querying SNDCTL_SYSINFO on /dev/mixer
// (requires OSS >= 4.0).  Emits a WARNING and (on missing lines,
// presumably) returns 0 on failure.
// NOTE(review): the close(mixerfd) calls for both the error paths and
// the success path fall in the line-number gaps (6969-6971, 6977-6980);
// confirm the descriptor is not leaked upstream.
6963 unsigned int RtApiOss :: getDeviceCount( void )

6965 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

6966 if ( mixerfd == -1 ) {

6967 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";

6968 error( RtAudioError::WARNING );

6972 oss_sysinfo sysinfo;

6973 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {

6975 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";

6976 error( RtAudioError::WARNING );

6981 return sysinfo.numaudios;
\r
// Probes one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl and fills
// an RtAudio::DeviceInfo: channel capabilities, native sample formats
// decoded from ainfo.iformats, and supported sample rates (either the
// device's explicit rate list or its min/max range intersected with
// SAMPLE_RATES).  info.probed stays false on any failure.
// NOTE(review): damaged extraction -- returns, close(mixerfd) calls and
// closing braces fall in the line-number gaps.
6984 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )

6986 RtAudio::DeviceInfo info;

6987 info.probed = false;

6989 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

6990 if ( mixerfd == -1 ) {

6991 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";

6992 error( RtAudioError::WARNING );

6996 oss_sysinfo sysinfo;

6997 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

6998 if ( result == -1 ) {

7000 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";

7001 error( RtAudioError::WARNING );

7005 unsigned nDevices = sysinfo.numaudios;

7006 if ( nDevices == 0 ) {

7008 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";

7009 error( RtAudioError::INVALID_USE );

7013 if ( device >= nDevices ) {

7015 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";

7016 error( RtAudioError::INVALID_USE );

7020 oss_audioinfo ainfo;

7021 ainfo.dev = device;

7022 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

7024 if ( result == -1 ) {

7025 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

7026 errorText_ = errorStream_.str();

7027 error( RtAudioError::WARNING );

7032 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;

7033 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;

// NOTE(review): PCM_CAP_DUPLEX is tested twice (outer if and again in the
// inner condition) -- the inner re-test is redundant.
7034 if ( ainfo.caps & PCM_CAP_DUPLEX ) {

7035 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )

7036 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

7039 // Probe data formats ... do for input

7040 unsigned long mask = ainfo.iformats;

7041 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )

7042 info.nativeFormats |= RTAUDIO_SINT16;

7043 if ( mask & AFMT_S8 )

7044 info.nativeFormats |= RTAUDIO_SINT8;

7045 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )

7046 info.nativeFormats |= RTAUDIO_SINT32;

7047 if ( mask & AFMT_FLOAT )

7048 info.nativeFormats |= RTAUDIO_FLOAT32;

7049 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )

7050 info.nativeFormats |= RTAUDIO_SINT24;

7052 // Check that we have at least one supported format

7053 if ( info.nativeFormats == 0 ) {

7054 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";

7055 errorText_ = errorStream_.str();

7056 error( RtAudioError::WARNING );

7060 // Probe the supported sample rates.

7061 info.sampleRates.clear();

// Devices reporting an explicit rate list: intersect it with SAMPLE_RATES.
7062 if ( ainfo.nrates ) {

7063 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {

7064 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

7065 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {

7066 info.sampleRates.push_back( SAMPLE_RATES[k] );

7073 // Check min and max rate values;

7074 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

7075 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )

7076 info.sampleRates.push_back( SAMPLE_RATES[k] );

7080 if ( info.sampleRates.size() == 0 ) {

7081 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";

7082 errorText_ = errorStream_.str();

7083 error( RtAudioError::WARNING );

7086 info.probed = true;

7087 info.name = ainfo.name;
\r
7094 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7095 unsigned int firstChannel, unsigned int sampleRate,
\r
7096 RtAudioFormat format, unsigned int *bufferSize,
\r
7097 RtAudio::StreamOptions *options )
\r
7099 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7100 if ( mixerfd == -1 ) {
\r
7101 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7105 oss_sysinfo sysinfo;
\r
7106 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7107 if ( result == -1 ) {
\r
7109 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7113 unsigned nDevices = sysinfo.numaudios;
\r
7114 if ( nDevices == 0 ) {
\r
7115 // This should not happen because a check is made before this function is called.
\r
7117 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7121 if ( device >= nDevices ) {
\r
7122 // This should not happen because a check is made before this function is called.
\r
7124 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7128 oss_audioinfo ainfo;
\r
7129 ainfo.dev = device;
\r
7130 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7132 if ( result == -1 ) {
\r
7133 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7134 errorText_ = errorStream_.str();
\r
7138 // Check if device supports input or output
\r
7139 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7140 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7141 if ( mode == OUTPUT )
\r
7142 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7144 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7145 errorText_ = errorStream_.str();
\r
7150 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7151 if ( mode == OUTPUT )
\r
7152 flags |= O_WRONLY;
\r
7153 else { // mode == INPUT
\r
7154 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7155 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7156 close( handle->id[0] );
\r
7157 handle->id[0] = 0;
\r
7158 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7159 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7160 errorText_ = errorStream_.str();
\r
7163 // Check that the number previously set channels is the same.
\r
7164 if ( stream_.nUserChannels[0] != channels ) {
\r
7165 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7166 errorText_ = errorStream_.str();
\r
7172 flags |= O_RDONLY;
\r
7175 // Set exclusive access if specified.
\r
7176 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7178 // Try to open the device.
\r
7180 fd = open( ainfo.devnode, flags, 0 );
\r
7182 if ( errno == EBUSY )
\r
7183 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7185 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7186 errorText_ = errorStream_.str();
\r
7190 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7192 if ( flags | O_RDWR ) {
\r
7193 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7194 if ( result == -1) {
\r
7195 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7196 errorText_ = errorStream_.str();
\r
7202 // Check the device channel support.
\r
7203 stream_.nUserChannels[mode] = channels;
\r
7204 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7206 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7207 errorText_ = errorStream_.str();
\r
7211 // Set the number of channels.
\r
7212 int deviceChannels = channels + firstChannel;
\r
7213 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7214 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7216 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7217 errorText_ = errorStream_.str();
\r
7220 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7222 // Get the data format mask
\r
7224 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7225 if ( result == -1 ) {
\r
7227 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7228 errorText_ = errorStream_.str();
\r
7232 // Determine how to set the device format.
\r
7233 stream_.userFormat = format;
\r
7234 int deviceFormat = -1;
\r
7235 stream_.doByteSwap[mode] = false;
\r
7236 if ( format == RTAUDIO_SINT8 ) {
\r
7237 if ( mask & AFMT_S8 ) {
\r
7238 deviceFormat = AFMT_S8;
\r
7239 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7242 else if ( format == RTAUDIO_SINT16 ) {
\r
7243 if ( mask & AFMT_S16_NE ) {
\r
7244 deviceFormat = AFMT_S16_NE;
\r
7245 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7247 else if ( mask & AFMT_S16_OE ) {
\r
7248 deviceFormat = AFMT_S16_OE;
\r
7249 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7250 stream_.doByteSwap[mode] = true;
\r
7253 else if ( format == RTAUDIO_SINT24 ) {
\r
7254 if ( mask & AFMT_S24_NE ) {
\r
7255 deviceFormat = AFMT_S24_NE;
\r
7256 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7258 else if ( mask & AFMT_S24_OE ) {
\r
7259 deviceFormat = AFMT_S24_OE;
\r
7260 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7261 stream_.doByteSwap[mode] = true;
\r
7264 else if ( format == RTAUDIO_SINT32 ) {
\r
7265 if ( mask & AFMT_S32_NE ) {
\r
7266 deviceFormat = AFMT_S32_NE;
\r
7267 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7269 else if ( mask & AFMT_S32_OE ) {
\r
7270 deviceFormat = AFMT_S32_OE;
\r
7271 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7272 stream_.doByteSwap[mode] = true;
\r
7276 if ( deviceFormat == -1 ) {
\r
7277 // The user requested format is not natively supported by the device.
\r
7278 if ( mask & AFMT_S16_NE ) {
\r
7279 deviceFormat = AFMT_S16_NE;
\r
7280 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7282 else if ( mask & AFMT_S32_NE ) {
\r
7283 deviceFormat = AFMT_S32_NE;
\r
7284 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7286 else if ( mask & AFMT_S24_NE ) {
\r
7287 deviceFormat = AFMT_S24_NE;
\r
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7290 else if ( mask & AFMT_S16_OE ) {
\r
7291 deviceFormat = AFMT_S16_OE;
\r
7292 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7293 stream_.doByteSwap[mode] = true;
\r
7295 else if ( mask & AFMT_S32_OE ) {
\r
7296 deviceFormat = AFMT_S32_OE;
\r
7297 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7298 stream_.doByteSwap[mode] = true;
\r
7300 else if ( mask & AFMT_S24_OE ) {
\r
7301 deviceFormat = AFMT_S24_OE;
\r
7302 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7303 stream_.doByteSwap[mode] = true;
\r
7305 else if ( mask & AFMT_S8) {
\r
7306 deviceFormat = AFMT_S8;
\r
7307 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7311 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7312 // This really shouldn't happen ...
\r
7314 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7315 errorText_ = errorStream_.str();
\r
7319 // Set the data format.
\r
7320 int temp = deviceFormat;
\r
7321 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7322 if ( result == -1 || deviceFormat != temp ) {
\r
7324 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7325 errorText_ = errorStream_.str();
\r
7329 // Attempt to set the buffer size. According to OSS, the minimum
\r
7330 // number of buffers is two. The supposed minimum buffer size is 16
\r
7331 // bytes, so that will be our lower bound. The argument to this
\r
7332 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7333 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7334 // We'll check the actual value used near the end of the setup
\r
7336 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7337 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7339 if ( options ) buffers = options->numberOfBuffers;
\r
7340 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7341 if ( buffers < 2 ) buffers = 3;
\r
7342 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7343 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7344 if ( result == -1 ) {
\r
7346 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7347 errorText_ = errorStream_.str();
\r
7350 stream_.nBuffers = buffers;
\r
7352 // Save buffer size (in sample frames).
\r
7353 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7354 stream_.bufferSize = *bufferSize;
\r
7356 // Set the sample rate.
\r
7357 int srate = sampleRate;
\r
7358 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7359 if ( result == -1 ) {
\r
7361 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7362 errorText_ = errorStream_.str();
\r
7366 // Verify the sample rate setup worked.
\r
7367 if ( abs( srate - sampleRate ) > 100 ) {
\r
7369 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7370 errorText_ = errorStream_.str();
\r
7373 stream_.sampleRate = sampleRate;
\r
7375 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7376 // We're doing duplex setup here.
\r
7377 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7378 stream_.nDeviceChannels[0] = deviceChannels;
\r
7381 // Set interleaving parameters.
\r
7382 stream_.userInterleaved = true;
\r
7383 stream_.deviceInterleaved[mode] = true;
\r
7384 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7385 stream_.userInterleaved = false;
\r
7387 // Set flags for buffer conversion
\r
7388 stream_.doConvertBuffer[mode] = false;
\r
7389 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7390 stream_.doConvertBuffer[mode] = true;
\r
7391 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7392 stream_.doConvertBuffer[mode] = true;
\r
7393 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7394 stream_.nUserChannels[mode] > 1 )
\r
7395 stream_.doConvertBuffer[mode] = true;
\r
7397 // Allocate the stream handles if necessary and then save.
\r
7398 if ( stream_.apiHandle == 0 ) {
\r
7400 handle = new OssHandle;
\r
7402 catch ( std::bad_alloc& ) {
\r
7403 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7407 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7408 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7412 stream_.apiHandle = (void *) handle;
\r
7415 handle = (OssHandle *) stream_.apiHandle;
\r
7417 handle->id[mode] = fd;
\r
7419 // Allocate necessary internal buffers.
\r
7420 unsigned long bufferBytes;
\r
7421 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7422 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7423 if ( stream_.userBuffer[mode] == NULL ) {
\r
7424 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7428 if ( stream_.doConvertBuffer[mode] ) {
\r
7430 bool makeBuffer = true;
\r
7431 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7432 if ( mode == INPUT ) {
\r
7433 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7434 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7435 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7439 if ( makeBuffer ) {
\r
7440 bufferBytes *= *bufferSize;
\r
7441 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7442 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7443 if ( stream_.deviceBuffer == NULL ) {
\r
7444 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7450 stream_.device[mode] = device;
\r
7451 stream_.state = STREAM_STOPPED;
\r
7453 // Setup the buffer conversion information structure.
\r
7454 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7456 // Setup thread if necessary.
\r
7457 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7458 // We had already set up an output stream.
\r
7459 stream_.mode = DUPLEX;
\r
7460 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7463 stream_.mode = mode;
\r
7465 // Setup callback thread.
\r
7466 stream_.callbackInfo.object = (void *) this;
\r
7468 // Set the thread attributes for joinable and realtime scheduling
\r
7469 // priority. The higher priority will only take affect if the
\r
7470 // program is run as root or suid.
\r
7471 pthread_attr_t attr;
\r
7472 pthread_attr_init( &attr );
\r
7473 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7474 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7475 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7476 struct sched_param param;
\r
7477 int priority = options->priority;
\r
7478 int min = sched_get_priority_min( SCHED_RR );
\r
7479 int max = sched_get_priority_max( SCHED_RR );
\r
7480 if ( priority < min ) priority = min;
\r
7481 else if ( priority > max ) priority = max;
\r
7482 param.sched_priority = priority;
\r
7483 pthread_attr_setschedparam( &attr, ¶m );
\r
7484 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7487 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7489 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7492 stream_.callbackInfo.isRunning = true;
\r
7493 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7494 pthread_attr_destroy( &attr );
\r
7496 stream_.callbackInfo.isRunning = false;
\r
7497 errorText_ = "RtApiOss::error creating callback thread!";
\r
7506 pthread_cond_destroy( &handle->runnable );
\r
7507 if ( handle->id[0] ) close( handle->id[0] );
\r
7508 if ( handle->id[1] ) close( handle->id[1] );
\r
7510 stream_.apiHandle = 0;
\r
7513 for ( int i=0; i<2; i++ ) {
\r
7514 if ( stream_.userBuffer[i] ) {
\r
7515 free( stream_.userBuffer[i] );
\r
7516 stream_.userBuffer[i] = 0;
\r
7520 if ( stream_.deviceBuffer ) {
\r
7521 free( stream_.deviceBuffer );
\r
7522 stream_.deviceBuffer = 0;
\r
7528 void RtApiOss :: closeStream()
\r
7530 if ( stream_.state == STREAM_CLOSED ) {
\r
7531 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7532 error( RtAudioError::WARNING );
\r
7536 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7537 stream_.callbackInfo.isRunning = false;
\r
7538 MUTEX_LOCK( &stream_.mutex );
\r
7539 if ( stream_.state == STREAM_STOPPED )
\r
7540 pthread_cond_signal( &handle->runnable );
\r
7541 MUTEX_UNLOCK( &stream_.mutex );
\r
7542 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7544 if ( stream_.state == STREAM_RUNNING ) {
\r
7545 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7546 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7548 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7549 stream_.state = STREAM_STOPPED;
\r
7553 pthread_cond_destroy( &handle->runnable );
\r
7554 if ( handle->id[0] ) close( handle->id[0] );
\r
7555 if ( handle->id[1] ) close( handle->id[1] );
\r
7557 stream_.apiHandle = 0;
\r
7560 for ( int i=0; i<2; i++ ) {
\r
7561 if ( stream_.userBuffer[i] ) {
\r
7562 free( stream_.userBuffer[i] );
\r
7563 stream_.userBuffer[i] = 0;
\r
7567 if ( stream_.deviceBuffer ) {
\r
7568 free( stream_.deviceBuffer );
\r
7569 stream_.deviceBuffer = 0;
\r
7572 stream_.mode = UNINITIALIZED;
\r
7573 stream_.state = STREAM_CLOSED;
\r
7576 void RtApiOss :: startStream()
\r
7579 if ( stream_.state == STREAM_RUNNING ) {
\r
7580 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7581 error( RtAudioError::WARNING );
\r
7585 MUTEX_LOCK( &stream_.mutex );
\r
7587 stream_.state = STREAM_RUNNING;
\r
7589 // No need to do anything else here ... OSS automatically starts
\r
7590 // when fed samples.
\r
7592 MUTEX_UNLOCK( &stream_.mutex );
\r
7594 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7595 pthread_cond_signal( &handle->runnable );
\r
7598 void RtApiOss :: stopStream()
\r
7601 if ( stream_.state == STREAM_STOPPED ) {
\r
7602 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7603 error( RtAudioError::WARNING );
\r
7607 MUTEX_LOCK( &stream_.mutex );
\r
7609 // The state might change while waiting on a mutex.
\r
7610 if ( stream_.state == STREAM_STOPPED ) {
\r
7611 MUTEX_UNLOCK( &stream_.mutex );
\r
7616 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7617 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7619 // Flush the output with zeros a few times.
\r
7622 RtAudioFormat format;
\r
7624 if ( stream_.doConvertBuffer[0] ) {
\r
7625 buffer = stream_.deviceBuffer;
\r
7626 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7627 format = stream_.deviceFormat[0];
\r
7630 buffer = stream_.userBuffer[0];
\r
7631 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7632 format = stream_.userFormat;
\r
7635 memset( buffer, 0, samples * formatBytes(format) );
\r
7636 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7637 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7638 if ( result == -1 ) {
\r
7639 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7640 error( RtAudioError::WARNING );
\r
7644 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7645 if ( result == -1 ) {
\r
7646 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7647 errorText_ = errorStream_.str();
\r
7650 handle->triggered = false;
\r
7653 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7654 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7655 if ( result == -1 ) {
\r
7656 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7657 errorText_ = errorStream_.str();
\r
7663 stream_.state = STREAM_STOPPED;
\r
7664 MUTEX_UNLOCK( &stream_.mutex );
\r
7666 if ( result != -1 ) return;
\r
7667 error( RtAudioError::SYSTEM_ERROR );
\r
7670 void RtApiOss :: abortStream()
\r
7673 if ( stream_.state == STREAM_STOPPED ) {
\r
7674 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7675 error( RtAudioError::WARNING );
\r
7679 MUTEX_LOCK( &stream_.mutex );
\r
7681 // The state might change while waiting on a mutex.
\r
7682 if ( stream_.state == STREAM_STOPPED ) {
\r
7683 MUTEX_UNLOCK( &stream_.mutex );
\r
7688 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7689 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7690 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7691 if ( result == -1 ) {
\r
7692 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7693 errorText_ = errorStream_.str();
\r
7696 handle->triggered = false;
\r
7699 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7700 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7701 if ( result == -1 ) {
\r
7702 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7703 errorText_ = errorStream_.str();
\r
7709 stream_.state = STREAM_STOPPED;
\r
7710 MUTEX_UNLOCK( &stream_.mutex );
\r
7712 if ( result != -1 ) return;
\r
7713 error( RtAudioError::SYSTEM_ERROR );
\r
7716 void RtApiOss :: callbackEvent()
\r
7718 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7719 if ( stream_.state == STREAM_STOPPED ) {
\r
7720 MUTEX_LOCK( &stream_.mutex );
\r
7721 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7722 if ( stream_.state != STREAM_RUNNING ) {
\r
7723 MUTEX_UNLOCK( &stream_.mutex );
\r
7726 MUTEX_UNLOCK( &stream_.mutex );
\r
7729 if ( stream_.state == STREAM_CLOSED ) {
\r
7730 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7731 error( RtAudioError::WARNING );
\r
7735 // Invoke user callback to get fresh output data.
\r
7736 int doStopStream = 0;
\r
7737 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7738 double streamTime = getStreamTime();
\r
7739 RtAudioStreamStatus status = 0;
\r
7740 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7741 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7742 handle->xrun[0] = false;
\r
7744 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7745 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7746 handle->xrun[1] = false;
\r
7748 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7749 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7750 if ( doStopStream == 2 ) {
\r
7751 this->abortStream();
\r
7755 MUTEX_LOCK( &stream_.mutex );
\r
7757 // The state might change while waiting on a mutex.
\r
7758 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7763 RtAudioFormat format;
\r
7765 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7767 // Setup parameters and do buffer conversion if necessary.
\r
7768 if ( stream_.doConvertBuffer[0] ) {
\r
7769 buffer = stream_.deviceBuffer;
\r
7770 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7771 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7772 format = stream_.deviceFormat[0];
\r
7775 buffer = stream_.userBuffer[0];
\r
7776 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7777 format = stream_.userFormat;
\r
7780 // Do byte swapping if necessary.
\r
7781 if ( stream_.doByteSwap[0] )
\r
7782 byteSwapBuffer( buffer, samples, format );
\r
7784 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7786 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7787 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7788 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7789 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7790 handle->triggered = true;
\r
7793 // Write samples to device.
\r
7794 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7796 if ( result == -1 ) {
\r
7797 // We'll assume this is an underrun, though there isn't a
\r
7798 // specific means for determining that.
\r
7799 handle->xrun[0] = true;
\r
7800 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7801 error( RtAudioError::WARNING );
\r
7802 // Continue on to input section.
\r
7806 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7808 // Setup parameters.
\r
7809 if ( stream_.doConvertBuffer[1] ) {
\r
7810 buffer = stream_.deviceBuffer;
\r
7811 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7812 format = stream_.deviceFormat[1];
\r
7815 buffer = stream_.userBuffer[1];
\r
7816 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7817 format = stream_.userFormat;
\r
7820 // Read samples from device.
\r
7821 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7823 if ( result == -1 ) {
\r
7824 // We'll assume this is an overrun, though there isn't a
\r
7825 // specific means for determining that.
\r
7826 handle->xrun[1] = true;
\r
7827 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7828 error( RtAudioError::WARNING );
\r
7832 // Do byte swapping if necessary.
\r
7833 if ( stream_.doByteSwap[1] )
\r
7834 byteSwapBuffer( buffer, samples, format );
\r
7836 // Do buffer conversion if necessary.
\r
7837 if ( stream_.doConvertBuffer[1] )
\r
7838 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7842 MUTEX_UNLOCK( &stream_.mutex );
\r
7844 RtApi::tickStreamTime();
\r
7845 if ( doStopStream == 1 ) this->stopStream();
\r
7848 static void *ossCallbackHandler( void *ptr )
\r
7850 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7851 RtApiOss *object = (RtApiOss *) info->object;
\r
7852 bool *isRunning = &info->isRunning;
\r
7854 while ( *isRunning == true ) {
\r
7855 pthread_testcancel();
\r
7856 object->callbackEvent();
\r
7859 pthread_exit( NULL );
\r
7862 //******************** End of __LINUX_OSS__ *********************//
\r
7866 // *************************************************** //
\r
7868 // Protected common (OS-independent) RtAudio methods.
\r
7870 // *************************************************** //
\r
7872 // This method can be modified to control the behavior of error
\r
7873 // message printing.
\r
7874 void RtApi :: error( RtAudioError::Type type )
\r
7876 errorStream_.str(""); // clear the ostringstream
\r
7878 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
7879 if ( errorCallback ) {
\r
7880 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
7882 if ( firstErrorOccurred_ )
\r
7885 firstErrorOccurred_ = true;
\r
7886 const std::string errorMessage = errorText_;
\r
7888 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
7889 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
7893 errorCallback( type, errorMessage );
\r
7894 firstErrorOccurred_ = false;
\r
7898 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
7899 std::cerr << '\n' << errorText_ << "\n\n";
\r
7900 else if ( type != RtAudioError::WARNING )
\r
7901 throw( RtAudioError( errorText_, type ) );
\r
7904 void RtApi :: verifyStream()
\r
7906 if ( stream_.state == STREAM_CLOSED ) {
\r
7907 errorText_ = "RtApi:: a stream is not open!";
\r
7908 error( RtAudioError::INVALID_USE );
\r
7912 void RtApi :: clearStreamInfo()
\r
7914 stream_.mode = UNINITIALIZED;
\r
7915 stream_.state = STREAM_CLOSED;
\r
7916 stream_.sampleRate = 0;
\r
7917 stream_.bufferSize = 0;
\r
7918 stream_.nBuffers = 0;
\r
7919 stream_.userFormat = 0;
\r
7920 stream_.userInterleaved = true;
\r
7921 stream_.streamTime = 0.0;
\r
7922 stream_.apiHandle = 0;
\r
7923 stream_.deviceBuffer = 0;
\r
7924 stream_.callbackInfo.callback = 0;
\r
7925 stream_.callbackInfo.userData = 0;
\r
7926 stream_.callbackInfo.isRunning = false;
\r
7927 stream_.callbackInfo.errorCallback = 0;
\r
7928 for ( int i=0; i<2; i++ ) {
\r
7929 stream_.device[i] = 11111;
\r
7930 stream_.doConvertBuffer[i] = false;
\r
7931 stream_.deviceInterleaved[i] = true;
\r
7932 stream_.doByteSwap[i] = false;
\r
7933 stream_.nUserChannels[i] = 0;
\r
7934 stream_.nDeviceChannels[i] = 0;
\r
7935 stream_.channelOffset[i] = 0;
\r
7936 stream_.deviceFormat[i] = 0;
\r
7937 stream_.latency[i] = 0;
\r
7938 stream_.userBuffer[i] = 0;
\r
7939 stream_.convertInfo[i].channels = 0;
\r
7940 stream_.convertInfo[i].inJump = 0;
\r
7941 stream_.convertInfo[i].outJump = 0;
\r
7942 stream_.convertInfo[i].inFormat = 0;
\r
7943 stream_.convertInfo[i].outFormat = 0;
\r
7944 stream_.convertInfo[i].inOffset.clear();
\r
7945 stream_.convertInfo[i].outOffset.clear();
\r
7949 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7951 if ( format == RTAUDIO_SINT16 )
\r
7953 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7955 else if ( format == RTAUDIO_FLOAT64 )
\r
7957 else if ( format == RTAUDIO_SINT24 )
\r
7959 else if ( format == RTAUDIO_SINT8 )
\r
7962 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7963 error( RtAudioError::WARNING );
\r
7968 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7970 if ( mode == INPUT ) { // convert device to user buffer
\r
7971 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7972 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7973 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7974 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7976 else { // convert user to device buffer
\r
7977 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7978 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7979 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7980 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7983 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7984 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7986 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7988 // Set up the interleave/deinterleave offsets.
\r
7989 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7990 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7991 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7992 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7993 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7994 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7995 stream_.convertInfo[mode].inJump = 1;
\r
7999 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8000 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8001 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8002 stream_.convertInfo[mode].outJump = 1;
\r
8006 else { // no (de)interleaving
\r
8007 if ( stream_.userInterleaved ) {
\r
8008 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8009 stream_.convertInfo[mode].inOffset.push_back( k );
\r
8010 stream_.convertInfo[mode].outOffset.push_back( k );
\r
8014 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
8015 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
8016 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
8017 stream_.convertInfo[mode].inJump = 1;
\r
8018 stream_.convertInfo[mode].outJump = 1;
\r
8023 // Add channel offset.
\r
8024 if ( firstChannel > 0 ) {
\r
8025 if ( stream_.deviceInterleaved[mode] ) {
\r
8026 if ( mode == OUTPUT ) {
\r
8027 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8028 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
8031 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8032 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
8036 if ( mode == OUTPUT ) {
\r
8037 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8038 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8041 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
8042 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8048 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8050 // This function does format conversion, input/output channel compensation, and
\r
8051 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8052 // the lower three bytes of a 32-bit integer.
\r
8054 // Clear our device buffer when in/out duplex device channels are different
\r
8055 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8056 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8057 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8060 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8062 Float64 *out = (Float64 *)outBuffer;
\r
8064 if (info.inFormat == RTAUDIO_SINT8) {
\r
8065 signed char *in = (signed char *)inBuffer;
\r
8066 scale = 1.0 / 127.5;
\r
8067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8068 for (j=0; j<info.channels; j++) {
\r
8069 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8070 out[info.outOffset[j]] += 0.5;
\r
8071 out[info.outOffset[j]] *= scale;
\r
8073 in += info.inJump;
\r
8074 out += info.outJump;
\r
8077 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8078 Int16 *in = (Int16 *)inBuffer;
\r
8079 scale = 1.0 / 32767.5;
\r
8080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8081 for (j=0; j<info.channels; j++) {
\r
8082 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8083 out[info.outOffset[j]] += 0.5;
\r
8084 out[info.outOffset[j]] *= scale;
\r
8086 in += info.inJump;
\r
8087 out += info.outJump;
\r
8090 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8091 Int24 *in = (Int24 *)inBuffer;
\r
8092 scale = 1.0 / 8388607.5;
\r
8093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8094 for (j=0; j<info.channels; j++) {
\r
8095 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8096 out[info.outOffset[j]] += 0.5;
\r
8097 out[info.outOffset[j]] *= scale;
\r
8099 in += info.inJump;
\r
8100 out += info.outJump;
\r
8103 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8104 Int32 *in = (Int32 *)inBuffer;
\r
8105 scale = 1.0 / 2147483647.5;
\r
8106 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8107 for (j=0; j<info.channels; j++) {
\r
8108 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8109 out[info.outOffset[j]] += 0.5;
\r
8110 out[info.outOffset[j]] *= scale;
\r
8112 in += info.inJump;
\r
8113 out += info.outJump;
\r
8116 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8117 Float32 *in = (Float32 *)inBuffer;
\r
8118 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8119 for (j=0; j<info.channels; j++) {
\r
8120 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8122 in += info.inJump;
\r
8123 out += info.outJump;
\r
8126 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8127 // Channel compensation and/or (de)interleaving only.
\r
8128 Float64 *in = (Float64 *)inBuffer;
\r
8129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8130 for (j=0; j<info.channels; j++) {
\r
8131 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8133 in += info.inJump;
\r
8134 out += info.outJump;
\r
8138 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8140 Float32 *out = (Float32 *)outBuffer;
\r
8142 if (info.inFormat == RTAUDIO_SINT8) {
\r
8143 signed char *in = (signed char *)inBuffer;
\r
8144 scale = (Float32) ( 1.0 / 127.5 );
\r
8145 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8146 for (j=0; j<info.channels; j++) {
\r
8147 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8148 out[info.outOffset[j]] += 0.5;
\r
8149 out[info.outOffset[j]] *= scale;
\r
8151 in += info.inJump;
\r
8152 out += info.outJump;
\r
8155 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8156 Int16 *in = (Int16 *)inBuffer;
\r
8157 scale = (Float32) ( 1.0 / 32767.5 );
\r
8158 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8159 for (j=0; j<info.channels; j++) {
\r
8160 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8161 out[info.outOffset[j]] += 0.5;
\r
8162 out[info.outOffset[j]] *= scale;
\r
8164 in += info.inJump;
\r
8165 out += info.outJump;
\r
8168 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8169 Int24 *in = (Int24 *)inBuffer;
\r
8170 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8171 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8172 for (j=0; j<info.channels; j++) {
\r
8173 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8174 out[info.outOffset[j]] += 0.5;
\r
8175 out[info.outOffset[j]] *= scale;
\r
8177 in += info.inJump;
\r
8178 out += info.outJump;
\r
8181 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8182 Int32 *in = (Int32 *)inBuffer;
\r
8183 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8184 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8185 for (j=0; j<info.channels; j++) {
\r
8186 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8187 out[info.outOffset[j]] += 0.5;
\r
8188 out[info.outOffset[j]] *= scale;
\r
8190 in += info.inJump;
\r
8191 out += info.outJump;
\r
8194 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8195 // Channel compensation and/or (de)interleaving only.
\r
8196 Float32 *in = (Float32 *)inBuffer;
\r
8197 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8198 for (j=0; j<info.channels; j++) {
\r
8199 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8201 in += info.inJump;
\r
8202 out += info.outJump;
\r
8205 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8206 Float64 *in = (Float64 *)inBuffer;
\r
8207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8208 for (j=0; j<info.channels; j++) {
\r
8209 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8211 in += info.inJump;
\r
8212 out += info.outJump;
\r
8216 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8217 Int32 *out = (Int32 *)outBuffer;
\r
8218 if (info.inFormat == RTAUDIO_SINT8) {
\r
8219 signed char *in = (signed char *)inBuffer;
\r
8220 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8221 for (j=0; j<info.channels; j++) {
\r
8222 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8223 out[info.outOffset[j]] <<= 24;
\r
8225 in += info.inJump;
\r
8226 out += info.outJump;
\r
8229 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8230 Int16 *in = (Int16 *)inBuffer;
\r
8231 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8232 for (j=0; j<info.channels; j++) {
\r
8233 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8234 out[info.outOffset[j]] <<= 16;
\r
8236 in += info.inJump;
\r
8237 out += info.outJump;
\r
8240 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8241 Int24 *in = (Int24 *)inBuffer;
\r
8242 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8243 for (j=0; j<info.channels; j++) {
\r
8244 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8245 out[info.outOffset[j]] <<= 8;
\r
8247 in += info.inJump;
\r
8248 out += info.outJump;
\r
8251 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8252 // Channel compensation and/or (de)interleaving only.
\r
8253 Int32 *in = (Int32 *)inBuffer;
\r
8254 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8255 for (j=0; j<info.channels; j++) {
\r
8256 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8258 in += info.inJump;
\r
8259 out += info.outJump;
\r
8262 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8263 Float32 *in = (Float32 *)inBuffer;
\r
8264 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8265 for (j=0; j<info.channels; j++) {
\r
8266 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8268 in += info.inJump;
\r
8269 out += info.outJump;
\r
8272 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8273 Float64 *in = (Float64 *)inBuffer;
\r
8274 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8275 for (j=0; j<info.channels; j++) {
\r
8276 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8278 in += info.inJump;
\r
8279 out += info.outJump;
\r
8283 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8284 Int24 *out = (Int24 *)outBuffer;
\r
8285 if (info.inFormat == RTAUDIO_SINT8) {
\r
8286 signed char *in = (signed char *)inBuffer;
\r
8287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8288 for (j=0; j<info.channels; j++) {
\r
8289 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8290 //out[info.outOffset[j]] <<= 16;
\r
8292 in += info.inJump;
\r
8293 out += info.outJump;
\r
8296 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8297 Int16 *in = (Int16 *)inBuffer;
\r
8298 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8299 for (j=0; j<info.channels; j++) {
\r
8300 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8301 //out[info.outOffset[j]] <<= 8;
\r
8303 in += info.inJump;
\r
8304 out += info.outJump;
\r
8307 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8308 // Channel compensation and/or (de)interleaving only.
\r
8309 Int24 *in = (Int24 *)inBuffer;
\r
8310 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8311 for (j=0; j<info.channels; j++) {
\r
8312 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8314 in += info.inJump;
\r
8315 out += info.outJump;
\r
8318 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8319 Int32 *in = (Int32 *)inBuffer;
\r
8320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8321 for (j=0; j<info.channels; j++) {
\r
8322 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8323 //out[info.outOffset[j]] >>= 8;
\r
8325 in += info.inJump;
\r
8326 out += info.outJump;
\r
8329 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8330 Float32 *in = (Float32 *)inBuffer;
\r
8331 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8332 for (j=0; j<info.channels; j++) {
\r
8333 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8335 in += info.inJump;
\r
8336 out += info.outJump;
\r
8339 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8340 Float64 *in = (Float64 *)inBuffer;
\r
8341 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8342 for (j=0; j<info.channels; j++) {
\r
8343 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8345 in += info.inJump;
\r
8346 out += info.outJump;
\r
8350 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8351 Int16 *out = (Int16 *)outBuffer;
\r
8352 if (info.inFormat == RTAUDIO_SINT8) {
\r
8353 signed char *in = (signed char *)inBuffer;
\r
8354 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8355 for (j=0; j<info.channels; j++) {
\r
8356 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8357 out[info.outOffset[j]] <<= 8;
\r
8359 in += info.inJump;
\r
8360 out += info.outJump;
\r
8363 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8364 // Channel compensation and/or (de)interleaving only.
\r
8365 Int16 *in = (Int16 *)inBuffer;
\r
8366 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8367 for (j=0; j<info.channels; j++) {
\r
8368 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8370 in += info.inJump;
\r
8371 out += info.outJump;
\r
8374 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8375 Int24 *in = (Int24 *)inBuffer;
\r
8376 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8377 for (j=0; j<info.channels; j++) {
\r
8378 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8380 in += info.inJump;
\r
8381 out += info.outJump;
\r
8384 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8385 Int32 *in = (Int32 *)inBuffer;
\r
8386 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8387 for (j=0; j<info.channels; j++) {
\r
8388 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8390 in += info.inJump;
\r
8391 out += info.outJump;
\r
8394 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8395 Float32 *in = (Float32 *)inBuffer;
\r
8396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8397 for (j=0; j<info.channels; j++) {
\r
8398 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8400 in += info.inJump;
\r
8401 out += info.outJump;
\r
8404 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8405 Float64 *in = (Float64 *)inBuffer;
\r
8406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8407 for (j=0; j<info.channels; j++) {
\r
8408 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8410 in += info.inJump;
\r
8411 out += info.outJump;
\r
8415 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8416 signed char *out = (signed char *)outBuffer;
\r
8417 if (info.inFormat == RTAUDIO_SINT8) {
\r
8418 // Channel compensation and/or (de)interleaving only.
\r
8419 signed char *in = (signed char *)inBuffer;
\r
8420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8421 for (j=0; j<info.channels; j++) {
\r
8422 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8424 in += info.inJump;
\r
8425 out += info.outJump;
\r
8428 if (info.inFormat == RTAUDIO_SINT16) {
\r
8429 Int16 *in = (Int16 *)inBuffer;
\r
8430 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8431 for (j=0; j<info.channels; j++) {
\r
8432 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8434 in += info.inJump;
\r
8435 out += info.outJump;
\r
8438 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8439 Int24 *in = (Int24 *)inBuffer;
\r
8440 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8441 for (j=0; j<info.channels; j++) {
\r
8442 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8444 in += info.inJump;
\r
8445 out += info.outJump;
\r
8448 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8449 Int32 *in = (Int32 *)inBuffer;
\r
8450 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8451 for (j=0; j<info.channels; j++) {
\r
8452 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8454 in += info.inJump;
\r
8455 out += info.outJump;
\r
8458 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8459 Float32 *in = (Float32 *)inBuffer;
\r
8460 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8461 for (j=0; j<info.channels; j++) {
\r
8462 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8464 in += info.inJump;
\r
8465 out += info.outJump;
\r
8468 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8469 Float64 *in = (Float64 *)inBuffer;
\r
8470 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8471 for (j=0; j<info.channels; j++) {
\r
8472 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8474 in += info.inJump;
\r
8475 out += info.outJump;
\r
8481 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8482 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8483 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8485 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8487 register char val;
\r
8488 register char *ptr;
\r
8491 if ( format == RTAUDIO_SINT16 ) {
\r
8492 for ( unsigned int i=0; i<samples; i++ ) {
\r
8493 // Swap 1st and 2nd bytes.
\r
8495 *(ptr) = *(ptr+1);
\r
8498 // Increment 2 bytes.
\r
8502 else if ( format == RTAUDIO_SINT32 ||
\r
8503 format == RTAUDIO_FLOAT32 ) {
\r
8504 for ( unsigned int i=0; i<samples; i++ ) {
\r
8505 // Swap 1st and 4th bytes.
\r
8507 *(ptr) = *(ptr+3);
\r
8510 // Swap 2nd and 3rd bytes.
\r
8513 *(ptr) = *(ptr+1);
\r
8516 // Increment 3 more bytes.
\r
8520 else if ( format == RTAUDIO_SINT24 ) {
\r
8521 for ( unsigned int i=0; i<samples; i++ ) {
\r
8522 // Swap 1st and 3rd bytes.
\r
8524 *(ptr) = *(ptr+2);
\r
8527 // Increment 2 more bytes.
\r
8531 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8532 for ( unsigned int i=0; i<samples; i++ ) {
\r
8533 // Swap 1st and 8th bytes
\r
8535 *(ptr) = *(ptr+7);
\r
8538 // Swap 2nd and 7th bytes
\r
8541 *(ptr) = *(ptr+5);
\r
8544 // Swap 3rd and 6th bytes
\r
8547 *(ptr) = *(ptr+3);
\r
8550 // Swap 4th and 5th bytes
\r
8553 *(ptr) = *(ptr+1);
\r
8556 // Increment 5 more bytes.
\r
8562 // Indentation settings for Vim and Emacs
\r
8564 // Local Variables:
\r
8565 // c-basic-offset: 2
\r
8566 // indent-tabs-mode: nil
\r
8569 // vim: et sts=2 sw=2
\r