1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2012 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.11
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
// Candidate sample rates probed per device (e.g. by getDeviceInfo); the
// count in MAX_SAMPLE_RATES above must match the number of entries below.
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// NOTE(review): the closing "};" of this array initializer (orig. lines
// 54-55) is not visible in this extract — restore from the canonical source.
// Platform mutex abstraction: Windows builds wrap CRITICAL_SECTION calls,
// the POSIX builds wrap pthread mutex calls.
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
// NOTE(review): the "#else" introducing the no-op fallback below and the
// closing "#endif" (orig. lines 67 and 70-71) are not visible in this
// extract — restore from the canonical source.
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
// Appends one RtAudio::Api enum value per API compiled into this build.
// The push order below defines the search order used by the RtAudio()
// constructor when no API is specified.
// NOTE(review): the opening brace, apis.clear() (if any) and the per-API
// "#endif" lines are not visible in this extract — restore from the
// canonical source.
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
// Instantiates the concrete RtApi subclass for the requested API, storing
// it in rtapi_. If the requested API was not compiled in, rtapi_ is left
// unchanged (the caller checks for this).
// NOTE(review): the opening brace and the per-API "#endif" lines are not
// visible in this extract — restore from the canonical source.
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
// Constructor: tries the explicitly requested API first; otherwise iterates
// the compiled APIs (in getCompiledApi order) and keeps the first one that
// reports at least one device.
// NOTE(review): the opening brace and the "rtapi_ = 0;" initialization
// (orig. lines 151-153) are not visible in this extract.
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
// NOTE(review): the "openRtApi( api );" call (orig. line 156) is not
// visible in this extract.
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
// Destructor. NOTE(review): the body (orig. lines 183-184, presumably
// "delete rtapi_;") is not visible in this extract — confirm against the
// canonical source.
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options )
\r
// Thin forwarder: all argument validation and device probing happens in
// RtApi::openStream on the selected backend instance (rtapi_).
194 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
195 sampleRate, bufferFrames, callback,
\r
196 userData, options );
\r
199 // *************************************************** //
\r
201 // Public RtApi definitions (see end of file for
\r
202 // private or protected utility functions).
\r
204 // *************************************************** //
\r
// NOTE(review): the RtApi constructor signature (orig. lines 206-207) is
// not visible in this extract; the statements below are its body: the
// stream record starts closed/uninitialized with no API handle or user
// buffers, the stream mutex is created, and warnings are enabled.
208 stream_.state = STREAM_CLOSED;
\r
209 stream_.mode = UNINITIALIZED;
\r
210 stream_.apiHandle = 0;
\r
211 stream_.userBuffer[0] = 0;
\r
212 stream_.userBuffer[1] = 0;
\r
213 MUTEX_INITIALIZE( &stream_.mutex );
\r
214 showWarnings_ = true;
\r
// NOTE(review): the "~RtApi()" destructor signature (orig. lines 217-218)
// is not visible; the line below is its body, releasing the stream mutex.
219 MUTEX_DESTROY( &stream_.mutex );
\r
222 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
223 RtAudio::StreamParameters *iParams,
\r
224 RtAudioFormat format, unsigned int sampleRate,
\r
225 unsigned int *bufferFrames,
\r
226 RtAudioCallback callback, void *userData,
\r
227 RtAudio::StreamOptions *options )
\r
// Validates all user arguments (reporting problems via error(), which for
// INVALID_USE/SYSTEM_ERROR does not return normally), then calls the
// API-specific probeDeviceOpen() for the output and/or input halves, and
// finally records the callback and marks the stream STOPPED.
// Only a stream in the STREAM_CLOSED state may be opened.
229 if ( stream_.state != STREAM_CLOSED ) {
\r
230 errorText_ = "RtApi::openStream: a stream is already open!";
\r
231 error( RtError::INVALID_USE );
\r
234 if ( oParams && oParams->nChannels < 1 ) {
\r
235 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
236 error( RtError::INVALID_USE );
\r
239 if ( iParams && iParams->nChannels < 1 ) {
\r
240 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
241 error( RtError::INVALID_USE );
\r
244 if ( oParams == NULL && iParams == NULL ) {
\r
245 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
246 error( RtError::INVALID_USE );
\r
// formatBytes() returns 0 for an unrecognized RtAudioFormat value.
249 if ( formatBytes(format) == 0 ) {
\r
250 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
251 error( RtError::INVALID_USE );
\r
254 unsigned int nDevices = getDeviceCount();
\r
255 unsigned int oChannels = 0;
\r
// NOTE(review): the "if ( oParams ) {" guard (orig. line 256) is not
// visible in this extract.
257 oChannels = oParams->nChannels;
\r
258 if ( oParams->deviceId >= nDevices ) {
\r
259 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
260 error( RtError::INVALID_USE );
\r
264 unsigned int iChannels = 0;
\r
// NOTE(review): the "if ( iParams ) {" guard (orig. line 265) is not
// visible in this extract.
266 iChannels = iParams->nChannels;
\r
267 if ( iParams->deviceId >= nDevices ) {
\r
268 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
269 error( RtError::INVALID_USE );
\r
// Probe/open the output half first, then the input half.
276 if ( oChannels > 0 ) {
\r
278 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
279 sampleRate, format, bufferFrames, options );
\r
280 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
283 if ( iChannels > 0 ) {
\r
285 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
286 sampleRate, format, bufferFrames, options );
\r
// If the input half fails after the output half was opened, close the
// stream so we don't leak the half-open output device.
287 if ( result == false ) {
\r
288 if ( oChannels > 0 ) closeStream();
\r
289 error( RtError::SYSTEM_ERROR );
\r
293 stream_.callbackInfo.callback = (void *) callback;
\r
294 stream_.callbackInfo.userData = userData;
\r
// Report back the actual buffer count chosen by the backend.
296 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
297 stream_.state = STREAM_STOPPED;
\r
300 unsigned int RtApi :: getDefaultInputDevice( void )
\r
// Base-class default: subclasses override where the API can report a
// default device. NOTE(review): the "return 0;" body (orig. line 303) is
// not visible in this extract.
302 // Should be implemented in subclasses if possible.
\r
306 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
// Base-class default: subclasses override where the API can report a
// default device. NOTE(review): the "return 0;" body (orig. line 309) is
// not visible in this extract.
308 // Should be implemented in subclasses if possible.
\r
312 void RtApi :: closeStream( void )
\r
// Pure placeholder in the base class — every backend provides a real
// closeStream().
314 // MUST be implemented in subclasses!
\r
318 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
319 unsigned int firstChannel, unsigned int sampleRate,
\r
320 RtAudioFormat format, unsigned int *bufferSize,
\r
321 RtAudio::StreamOptions *options )
\r
// Base-class placeholder — each backend implements the real device
// probe/open. NOTE(review): the "return FAILURE;" body (orig. line 324)
// is not visible in this extract.
323 // MUST be implemented in subclasses!
\r
327 void RtApi :: tickStreamTime( void )
\r
329 // Subclasses that do not provide their own implementation of
\r
330 // getStreamTime should call this function once per buffer I/O to
\r
331 // provide basic stream time support.
\r
// Advance the stream clock by one buffer's duration (in seconds).
333 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
// Record the wall-clock instant of this tick so getStreamTime() can
// interpolate between buffer callbacks.
335 #if defined( HAVE_GETTIMEOFDAY )
\r
336 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
// NOTE(review): the closing "#endif" (orig. line 337) is not visible in
// this extract.
340 long RtApi :: getStreamLatency( void )
\r
// Returns the combined latency: latency[0] (output) plus latency[1]
// (input), counting each side only when the stream runs in that mode.
344 long totalLatency = 0;
\r
345 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
346 totalLatency = stream_.latency[0];
\r
347 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
348 totalLatency += stream_.latency[1];
\r
350 return totalLatency;
\r
353 double RtApi :: getStreamTime( void )
\r
// Returns the stream time in seconds. When gettimeofday() is available
// and the stream is running, interpolates by adding the wall-clock time
// elapsed since the last tickStreamTime() call; otherwise returns the
// coarse per-buffer value.
357 #if defined( HAVE_GETTIMEOFDAY )
\r
358 // Return a very accurate estimate of the stream time by
\r
359 // adding in the elapsed time since the last tick.
\r
360 struct timeval then;
\r
361 struct timeval now;
\r
// No interpolation before the first buffer or when the stream is stopped.
363 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
364 return stream_.streamTime;
\r
366 gettimeofday( &now, NULL );
\r
367 then = stream_.lastTickTimestamp;
\r
368 return stream_.streamTime +
\r
369 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
370 (then.tv_sec + 0.000001 * then.tv_usec));
\r
// NOTE(review): the "#else" (orig. line 371) and closing "#endif" are not
// visible in this extract; the line below is the non-gettimeofday branch.
372 return stream_.streamTime;
\r
376 unsigned int RtApi :: getStreamSampleRate( void )
\r
// Accessor for the opened stream's sample rate. NOTE(review): the
// verifyStream() call (orig. line 378) is not visible in this extract.
380 return stream_.sampleRate;
\r
384 // *************************************************** //
\r
386 // OS/API-specific methods.
\r
388 // *************************************************** //
\r
390 #if defined(__MACOSX_CORE__)
\r
392 // The OS X CoreAudio API is designed to use a separate callback
\r
393 // procedure for each of its audio devices. A single RtAudio duplex
\r
394 // stream using two different devices is supported here, though it
\r
395 // cannot be guaranteed to always behave correctly because we cannot
\r
396 // synchronize these two callbacks.
\r
398 // A property listener is installed for over/underrun information.
\r
399 // However, no functionality is currently provided to allow property
\r
400 // listeners to trigger user handlers because it is unclear what could
\r
401 // be done if a critical stream parameter (buffer size, sample rate,
\r
402 // device disconnect) notification arrived. The listeners entail
\r
403 // quite a bit of extra code and most likely, a user program wouldn't
\r
404 // be prepared for the result anyway. However, we do provide a flag
\r
405 // to the client callback function to inform of an over/underrun.
\r
407 // A structure to hold various information related to the CoreAudio API
\r
// Per-stream bookkeeping for the CoreAudio backend. Indices [0]/[1] are
// the output/input halves of a (possibly duplex) stream.
409 struct CoreHandle {
\r
410 AudioDeviceID id[2]; // device ids
\r
411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
412 AudioDeviceIOProcID procId[2];
\r
// NOTE(review): the "#endif" closing the 10.5 block (orig. line 413) is
// not visible in this extract.
414 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
415 UInt32 nStreams[2]; // number of streams to use
\r
// NOTE(review): the "bool xrun[2];" member (orig. line 416) referenced by
// the constructor below is not visible in this extract.
417 char *deviceBuffer;
\r
418 pthread_cond_t condition;
\r
419 int drainCounter; // Tracks callback counts when draining
\r
420 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// Default constructor: no device buffer, not draining, one stream per
// direction, null device ids, and no over/underrun flagged.
423 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// NOTE(review): the "};" closing this struct (orig. lines 424-425) is not
// visible; whether threadId below belongs inside it cannot be determined
// from this extract — confirm against the canonical source.
426 ThreadHandle threadId;
\r
428 RtApiCore:: RtApiCore()
\r
// Constructor: on OS X 10.6+ attaches the HAL's notification dispatch to
// this process's run loop so device property queries/updates work.
430 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
431 // This is a largely undocumented but absolutely necessary
\r
432 // requirement starting with OS-X 10.6. If not called, queries and
\r
433 // updates to various audio device properties are not handled
\r
// A NULL run loop tells CoreAudio to use its own internal thread for
// property notifications.
435 CFRunLoopRef theRunLoop = NULL;
\r
436 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
437 kAudioObjectPropertyScopeGlobal,
\r
438 kAudioObjectPropertyElementMaster };
\r
439 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
440 if ( result != noErr ) {
\r
441 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
442 error( RtError::WARNING );
\r
447 RtApiCore :: ~RtApiCore()
\r
449 // The subclass destructor gets called before the base class
\r
450 // destructor, so close an existing stream before deallocating
\r
451 // apiDeviceId memory.
\r
// Ensure any open stream is torn down while the derived vtable is still
// in place.
452 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
455 unsigned int RtApiCore :: getDeviceCount( void )
\r
457 // Find out how many audio devices there are, if any.
\r
// Query the size of the system device-ID array; the count is that size
// divided by sizeof(AudioDeviceID).
// NOTE(review): the "UInt32 dataSize;" declaration (orig. line 458) is
// not visible in this extract.
459 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
460 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
461 if ( result != noErr ) {
\r
462 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
463 error( RtError::WARNING );
\r
467 return dataSize / sizeof( AudioDeviceID );
\r
470 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
// Maps the system default input AudioDeviceID to RtAudio's device index
// (its position in the kAudioHardwarePropertyDevices list). With zero or
// one devices the answer is trivially index 0.
472 unsigned int nDevices = getDeviceCount();
\r
473 if ( nDevices <= 1 ) return 0;
\r
// NOTE(review): the "AudioDeviceID id;" declaration (orig. line 475) is
// not visible in this extract.
476 UInt32 dataSize = sizeof( AudioDeviceID );
\r
477 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
478 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
479 if ( result != noErr ) {
\r
480 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
481 error( RtError::WARNING );
\r
// Fetch the full device list and search it for the default device's id.
485 dataSize *= nDevices;
\r
486 AudioDeviceID deviceList[ nDevices ];
\r
487 property.mSelector = kAudioHardwarePropertyDevices;
\r
488 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
489 if ( result != noErr ) {
\r
490 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
491 error( RtError::WARNING );
\r
495 for ( unsigned int i=0; i<nDevices; i++ )
\r
496 if ( id == deviceList[i] ) return i;
\r
498 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
499 error( RtError::WARNING );
\r
503 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
// Maps the system default output AudioDeviceID to RtAudio's device index
// (its position in the kAudioHardwarePropertyDevices list). With zero or
// one devices the answer is trivially index 0.
505 unsigned int nDevices = getDeviceCount();
\r
506 if ( nDevices <= 1 ) return 0;
\r
// NOTE(review): the "AudioDeviceID id;" declaration (orig. line 508) is
// not visible in this extract.
509 UInt32 dataSize = sizeof( AudioDeviceID );
\r
510 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
511 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
512 if ( result != noErr ) {
\r
513 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
514 error( RtError::WARNING );
\r
// Fetch the full device list and search it for the default device's id.
518 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
519 AudioDeviceID deviceList[ nDevices ];
\r
520 property.mSelector = kAudioHardwarePropertyDevices;
\r
521 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
522 if ( result != noErr ) {
\r
523 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
524 error( RtError::WARNING );
\r
528 for ( unsigned int i=0; i<nDevices; i++ )
\r
529 if ( id == deviceList[i] ) return i;
\r
531 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
532 error( RtError::WARNING );
\r
536 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
// Probes one CoreAudio device and fills an RtAudio::DeviceInfo:
// manufacturer+name string, output/input/duplex channel counts, the
// supported subset of SAMPLE_RATES, native format (always float32), and
// default-device flags. info.probed stays false on any early failure.
538 RtAudio::DeviceInfo info;
\r
539 info.probed = false;
\r
542 unsigned int nDevices = getDeviceCount();
\r
543 if ( nDevices == 0 ) {
\r
544 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
545 error( RtError::INVALID_USE );
\r
548 if ( device >= nDevices ) {
\r
549 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
550 error( RtError::INVALID_USE );
\r
// Translate the RtAudio device index into a CoreAudio AudioDeviceID.
553 AudioDeviceID deviceList[ nDevices ];
\r
554 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
555 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
556 kAudioObjectPropertyScopeGlobal,
\r
557 kAudioObjectPropertyElementMaster };
\r
558 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
559 0, NULL, &dataSize, (void *) &deviceList );
\r
560 if ( result != noErr ) {
\r
561 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
562 error( RtError::WARNING );
\r
566 AudioDeviceID id = deviceList[ device ];
\r
568 // Get the device name.
\r
570 CFStringRef cfname;
\r
571 dataSize = sizeof( CFStringRef );
\r
572 property.mSelector = kAudioObjectPropertyManufacturer;
\r
573 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
574 if ( result != noErr ) {
\r
575 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
576 errorText_ = errorStream_.str();
\r
577 error( RtError::WARNING );
\r
581 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
// "length * 3 + 1" bytes allows for multi-byte characters in the
// system encoding plus the NUL terminator.
582 int length = CFStringGetLength(cfname);
\r
583 char *mname = (char *)malloc(length * 3 + 1);
\r
584 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
// NOTE(review): mname is malloc'd but never freed in the visible code —
// likely a small per-call leak; confirm against the canonical source.
585 info.name.append( (const char *)mname, strlen(mname) );
\r
586 info.name.append( ": " );
\r
587 CFRelease( cfname );
\r
590 property.mSelector = kAudioObjectPropertyName;
\r
591 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
592 if ( result != noErr ) {
\r
593 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
594 errorText_ = errorStream_.str();
\r
595 error( RtError::WARNING );
\r
599 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
600 length = CFStringGetLength(cfname);
\r
601 char *name = (char *)malloc(length * 3 + 1);
\r
602 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
// NOTE(review): name is malloc'd but never freed in the visible code —
// same apparent leak as mname above.
603 info.name.append( (const char *)name, strlen(name) );
\r
604 CFRelease( cfname );
\r
607 // Get the output stream "configuration".
\r
608 AudioBufferList *bufferList = nil;
\r
609 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
610 property.mScope = kAudioDevicePropertyScopeOutput;
\r
611 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
613 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
614 if ( result != noErr || dataSize == 0 ) {
\r
615 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtError::WARNING );
\r
621 // Allocate the AudioBufferList.
\r
622 bufferList = (AudioBufferList *) malloc( dataSize );
\r
623 if ( bufferList == NULL ) {
\r
624 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
625 error( RtError::WARNING );
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
630 if ( result != noErr || dataSize == 0 ) {
\r
631 free( bufferList );
\r
632 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
633 errorText_ = errorStream_.str();
\r
634 error( RtError::WARNING );
\r
638 // Get output channel information.
\r
// Output channel count is the sum of channels across all output streams.
639 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
640 for ( i=0; i<nStreams; i++ )
\r
641 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
642 free( bufferList );
\r
644 // Get the input stream "configuration".
\r
645 property.mScope = kAudioDevicePropertyScopeInput;
\r
646 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
647 if ( result != noErr || dataSize == 0 ) {
\r
648 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
649 errorText_ = errorStream_.str();
\r
650 error( RtError::WARNING );
\r
654 // Allocate the AudioBufferList.
\r
655 bufferList = (AudioBufferList *) malloc( dataSize );
\r
656 if ( bufferList == NULL ) {
\r
657 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
658 error( RtError::WARNING );
\r
662 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
663 if (result != noErr || dataSize == 0) {
\r
664 free( bufferList );
\r
665 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
666 errorText_ = errorStream_.str();
\r
667 error( RtError::WARNING );
\r
671 // Get input channel information.
\r
672 nStreams = bufferList->mNumberBuffers;
\r
673 for ( i=0; i<nStreams; i++ )
\r
674 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
675 free( bufferList );
\r
677 // If device opens for both playback and capture, we determine the channels.
\r
678 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
679 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
681 // Probe the device sample rates.
\r
682 bool isInput = false;
\r
683 if ( info.outputChannels == 0 ) isInput = true;
\r
685 // Determine the supported sample rates.
\r
686 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
687 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
688 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
689 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
690 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
691 errorText_ = errorStream_.str();
\r
692 error( RtError::WARNING );
\r
696 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
697 AudioValueRange rangeList[ nRanges ];
\r
698 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
699 if ( result != kAudioHardwareNoError ) {
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtError::WARNING );
\r
// Collapse the reported ranges to one overall [min, max] envelope, then
// keep every entry of SAMPLE_RATES that falls inside it.
706 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
707 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
708 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
709 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
712 info.sampleRates.clear();
\r
713 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
714 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
715 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
718 if ( info.sampleRates.size() == 0 ) {
\r
719 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
720 errorText_ = errorStream_.str();
\r
721 error( RtError::WARNING );
\r
725 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
726 // Thus, any other "physical" formats supported by the device are of
\r
727 // no interest to the client.
\r
728 info.nativeFormats = RTAUDIO_FLOAT32;
\r
730 if ( info.outputChannels > 0 )
\r
731 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
732 if ( info.inputChannels > 0 )
\r
733 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
735 info.probed = true;
\r
// CoreAudio IOProc entry point: recovers the RtApiCore instance from the
// CallbackInfo it was registered with and forwards the buffers to
// callbackEvent(); a false return there is reported to the HAL as an
// unspecified hardware error.
739 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
740 const AudioTimeStamp* inNow,
\r
741 const AudioBufferList* inInputData,
\r
742 const AudioTimeStamp* inInputTime,
\r
743 AudioBufferList* outOutputData,
\r
744 const AudioTimeStamp* inOutputTime,
\r
745 void* infoPointer )
\r
747 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
749 RtApiCore *object = (RtApiCore *) info->object;
\r
750 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
751 return kAudioHardwareUnspecifiedError;
\r
753 return kAudioHardwareNoError;
\r
// Property listener: records processor-overload (over/underrun)
// notifications in the CoreHandle xrun flags — index 1 for the input
// scope, index 0 otherwise.
// NOTE(review): the "UInt32 nAddresses," parameter line (orig. line 757)
// used by the loop below is not visible in this extract.
756 OSStatus xrunListener( AudioObjectID inDevice,
\r
758 const AudioObjectPropertyAddress properties[],
\r
759 void* handlePointer )
\r
761 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
762 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
763 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
764 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
765 handle->xrun[1] = true;
\r
// NOTE(review): the "else" for the output branch (orig. line 766) is not
// visible in this extract.
767 handle->xrun[0] = true;
\r
771 return kAudioHardwareNoError;
\r
// Property listener used while waiting for a sample-rate change: reads the
// device's current nominal sample rate into the Float64 that ratePointer
// addresses, so the caller can poll for the new rate taking effect.
774 OSStatus rateListener( AudioObjectID inDevice,
\r
776 const AudioObjectPropertyAddress properties[],
\r
777 void* ratePointer )
\r
780 Float64 *rate = (Float64 *) ratePointer;
\r
781 UInt32 dataSize = sizeof( Float64 );
\r
782 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
783 kAudioObjectPropertyScopeGlobal,
\r
784 kAudioObjectPropertyElementMaster };
\r
785 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
786 return kAudioHardwareNoError;
\r
789 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
790 unsigned int firstChannel, unsigned int sampleRate,
\r
791 RtAudioFormat format, unsigned int *bufferSize,
\r
792 RtAudio::StreamOptions *options )
\r
795 unsigned int nDevices = getDeviceCount();
\r
796 if ( nDevices == 0 ) {
\r
797 // This should not happen because a check is made before this function is called.
\r
798 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
802 if ( device >= nDevices ) {
\r
803 // This should not happen because a check is made before this function is called.
\r
804 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
808 AudioDeviceID deviceList[ nDevices ];
\r
809 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
810 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
811 kAudioObjectPropertyScopeGlobal,
\r
812 kAudioObjectPropertyElementMaster };
\r
813 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
814 0, NULL, &dataSize, (void *) &deviceList );
\r
815 if ( result != noErr ) {
\r
816 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
820 AudioDeviceID id = deviceList[ device ];
\r
822 // Setup for stream mode.
\r
823 bool isInput = false;
\r
824 if ( mode == INPUT ) {
\r
826 property.mScope = kAudioDevicePropertyScopeInput;
\r
829 property.mScope = kAudioDevicePropertyScopeOutput;
\r
831 // Get the stream "configuration".
\r
832 AudioBufferList *bufferList = nil;
\r
834 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
835 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
836 if ( result != noErr || dataSize == 0 ) {
\r
837 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
838 errorText_ = errorStream_.str();
\r
842 // Allocate the AudioBufferList.
\r
843 bufferList = (AudioBufferList *) malloc( dataSize );
\r
844 if ( bufferList == NULL ) {
\r
845 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
849 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
850 if (result != noErr || dataSize == 0) {
\r
851 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
852 errorText_ = errorStream_.str();
\r
856 // Search for one or more streams that contain the desired number of
\r
857 // channels. CoreAudio devices can have an arbitrary number of
\r
858 // streams and each stream can have an arbitrary number of channels.
\r
859 // For each stream, a single buffer of interleaved samples is
\r
860 // provided. RtAudio prefers the use of one stream of interleaved
\r
861 // data or multiple consecutive single-channel streams. However, we
\r
862 // now support multiple consecutive multi-channel streams of
\r
863 // interleaved data as well.
\r
864 UInt32 iStream, offsetCounter = firstChannel;
\r
865 UInt32 nStreams = bufferList->mNumberBuffers;
\r
866 bool monoMode = false;
\r
867 bool foundStream = false;
\r
869 // First check that the device supports the requested number of
\r
871 UInt32 deviceChannels = 0;
\r
872 for ( iStream=0; iStream<nStreams; iStream++ )
\r
873 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
875 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
876 free( bufferList );
\r
877 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
878 errorText_ = errorStream_.str();
\r
882 // Look for a single stream meeting our needs.
\r
883 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
884 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
885 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
886 if ( streamChannels >= channels + offsetCounter ) {
\r
887 firstStream = iStream;
\r
888 channelOffset = offsetCounter;
\r
889 foundStream = true;
\r
892 if ( streamChannels > offsetCounter ) break;
\r
893 offsetCounter -= streamChannels;
\r
896 // If we didn't find a single stream above, then we should be able
\r
897 // to meet the channel specification with multiple streams.
\r
898 if ( foundStream == false ) {
\r
900 offsetCounter = firstChannel;
\r
901 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
902 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
903 if ( streamChannels > offsetCounter ) break;
\r
904 offsetCounter -= streamChannels;
\r
907 firstStream = iStream;
\r
908 channelOffset = offsetCounter;
\r
909 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
911 if ( streamChannels > 1 ) monoMode = false;
\r
912 while ( channelCounter > 0 ) {
\r
913 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
914 if ( streamChannels > 1 ) monoMode = false;
\r
915 channelCounter -= streamChannels;
\r
920 free( bufferList );
\r
922 // Determine the buffer size.
\r
923 AudioValueRange bufferRange;
\r
924 dataSize = sizeof( AudioValueRange );
\r
925 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
926 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
928 if ( result != noErr ) {
\r
929 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
930 errorText_ = errorStream_.str();
\r
934 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
935 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
936 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
938 // Set the buffer size. For multiple streams, I'm assuming we only
\r
939 // need to make this setting for the master channel.
\r
940 UInt32 theSize = (UInt32) *bufferSize;
\r
941 dataSize = sizeof( UInt32 );
\r
942 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
943 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
945 if ( result != noErr ) {
\r
946 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
947 errorText_ = errorStream_.str();
\r
951 // If attempting to setup a duplex stream, the bufferSize parameter
\r
952 // MUST be the same in both directions!
\r
953 *bufferSize = theSize;
\r
954 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
955 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
956 errorText_ = errorStream_.str();
\r
960 stream_.bufferSize = *bufferSize;
\r
961 stream_.nBuffers = 1;
\r
963 // Try to set "hog" mode ... it's not clear to me this is working.
\r
964 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
966 dataSize = sizeof( hog_pid );
\r
967 property.mSelector = kAudioDevicePropertyHogMode;
\r
968 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
969 if ( result != noErr ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
971 errorText_ = errorStream_.str();
\r
975 if ( hog_pid != getpid() ) {
\r
976 hog_pid = getpid();
\r
977 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
978 if ( result != noErr ) {
\r
979 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
980 errorText_ = errorStream_.str();
\r
986 // Check and if necessary, change the sample rate for the device.
\r
987 Float64 nominalRate;
\r
988 dataSize = sizeof( Float64 );
\r
989 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
990 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
994 errorText_ = errorStream_.str();
\r
998 // Only change the sample rate if off by more than 1 Hz.
\r
999 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1001 // Set a property listener for the sample rate change
\r
1002 Float64 reportedRate = 0.0;
\r
1003 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1004 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1005 if ( result != noErr ) {
\r
1006 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1007 errorText_ = errorStream_.str();
\r
1011 nominalRate = (Float64) sampleRate;
\r
1012 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1014 if ( result != noErr ) {
\r
1015 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1016 errorText_ = errorStream_.str();
\r
1020 // Now wait until the reported nominal rate is what we just set.
\r
1021 UInt32 microCounter = 0;
\r
1022 while ( reportedRate != nominalRate ) {
\r
1023 microCounter += 5000;
\r
1024 if ( microCounter > 5000000 ) break;
\r
1028 // Remove the property listener.
\r
1029 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1031 if ( microCounter > 5000000 ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1038 // Now set the stream format for all streams. Also, check the
\r
1039 // physical format of the device and change that if necessary.
\r
1040 AudioStreamBasicDescription description;
\r
1041 dataSize = sizeof( AudioStreamBasicDescription );
\r
1042 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1043 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1044 if ( result != noErr ) {
\r
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1046 errorText_ = errorStream_.str();
\r
1050 // Set the sample rate and data format id. However, only make the
\r
1051 // change if the sample rate is not within 1.0 of the desired
\r
1052 // rate and the format is not linear pcm.
\r
1053 bool updateFormat = false;
\r
1054 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1055 description.mSampleRate = (Float64) sampleRate;
\r
1056 updateFormat = true;
\r
1059 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1060 description.mFormatID = kAudioFormatLinearPCM;
\r
1061 updateFormat = true;
\r
1064 if ( updateFormat ) {
\r
1065 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1066 if ( result != noErr ) {
\r
1067 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1068 errorText_ = errorStream_.str();
\r
1073 // Now check the physical format.
\r
1074 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1075 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1076 if ( result != noErr ) {
\r
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1078 errorText_ = errorStream_.str();
\r
1082 //std::cout << "Current physical stream format:" << std::endl;
\r
1083 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1084 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1085 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1086 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1088 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1089 description.mFormatID = kAudioFormatLinearPCM;
\r
1090 //description.mSampleRate = (Float64) sampleRate;
\r
1091 AudioStreamBasicDescription testDescription = description;
\r
1092 UInt32 formatFlags;
\r
1094 // We'll try higher bit rates first and then work our way down.
\r
1095 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1096 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1097 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1098 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1099 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1100 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1101 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1102 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1103 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1104 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1105 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1106 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1107 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1109 bool setPhysicalFormat = false;
\r
1110 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1111 testDescription = description;
\r
1112 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1113 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1114 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1115 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1117 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1118 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1119 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1120 if ( result == noErr ) {
\r
1121 setPhysicalFormat = true;
\r
1122 //std::cout << "Updated physical stream format:" << std::endl;
\r
1123 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1124 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1125 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1126 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1131 if ( !setPhysicalFormat ) {
\r
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1133 errorText_ = errorStream_.str();
\r
1136 } // done setting virtual/physical formats.
\r
1138 // Get the stream / device latency.
\r
1140 dataSize = sizeof( UInt32 );
\r
1141 property.mSelector = kAudioDevicePropertyLatency;
\r
1142 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1144 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1147 errorText_ = errorStream_.str();
\r
1148 error( RtError::WARNING );
\r
1152 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1153 // always be presented in native-endian format, so we should never
\r
1154 // need to byte swap.
\r
1155 stream_.doByteSwap[mode] = false;
\r
1157 // From the CoreAudio documentation, PCM data must be supplied as
\r
1159 stream_.userFormat = format;
\r
1160 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1162 if ( streamCount == 1 )
\r
1163 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1164 else // multiple streams
\r
1165 stream_.nDeviceChannels[mode] = channels;
\r
1166 stream_.nUserChannels[mode] = channels;
\r
1167 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1168 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1169 else stream_.userInterleaved = true;
\r
1170 stream_.deviceInterleaved[mode] = true;
\r
1171 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1173 // Set flags for buffer conversion.
\r
1174 stream_.doConvertBuffer[mode] = false;
\r
1175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1176 stream_.doConvertBuffer[mode] = true;
\r
1177 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1178 stream_.doConvertBuffer[mode] = true;
\r
1179 if ( streamCount == 1 ) {
\r
1180 if ( stream_.nUserChannels[mode] > 1 &&
\r
1181 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1182 stream_.doConvertBuffer[mode] = true;
\r
1184 else if ( monoMode && stream_.userInterleaved )
\r
1185 stream_.doConvertBuffer[mode] = true;
\r
1187 // Allocate our CoreHandle structure for the stream.
\r
1188 CoreHandle *handle = 0;
\r
1189 if ( stream_.apiHandle == 0 ) {
\r
1191 handle = new CoreHandle;
\r
1193 catch ( std::bad_alloc& ) {
\r
1194 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1198 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1199 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1202 stream_.apiHandle = (void *) handle;
\r
1205 handle = (CoreHandle *) stream_.apiHandle;
\r
1206 handle->iStream[mode] = firstStream;
\r
1207 handle->nStreams[mode] = streamCount;
\r
1208 handle->id[mode] = id;
\r
1210 // Allocate necessary internal buffers.
\r
1211 unsigned long bufferBytes;
\r
1212 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1213 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1214 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1215 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1216 if ( stream_.userBuffer[mode] == NULL ) {
\r
1217 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1221 // If possible, we will make use of the CoreAudio stream buffers as
\r
1222 // "device buffers". However, we can't do this if using multiple
\r
1224 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1226 bool makeBuffer = true;
\r
1227 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1228 if ( mode == INPUT ) {
\r
1229 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1230 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1231 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1235 if ( makeBuffer ) {
\r
1236 bufferBytes *= *bufferSize;
\r
1237 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1238 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1239 if ( stream_.deviceBuffer == NULL ) {
\r
1240 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1246 stream_.sampleRate = sampleRate;
\r
1247 stream_.device[mode] = device;
\r
1248 stream_.state = STREAM_STOPPED;
\r
1249 stream_.callbackInfo.object = (void *) this;
\r
1251 // Setup the buffer conversion information structure.
\r
1252 if ( stream_.doConvertBuffer[mode] ) {
\r
1253 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1254 else setConvertInfo( mode, channelOffset );
\r
1257 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1258 // Only one callback procedure per device.
\r
1259 stream_.mode = DUPLEX;
\r
1261 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1262 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1264 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1265 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1267 if ( result != noErr ) {
\r
1268 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1269 errorText_ = errorStream_.str();
\r
1272 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1273 stream_.mode = DUPLEX;
\r
1275 stream_.mode = mode;
\r
1278 // Setup the device property listener for over/underload.
\r
1279 property.mSelector = kAudioDeviceProcessorOverload;
\r
1280 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1286 pthread_cond_destroy( &handle->condition );
\r
1288 stream_.apiHandle = 0;
\r
1291 for ( int i=0; i<2; i++ ) {
\r
1292 if ( stream_.userBuffer[i] ) {
\r
1293 free( stream_.userBuffer[i] );
\r
1294 stream_.userBuffer[i] = 0;
\r
1298 if ( stream_.deviceBuffer ) {
\r
1299 free( stream_.deviceBuffer );
\r
1300 stream_.deviceBuffer = 0;
\r
// RtApiCore::closeStream -- tear down the currently open CoreAudio stream:
// stop the device callback(s) if the stream is running, destroy/remove the
// IOProc(s), free the user and internal device buffers, destroy the pthread
// condition variable, and reset the stream mode/state bookkeeping.
// NOTE(review): this listing has dropped short lines (closing braces, return
// statements, #else/#endif directives) -- compare against the canonical
// RtAudio source before editing.
1306 void RtApiCore :: closeStream( void )
\r
// Warn (rather than fail hard) if there is nothing to close.
1308 if ( stream_.state == STREAM_CLOSED ) {
\r
1309 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1310 error( RtError::WARNING );
\r
1314 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output (or duplex) side: stop the device and remove the output IOProc.
1315 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1316 if ( stream_.state == STREAM_RUNNING )
\r
1317 AudioDeviceStop( handle->id[0], callbackHandler );
\r
// AudioDeviceDestroyIOProcID() is the 10.5+ API; the AudioDeviceRemoveIOProc()
// branch below is the pre-10.5 fallback (its #else/#endif lines are elided here).
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1321 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1322 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side: only if INPUT mode, or DUPLEX across two distinct devices
// (a single duplex device shares one IOProc, removed above).
1326 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1327 if ( stream_.state == STREAM_RUNNING )
\r
1328 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1329 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1330 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1332 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1333 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (index 0 = output, 1 = input).
1337 for ( int i=0; i<2; i++ ) {
\r
1338 if ( stream_.userBuffer[i] ) {
\r
1339 free( stream_.userBuffer[i] );
\r
1340 stream_.userBuffer[i] = 0;
\r
// Release the shared internal "device" conversion buffer, if allocated.
1344 if ( stream_.deviceBuffer ) {
\r
1345 free( stream_.deviceBuffer );
\r
1346 stream_.deviceBuffer = 0;
\r
1349 // Destroy pthread condition variable.
\r
1350 pthread_cond_destroy( &handle->condition );
\r
// NOTE(review): the `delete handle;` that presumably precedes this line was
// dropped by the listing -- verify against the canonical source.
1352 stream_.apiHandle = 0;
\r
1354 stream_.mode = UNINITIALIZED;
\r
1355 stream_.state = STREAM_CLOSED;
\r
// RtApiCore::startStream -- start the CoreAudio callback procedure(s) for the
// open stream (output and/or input device), reset the drain bookkeeping, and
// mark the stream STREAM_RUNNING. On an AudioDeviceStart() failure, an error
// message is built and error( RtError::SYSTEM_ERROR ) is raised at the end.
// NOTE(review): short lines (braces, goto/return targets) are elided from this
// listing -- compare against the canonical RtAudio source before editing.
1358 void RtApiCore :: startStream( void )
\r
// Starting an already-running stream is only a warning.
1361 if ( stream_.state == STREAM_RUNNING ) {
\r
1362 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1363 error( RtError::WARNING );
\r
1367 OSStatus result = noErr;
\r
1368 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Start the output-side device callback (also covers single-device duplex).
1369 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1371 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1372 if ( result != noErr ) {
\r
1373 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1374 errorText_ = errorStream_.str();
\r
// Start the input-side device callback when it is a separate device.
1379 if ( stream_.mode == INPUT ||
\r
1380 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1382 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1383 if ( result != noErr ) {
\r
// NOTE(review): unlike the output branch above, this message omits
// getErrorCode( result ) -- likely an upstream inconsistency; left as-is.
1384 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1385 errorText_ = errorStream_.str();
\r
// Reset drain state so callbackEvent() starts delivering fresh data.
1390 handle->drainCounter = 0;
\r
1391 handle->internalDrain = false;
\r
1392 stream_.state = STREAM_RUNNING;
\r
// Error funnel: reached via the (elided) goto/fall-through on failure.
1395 if ( result == noErr ) return;
\r
1396 error( RtError::SYSTEM_ERROR );
\r
// RtApiCore::stopStream -- gracefully stop the stream. For an output/duplex
// stream it first requests a drain (drainCounter = 2) and blocks on the
// condition variable until callbackEvent() signals that the output has been
// flushed, then calls AudioDeviceStop() on each active device and marks the
// stream STREAM_STOPPED.
// NOTE(review): short lines (braces, return statements) are elided from this
// listing -- compare against the canonical RtAudio source before editing.
1399 void RtApiCore :: stopStream( void )
\r
// Stopping an already-stopped stream is only a warning.
1402 if ( stream_.state == STREAM_STOPPED ) {
\r
1403 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1404 error( RtError::WARNING );
\r
1408 OSStatus result = noErr;
\r
1409 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain in progress yet: request one and wait for
// callbackEvent() to pthread_cond_signal() once the output has drained.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by this
// thread -- the lock/unlock lines are not visible in this listing; verify.
1412 if ( handle->drainCounter == 0 ) {
\r
1413 handle->drainCounter = 2;
\r
1414 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1417 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1418 if ( result != noErr ) {
\r
1419 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1420 errorText_ = errorStream_.str();
\r
// Stop the input-side device when it is distinct from the output device.
1425 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1427 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1428 if ( result != noErr ) {
\r
1429 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1430 errorText_ = errorStream_.str();
\r
1435 stream_.state = STREAM_STOPPED;
\r
// Error funnel: reached via the (elided) goto/fall-through on failure.
1438 if ( result == noErr ) return;
\r
1439 error( RtError::SYSTEM_ERROR );
\r
// RtApiCore::abortStream -- stop the stream immediately, without waiting for
// pending output to drain. Setting drainCounter = 2 up front makes
// callbackEvent() write zeros instead of user data while the stop completes.
// NOTE(review): the tail of this function (presumably a stopStream() call and
// the closing brace) was dropped by this listing -- verify against the
// canonical RtAudio source.
1442 void RtApiCore :: abortStream( void )
\r
// Aborting an already-stopped stream is only a warning.
1445 if ( stream_.state == STREAM_STOPPED ) {
\r
1446 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1447 error( RtError::WARNING );
\r
1451 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1452 handle->drainCounter = 2;
\r
1457 // This function will be called by a spawned thread when the user
\r
1458 // callback function signals that the stream should be stopped or
\r
1459 // aborted. It is better to handle it this way because the
\r
1460 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1461 // function is called.
\r
// Thread entry point (C linkage, created via pthread_create in
// callbackEvent): recovers the RtApiCore object from the CallbackInfo
// pointer, performs the blocking stopStream(), then exits the thread.
1462 extern "C" void *coreStopStream( void *ptr )
\r
1464 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1465 RtApiCore *object = (RtApiCore *) info->object;
\r
1467 object->stopStream();
\r
1468 pthread_exit( NULL );
\r
// RtApiCore::callbackEvent -- the per-device render/capture handler invoked
// (via callbackHandler) from the CoreAudio IOProc for deviceId. It:
//   1. bails out early if the stream is stopped/stopping (or warns if closed);
//   2. manages the drain handshake with stopStream()/abortStream();
//   3. invokes the user callback to produce/consume audio in userBuffer[0/1];
//   4. fills the CoreAudio output AudioBufferList (zeros while draining,
//      otherwise converted/copied/de-interleaved user data);
//   5. reads the CoreAudio input AudioBufferList into the user buffer;
//   6. advances the stream time.
// NOTE(review): this listing has dropped many short lines (braces, returns,
// else branches, #endif) -- compare against the canonical RtAudio source
// before editing. `threadId` used below is declared outside this view.
1471 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1472 const AudioBufferList *inBufferList,
\r
1473 const AudioBufferList *outBufferList )
\r
1475 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1476 if ( stream_.state == STREAM_CLOSED ) {
\r
1477 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1478 error( RtError::WARNING );
\r
1482 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1483 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1485 // Check if we were draining the stream and signal is finished.
\r
// drainCounter > 3 means enough zero-filled buffers have gone out: either
// spawn a thread to call stopStream() (internal drain) or wake the external
// stopStream() caller blocked on the condition variable.
1486 if ( handle->drainCounter > 3 ) {
\r
1488 stream_.state = STREAM_STOPPING;
\r
1489 if ( handle->internalDrain == true )
\r
1490 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1491 else // external call to stopStream()
\r
1492 pthread_cond_signal( &handle->condition );
\r
1496 AudioDeviceID outputDevice = handle->id[0];
\r
1498 // Invoke user callback to get fresh output data UNLESS we are
\r
1499 // draining stream or duplex mode AND the input/output devices are
\r
1500 // different AND this function is called for the input device.
\r
1501 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1502 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1503 double streamTime = getStreamTime();
\r
// Report (and clear) any over/underflow flags set by the xrun listener.
1504 RtAudioStreamStatus status = 0;
\r
1505 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1506 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1507 handle->xrun[0] = false;
\r
1509 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1510 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1511 handle->xrun[1] = false;
\r
// User callback return values: 2 = abort (stop without drain),
// 1 = stop after draining the output (internalDrain).
1514 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1515 stream_.bufferSize, streamTime, status, info->userData );
\r
1516 if ( cbReturnValue == 2 ) {
\r
1517 stream_.state = STREAM_STOPPING;
\r
1518 handle->drainCounter = 2;
\r
1522 else if ( cbReturnValue == 1 ) {
\r
1523 handle->drainCounter = 1;
\r
1524 handle->internalDrain = true;
\r
// ---- Output side: fill the CoreAudio output buffer list. ----
1528 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1530 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1532 if ( handle->nStreams[0] == 1 ) {
\r
1533 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1535 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1537 else { // fill multiple streams with zeros
\r
1538 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1539 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1541 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
// Single CoreAudio stream: convert or memcpy user data straight in.
1545 else if ( handle->nStreams[0] == 1 ) {
\r
1546 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1547 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1548 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1550 else { // copy from user buffer
\r
1551 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1552 stream_.userBuffer[0],
\r
1553 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1556 else { // fill multiple streams
\r
// Source is the user buffer, or the internal device buffer after format
// conversion.
1557 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1558 if ( stream_.doConvertBuffer[0] ) {
\r
1559 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1560 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1563 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1564 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1565 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1566 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1567 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1570 else { // fill multiple multi-channel streams with interleaved data
\r
1571 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1572 Float32 *out, *in;
\r
1574 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1575 UInt32 inChannels = stream_.nUserChannels[0];
\r
1576 if ( stream_.doConvertBuffer[0] ) {
\r
1577 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1578 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset: distance between consecutive samples of one channel in the
// source (1 when interleaved, bufferSize when planar).
1581 if ( inInterleaved ) inOffset = 1;
\r
1582 else inOffset = stream_.bufferSize;
\r
1584 channelsLeft = inChannels;
\r
1585 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1587 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1588 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1591 // Account for possible channel offset in first stream
\r
1592 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1593 streamChannels -= stream_.channelOffset[0];
\r
1594 outJump = stream_.channelOffset[0];
\r
1598 // Account for possible unfilled channels at end of the last stream
\r
1599 if ( streamChannels > channelsLeft ) {
\r
1600 outJump = streamChannels - channelsLeft;
\r
1601 streamChannels = channelsLeft;
\r
1604 // Determine input buffer offsets and skips
\r
1605 if ( inInterleaved ) {
\r
1606 inJump = inChannels;
\r
1607 in += inChannels - channelsLeft;
\r
1611 in += (inChannels - channelsLeft) * inOffset;
\r
// Copy streamChannels channels for every frame of the buffer.
1614 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1615 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1616 *out++ = in[j*inOffset];
\r
1621 channelsLeft -= streamChannels;
\r
// While draining, count each zero-filled buffer toward the >3 threshold
// checked at the top of this function.
1626 if ( handle->drainCounter ) {
\r
1627 handle->drainCounter++;
\r
// ---- Input side: read the CoreAudio input buffer list. ----
1632 AudioDeviceID inputDevice;
\r
1633 inputDevice = handle->id[1];
\r
1634 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1636 if ( handle->nStreams[1] == 1 ) {
\r
1637 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1638 convertBuffer( stream_.userBuffer[1],
\r
1639 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1640 stream_.convertInfo[1] );
\r
1642 else { // copy to user buffer
\r
1643 memcpy( stream_.userBuffer[1],
\r
1644 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1645 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1648 else { // read from multiple streams
\r
// Destination is the user buffer, or the internal device buffer when a
// format conversion will follow below.
1649 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1650 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1652 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1653 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1655 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1656 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1659 else { // read from multiple multi-channel streams
\r
1660 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1661 Float32 *out, *in;
\r
1663 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1664 UInt32 outChannels = stream_.nUserChannels[1];
\r
1665 if ( stream_.doConvertBuffer[1] ) {
\r
1666 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1667 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset: distance between consecutive samples of one channel in the
// destination (1 when interleaved, bufferSize when planar).
1670 if ( outInterleaved ) outOffset = 1;
\r
1671 else outOffset = stream_.bufferSize;
\r
1673 channelsLeft = outChannels;
\r
1674 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1676 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1677 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1680 // Account for possible channel offset in first stream
\r
1681 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1682 streamChannels -= stream_.channelOffset[1];
\r
1683 inJump = stream_.channelOffset[1];
\r
1687 // Account for possible unread channels at end of the last stream
\r
1688 if ( streamChannels > channelsLeft ) {
\r
1689 inJump = streamChannels - channelsLeft;
\r
1690 streamChannels = channelsLeft;
\r
1693 // Determine output buffer offsets and skips
\r
1694 if ( outInterleaved ) {
\r
1695 outJump = outChannels;
\r
1696 out += outChannels - channelsLeft;
\r
1700 out += (outChannels - channelsLeft) * outOffset;
\r
// Copy streamChannels channels for every frame of the buffer.
1703 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1704 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1705 out[j*outOffset] = *in++;
\r
1710 channelsLeft -= streamChannels;
\r
// Final format conversion from the internal device buffer to the user
// buffer, when required.
1714 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1715 convertBuffer( stream_.userBuffer[1],
\r
1716 stream_.deviceBuffer,
\r
1717 stream_.convertInfo[1] );
\r
1723 //MUTEX_UNLOCK( &stream_.mutex );
\r
1725 RtApi::tickStreamTime();
\r
// RtApiCore::getErrorCode -- map a CoreAudio OSStatus result code to the
// name of its kAudioHardware*/kAudioDevice* constant, for use in error
// messages; unknown codes yield "CoreAudio unknown error".
// NOTE(review): the opening brace and `switch( code ) {` line (and the
// closing braces) were dropped by this listing -- verify against the
// canonical RtAudio source.
1729 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1733 case kAudioHardwareNotRunningError:
\r
1734 return "kAudioHardwareNotRunningError";
\r
1736 case kAudioHardwareUnspecifiedError:
\r
1737 return "kAudioHardwareUnspecifiedError";
\r
1739 case kAudioHardwareUnknownPropertyError:
\r
1740 return "kAudioHardwareUnknownPropertyError";
\r
1742 case kAudioHardwareBadPropertySizeError:
\r
1743 return "kAudioHardwareBadPropertySizeError";
\r
1745 case kAudioHardwareIllegalOperationError:
\r
1746 return "kAudioHardwareIllegalOperationError";
\r
1748 case kAudioHardwareBadObjectError:
\r
1749 return "kAudioHardwareBadObjectError";
\r
1751 case kAudioHardwareBadDeviceError:
\r
1752 return "kAudioHardwareBadDeviceError";
\r
1754 case kAudioHardwareBadStreamError:
\r
1755 return "kAudioHardwareBadStreamError";
\r
1757 case kAudioHardwareUnsupportedOperationError:
\r
1758 return "kAudioHardwareUnsupportedOperationError";
\r
1760 case kAudioDeviceUnsupportedFormatError:
\r
1761 return "kAudioDeviceUnsupportedFormatError";
\r
1763 case kAudioDevicePermissionsError:
\r
1764 return "kAudioDevicePermissionsError";
\r
// Default: fall through for any code not matched above.
1767 return "CoreAudio unknown error";
\r
1771 //******************** End of __MACOSX_CORE__ *********************//
\r
1774 #if defined(__UNIX_JACK__)
\r
1776 // JACK is a low-latency audio server, originally written for the
\r
1777 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1778 // connect a number of different applications to an audio device, as
\r
1779 // well as allowing them to share audio between themselves.
\r
1781 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1782 // have ports connected to the server. The JACK server is typically
\r
1783 // started in a terminal as follows:
\r
//    jackd -d alsa -d hw:0
\r
1787 // or through an interface program such as qjackctl. Many of the
\r
1788 // parameters normally set for a stream are fixed by the JACK server
\r
1789 // and can be specified when the JACK server is started. In
\r
//    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1794 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1795 // frames, and number of buffers = 4. Once the server is running, it
\r
1796 // is not possible to override these values. If the values are not
\r
1797 // specified in the command-line, the JACK server uses default values.
\r
1799 // The JACK server does not have to be running when an instance of
\r
1800 // RtApiJack is created, though the function getDeviceCount() will
\r
1801 // report 0 devices found until JACK has been started. When no
\r
1802 // devices are available (i.e., the JACK server is not running), a
\r
1803 // stream cannot be opened.
\r
1805 #include <jack/jack.h>
\r
1806 #include <unistd.h>
\r
1809 // A structure to hold various information related to the Jack API
\r
1810 // implementation.
\r
1811 struct JackHandle {
\r
1812 jack_client_t *client;
\r
1813 jack_port_t **ports[2];
\r
1814 std::string deviceName[2];
\r
1816 pthread_cond_t condition;
\r
1817 int drainCounter; // Tracks callback counts when draining
\r
1818 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1821 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1824 ThreadHandle threadId;
\r
1825 void jackSilentError( const char * ) {};
\r
1827 RtApiJack :: RtApiJack()
\r
1829 // Nothing to do here.
\r
1830 #if !defined(__RTAUDIO_DEBUG__)
\r
1831 // Turn off Jack's internal error reporting.
\r
1832 jack_set_error_function( &jackSilentError );
\r
1836 RtApiJack :: ~RtApiJack()
\r
1838 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1841 unsigned int RtApiJack :: getDeviceCount( void )
\r
1843 // See if we can become a jack client.
\r
1844 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1845 jack_status_t *status = NULL;
\r
1846 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1847 if ( client == 0 ) return 0;
\r
1849 const char **ports;
\r
1850 std::string port, previousPort;
\r
1851 unsigned int nChannels = 0, nDevices = 0;
\r
1852 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1854 // Parse the port names up to the first colon (:).
\r
1855 size_t iColon = 0;
\r
1857 port = (char *) ports[ nChannels ];
\r
1858 iColon = port.find(":");
\r
1859 if ( iColon != std::string::npos ) {
\r
1860 port = port.substr( 0, iColon + 1 );
\r
1861 if ( port != previousPort ) {
\r
1863 previousPort = port;
\r
1866 } while ( ports[++nChannels] );
\r
1870 jack_client_close( client );
\r
1874 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1876 RtAudio::DeviceInfo info;
\r
1877 info.probed = false;
\r
1879 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1880 jack_status_t *status = NULL;
\r
1881 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1882 if ( client == 0 ) {
\r
1883 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1884 error( RtError::WARNING );
\r
1888 const char **ports;
\r
1889 std::string port, previousPort;
\r
1890 unsigned int nPorts = 0, nDevices = 0;
\r
1891 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1893 // Parse the port names up to the first colon (:).
\r
1894 size_t iColon = 0;
\r
1896 port = (char *) ports[ nPorts ];
\r
1897 iColon = port.find(":");
\r
1898 if ( iColon != std::string::npos ) {
\r
1899 port = port.substr( 0, iColon );
\r
1900 if ( port != previousPort ) {
\r
1901 if ( nDevices == device ) info.name = port;
\r
1903 previousPort = port;
\r
1906 } while ( ports[++nPorts] );
\r
1910 if ( device >= nDevices ) {
\r
1911 jack_client_close( client );
\r
1912 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1913 error( RtError::INVALID_USE );
\r
1916 // Get the current jack server sample rate.
\r
1917 info.sampleRates.clear();
\r
1918 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1920 // Count the available ports containing the client name as device
\r
1921 // channels. Jack "input ports" equal RtAudio output channels.
\r
1922 unsigned int nChannels = 0;
\r
1923 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1925 while ( ports[ nChannels ] ) nChannels++;
\r
1927 info.outputChannels = nChannels;
\r
1930 // Jack "output ports" equal RtAudio input channels.
\r
1932 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1934 while ( ports[ nChannels ] ) nChannels++;
\r
1936 info.inputChannels = nChannels;
\r
1939 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1940 jack_client_close(client);
\r
1941 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1942 error( RtError::WARNING );
\r
1946 // If device opens for both playback and capture, we determine the channels.
\r
1947 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1948 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1950 // Jack always uses 32-bit floats.
\r
1951 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1953 // Jack doesn't provide default devices so we'll use the first available one.
\r
1954 if ( device == 0 && info.outputChannels > 0 )
\r
1955 info.isDefaultOutput = true;
\r
1956 if ( device == 0 && info.inputChannels > 0 )
\r
1957 info.isDefaultInput = true;
\r
1959 jack_client_close(client);
\r
1960 info.probed = true;
\r
1964 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1966 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1968 RtApiJack *object = (RtApiJack *) info->object;
\r
1969 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1974 // This function will be called by a spawned thread when the Jack
\r
1975 // server signals that it is shutting down. It is necessary to handle
\r
1976 // it this way because the jackShutdown() function must return before
\r
1977 // the jack_deactivate() function (in closeStream()) will return.
\r
1978 extern "C" void *jackCloseStream( void *ptr )
\r
1980 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1981 RtApiJack *object = (RtApiJack *) info->object;
\r
1983 object->closeStream();
\r
1985 pthread_exit( NULL );
\r
1987 void jackShutdown( void *infoPointer )
\r
1989 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1990 RtApiJack *object = (RtApiJack *) info->object;
\r
1992 // Check current stream state. If stopped, then we'll assume this
\r
1993 // was called as a result of a call to RtApiJack::stopStream (the
\r
1994 // deactivation of a client handle causes this function to be called).
\r
1995 // If not, we'll assume the Jack server is shutting down or some
\r
1996 // other problem occurred and we should close the stream.
\r
1997 if ( object->isStreamRunning() == false ) return;
\r
1999 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2000 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2003 int jackXrun( void *infoPointer )
\r
2005 JackHandle *handle = (JackHandle *) infoPointer;
\r
2007 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2008 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2013 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2014 unsigned int firstChannel, unsigned int sampleRate,
\r
2015 RtAudioFormat format, unsigned int *bufferSize,
\r
2016 RtAudio::StreamOptions *options )
\r
2018 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2020 // Look for jack server and try to become a client (only do once per stream).
\r
2021 jack_client_t *client = 0;
\r
2022 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2023 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2024 jack_status_t *status = NULL;
\r
2025 if ( options && !options->streamName.empty() )
\r
2026 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2028 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2029 if ( client == 0 ) {
\r
2030 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2031 error( RtError::WARNING );
\r
2036 // The handle must have been created on an earlier pass.
\r
2037 client = handle->client;
\r
2040 const char **ports;
\r
2041 std::string port, previousPort, deviceName;
\r
2042 unsigned int nPorts = 0, nDevices = 0;
\r
2043 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2045 // Parse the port names up to the first colon (:).
\r
2046 size_t iColon = 0;
\r
2048 port = (char *) ports[ nPorts ];
\r
2049 iColon = port.find(":");
\r
2050 if ( iColon != std::string::npos ) {
\r
2051 port = port.substr( 0, iColon );
\r
2052 if ( port != previousPort ) {
\r
2053 if ( nDevices == device ) deviceName = port;
\r
2055 previousPort = port;
\r
2058 } while ( ports[++nPorts] );
\r
2062 if ( device >= nDevices ) {
\r
2063 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2067 // Count the available ports containing the client name as device
\r
2068 // channels. Jack "input ports" equal RtAudio output channels.
\r
2069 unsigned int nChannels = 0;
\r
2070 unsigned long flag = JackPortIsInput;
\r
2071 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2072 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2074 while ( ports[ nChannels ] ) nChannels++;
\r
2078 // Compare the jack ports for specified client to the requested number of channels.
\r
2079 if ( nChannels < (channels + firstChannel) ) {
\r
2080 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2081 errorText_ = errorStream_.str();
\r
2085 // Check the jack server sample rate.
\r
2086 unsigned int jackRate = jack_get_sample_rate( client );
\r
2087 if ( sampleRate != jackRate ) {
\r
2088 jack_client_close( client );
\r
2089 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2090 errorText_ = errorStream_.str();
\r
2093 stream_.sampleRate = jackRate;
\r
2095 // Get the latency of the JACK port.
\r
2096 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2097 if ( ports[ firstChannel ] ) {
\r
2098 // Added by Ge Wang
\r
2099 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2100 // the range (usually the min and max are equal)
\r
2101 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2102 // get the latency range
\r
2103 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2104 // be optimistic, use the min!
\r
2105 stream_.latency[mode] = latrange.min;
\r
2106 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2110 // The jack server always uses 32-bit floating-point data.
\r
2111 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2112 stream_.userFormat = format;
\r
2114 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2115 else stream_.userInterleaved = true;
\r
2117 // Jack always uses non-interleaved buffers.
\r
2118 stream_.deviceInterleaved[mode] = false;
\r
2120 // Jack always provides host byte-ordered data.
\r
2121 stream_.doByteSwap[mode] = false;
\r
2123 // Get the buffer size. The buffer size and number of buffers
\r
2124 // (periods) is set when the jack server is started.
\r
2125 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2126 *bufferSize = stream_.bufferSize;
\r
2128 stream_.nDeviceChannels[mode] = channels;
\r
2129 stream_.nUserChannels[mode] = channels;
\r
2131 // Set flags for buffer conversion.
\r
2132 stream_.doConvertBuffer[mode] = false;
\r
2133 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2134 stream_.doConvertBuffer[mode] = true;
\r
2135 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2136 stream_.nUserChannels[mode] > 1 )
\r
2137 stream_.doConvertBuffer[mode] = true;
\r
2139 // Allocate our JackHandle structure for the stream.
\r
2140 if ( handle == 0 ) {
\r
2142 handle = new JackHandle;
\r
2144 catch ( std::bad_alloc& ) {
\r
2145 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2149 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2150 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2153 stream_.apiHandle = (void *) handle;
\r
2154 handle->client = client;
\r
2156 handle->deviceName[mode] = deviceName;
\r
2158 // Allocate necessary internal buffers.
\r
2159 unsigned long bufferBytes;
\r
2160 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2161 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2162 if ( stream_.userBuffer[mode] == NULL ) {
\r
2163 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2167 if ( stream_.doConvertBuffer[mode] ) {
\r
2169 bool makeBuffer = true;
\r
2170 if ( mode == OUTPUT )
\r
2171 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2172 else { // mode == INPUT
\r
2173 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2174 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2175 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2176 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2180 if ( makeBuffer ) {
\r
2181 bufferBytes *= *bufferSize;
\r
2182 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2183 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2184 if ( stream_.deviceBuffer == NULL ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2191 // Allocate memory for the Jack ports (channels) identifiers.
\r
2192 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2193 if ( handle->ports[mode] == NULL ) {
\r
2194 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2198 stream_.device[mode] = device;
\r
2199 stream_.channelOffset[mode] = firstChannel;
\r
2200 stream_.state = STREAM_STOPPED;
\r
2201 stream_.callbackInfo.object = (void *) this;
\r
2203 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2204 // We had already set up the stream for output.
\r
2205 stream_.mode = DUPLEX;
\r
2207 stream_.mode = mode;
\r
2208 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2209 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2210 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2213 // Register our ports.
\r
2215 if ( mode == OUTPUT ) {
\r
2216 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2217 snprintf( label, 64, "outport %d", i );
\r
2218 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2219 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2223 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2224 snprintf( label, 64, "inport %d", i );
\r
2225 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2226 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2230 // Setup the buffer conversion information structure. We don't use
\r
2231 // buffers to do channel offsets, so we override that parameter
\r
2233 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2239 pthread_cond_destroy( &handle->condition );
\r
2240 jack_client_close( handle->client );
\r
2242 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2243 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2246 stream_.apiHandle = 0;
\r
2249 for ( int i=0; i<2; i++ ) {
\r
2250 if ( stream_.userBuffer[i] ) {
\r
2251 free( stream_.userBuffer[i] );
\r
2252 stream_.userBuffer[i] = 0;
\r
2256 if ( stream_.deviceBuffer ) {
\r
2257 free( stream_.deviceBuffer );
\r
2258 stream_.deviceBuffer = 0;
\r
2264 void RtApiJack :: closeStream( void )
\r
2266 if ( stream_.state == STREAM_CLOSED ) {
\r
2267 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2268 error( RtError::WARNING );
\r
2272 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2275 if ( stream_.state == STREAM_RUNNING )
\r
2276 jack_deactivate( handle->client );
\r
2278 jack_client_close( handle->client );
\r
2282 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2283 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2284 pthread_cond_destroy( &handle->condition );
\r
2286 stream_.apiHandle = 0;
\r
2289 for ( int i=0; i<2; i++ ) {
\r
2290 if ( stream_.userBuffer[i] ) {
\r
2291 free( stream_.userBuffer[i] );
\r
2292 stream_.userBuffer[i] = 0;
\r
2296 if ( stream_.deviceBuffer ) {
\r
2297 free( stream_.deviceBuffer );
\r
2298 stream_.deviceBuffer = 0;
\r
2301 stream_.mode = UNINITIALIZED;
\r
2302 stream_.state = STREAM_CLOSED;
\r
2305 void RtApiJack :: startStream( void )
\r
2308 if ( stream_.state == STREAM_RUNNING ) {
\r
2309 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2310 error( RtError::WARNING );
\r
2314 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2315 int result = jack_activate( handle->client );
\r
2317 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2321 const char **ports;
\r
2323 // Get the list of available ports.
\r
2324 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2326 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2327 if ( ports == NULL) {
\r
2328 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2332 // Now make the port connections. Since RtAudio wasn't designed to
\r
2333 // allow the user to select particular channels of a device, we'll
\r
2334 // just open the first "nChannels" ports with offset.
\r
2335 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2337 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2338 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2341 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2348 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2350 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2351 if ( ports == NULL) {
\r
2352 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2356 // Now make the port connections. See note above.
\r
2357 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2359 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2360 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2363 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2370 handle->drainCounter = 0;
\r
2371 handle->internalDrain = false;
\r
2372 stream_.state = STREAM_RUNNING;
\r
2375 if ( result == 0 ) return;
\r
2376 error( RtError::SYSTEM_ERROR );
\r
2379 void RtApiJack :: stopStream( void )
\r
2382 if ( stream_.state == STREAM_STOPPED ) {
\r
2383 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2384 error( RtError::WARNING );
\r
2388 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2389 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2391 if ( handle->drainCounter == 0 ) {
\r
2392 handle->drainCounter = 2;
\r
2393 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2397 jack_deactivate( handle->client );
\r
2398 stream_.state = STREAM_STOPPED;
\r
2401 void RtApiJack :: abortStream( void )
\r
2404 if ( stream_.state == STREAM_STOPPED ) {
\r
2405 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2406 error( RtError::WARNING );
\r
2410 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2411 handle->drainCounter = 2;
\r
2416 // This function will be called by a spawned thread when the user
\r
2417 // callback function signals that the stream should be stopped or
\r
2418 // aborted. It is necessary to handle it this way because the
\r
2419 // callbackEvent() function must return before the jack_deactivate()
\r
2420 // function will return.
\r
2421 extern "C" void *jackStopStream( void *ptr )
\r
2423 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2424 RtApiJack *object = (RtApiJack *) info->object;
\r
2426 object->stopStream();
\r
2427 pthread_exit( NULL );
\r
2430 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2432 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2433 if ( stream_.state == STREAM_CLOSED ) {
\r
2434 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2435 error( RtError::WARNING );
\r
2438 if ( stream_.bufferSize != nframes ) {
\r
2439 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2440 error( RtError::WARNING );
\r
2444 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2445 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2447 // Check if we were draining the stream and signal is finished.
\r
2448 if ( handle->drainCounter > 3 ) {
\r
2450 stream_.state = STREAM_STOPPING;
\r
2451 if ( handle->internalDrain == true )
\r
2452 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2454 pthread_cond_signal( &handle->condition );
\r
2458 // Invoke user callback first, to get fresh output data.
\r
2459 if ( handle->drainCounter == 0 ) {
\r
2460 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2461 double streamTime = getStreamTime();
\r
2462 RtAudioStreamStatus status = 0;
\r
2463 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2464 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2465 handle->xrun[0] = false;
\r
2467 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2468 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2469 handle->xrun[1] = false;
\r
2471 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2472 stream_.bufferSize, streamTime, status, info->userData );
\r
2473 if ( cbReturnValue == 2 ) {
\r
2474 stream_.state = STREAM_STOPPING;
\r
2475 handle->drainCounter = 2;
\r
2477 pthread_create( &id, NULL, jackStopStream, info );
\r
2480 else if ( cbReturnValue == 1 ) {
\r
2481 handle->drainCounter = 1;
\r
2482 handle->internalDrain = true;
\r
2486 jack_default_audio_sample_t *jackbuffer;
\r
2487 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2490 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2492 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2493 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2494 memset( jackbuffer, 0, bufferBytes );
\r
2498 else if ( stream_.doConvertBuffer[0] ) {
\r
2500 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2502 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2503 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2504 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2507 else { // no buffer conversion
\r
2508 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2509 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2510 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2514 if ( handle->drainCounter ) {
\r
2515 handle->drainCounter++;
\r
2520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2522 if ( stream_.doConvertBuffer[1] ) {
\r
2523 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2524 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2525 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2529 else { // no buffer conversion
\r
2530 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2531 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2532 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2538 RtApi::tickStreamTime();
\r
2541 //******************** End of __UNIX_JACK__ *********************//
\r
2544 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2546 // The ASIO API is designed around a callback scheme, so this
\r
2547 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2548 // Jack. The primary constraint with ASIO is that it only allows
\r
2549 // access to a single driver at a time. Thus, it is not possible to
\r
2550 // have more than one simultaneous RtAudio stream.
\r
2552 // This implementation also requires a number of external ASIO files
\r
2553 // and a few global variables. The ASIO callback scheme does not
\r
2554 // allow for the passing of user data, so we must create a global
\r
2555 // pointer to our callbackInfo structure.
\r
2557 // On unix systems, we make use of a pthread condition variable.
\r
2558 // Since there is no equivalent in Windows, I hacked something based
\r
2559 // on information found in
\r
2560 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2562 #include "asiosys.h"
\r
2564 #include "iasiothiscallresolver.h"
\r
2565 #include "asiodrivers.h"
\r
2568 AsioDrivers drivers;
\r
2569 ASIOCallbacks asioCallbacks;
\r
2570 ASIODriverInfo driverInfo;
\r
2571 CallbackInfo *asioCallbackInfo;
\r
2574 struct AsioHandle {
\r
2575 int drainCounter; // Tracks callback counts when draining
\r
2576 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2577 ASIOBufferInfo *bufferInfos;
\r
2581 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2584 // Function declarations (definitions at end of section)
\r
2585 static const char* getAsioErrorString( ASIOError result );
\r
2586 void sampleRateChanged( ASIOSampleRate sRate );
\r
2587 long asioMessages( long selector, long value, void* message, double* opt );
\r
2589 RtApiAsio :: RtApiAsio()
\r
2591 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2592 // CoInitialize beforehand, but it must be for appartment threading
\r
2593 // (in which case, CoInitilialize will return S_FALSE here).
\r
2594 coInitialized_ = false;
\r
2595 HRESULT hr = CoInitialize( NULL );
\r
2596 if ( FAILED(hr) ) {
\r
2597 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2598 error( RtError::WARNING );
\r
2600 coInitialized_ = true;
\r
2602 drivers.removeCurrentDriver();
\r
2603 driverInfo.asioVersion = 2;
\r
2605 // See note in DirectSound implementation about GetDesktopWindow().
\r
2606 driverInfo.sysRef = GetForegroundWindow();
\r
2609 RtApiAsio :: ~RtApiAsio()
\r
2611 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2612 if ( coInitialized_ ) CoUninitialize();
\r
2615 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2617 return (unsigned int) drivers.asioGetNumDev();
\r
2620 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2622 RtAudio::DeviceInfo info;
\r
2623 info.probed = false;
\r
2626 unsigned int nDevices = getDeviceCount();
\r
2627 if ( nDevices == 0 ) {
\r
2628 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2629 error( RtError::INVALID_USE );
\r
2632 if ( device >= nDevices ) {
\r
2633 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2634 error( RtError::INVALID_USE );
\r
2637 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2638 if ( stream_.state != STREAM_CLOSED ) {
\r
2639 if ( device >= devices_.size() ) {
\r
2640 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2641 error( RtError::WARNING );
\r
2644 return devices_[ device ];
\r
2647 char driverName[32];
\r
2648 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2649 if ( result != ASE_OK ) {
\r
2650 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2651 errorText_ = errorStream_.str();
\r
2652 error( RtError::WARNING );
\r
2656 info.name = driverName;
\r
2658 if ( !drivers.loadDriver( driverName ) ) {
\r
2659 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2660 errorText_ = errorStream_.str();
\r
2661 error( RtError::WARNING );
\r
2665 result = ASIOInit( &driverInfo );
\r
2666 if ( result != ASE_OK ) {
\r
2667 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2668 errorText_ = errorStream_.str();
\r
2669 error( RtError::WARNING );
\r
2673 // Determine the device channel information.
\r
2674 long inputChannels, outputChannels;
\r
2675 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2676 if ( result != ASE_OK ) {
\r
2677 drivers.removeCurrentDriver();
\r
2678 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2679 errorText_ = errorStream_.str();
\r
2680 error( RtError::WARNING );
\r
2684 info.outputChannels = outputChannels;
\r
2685 info.inputChannels = inputChannels;
\r
2686 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2687 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2689 // Determine the supported sample rates.
\r
2690 info.sampleRates.clear();
\r
2691 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2692 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2693 if ( result == ASE_OK )
\r
2694 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2697 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2698 ASIOChannelInfo channelInfo;
\r
2699 channelInfo.channel = 0;
\r
2700 channelInfo.isInput = true;
\r
2701 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2702 result = ASIOGetChannelInfo( &channelInfo );
\r
2703 if ( result != ASE_OK ) {
\r
2704 drivers.removeCurrentDriver();
\r
2705 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2706 errorText_ = errorStream_.str();
\r
2707 error( RtError::WARNING );
\r
2711 info.nativeFormats = 0;
\r
2712 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2713 info.nativeFormats |= RTAUDIO_SINT16;
\r
2714 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2715 info.nativeFormats |= RTAUDIO_SINT32;
\r
2716 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2717 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2718 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2719 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2720 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2721 info.nativeFormats |= RTAUDIO_SINT24;
\r
2723 if ( info.outputChannels > 0 )
\r
2724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2725 if ( info.inputChannels > 0 )
\r
2726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2728 info.probed = true;
\r
2729 drivers.removeCurrentDriver();
\r
2733 void bufferSwitch( long index, ASIOBool processNow )
\r
2735 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2736 object->callbackEvent( index );
\r
2739 void RtApiAsio :: saveDeviceInfo( void )
\r
2743 unsigned int nDevices = getDeviceCount();
\r
2744 devices_.resize( nDevices );
\r
2745 for ( unsigned int i=0; i<nDevices; i++ )
\r
2746 devices_[i] = getDeviceInfo( i );
\r
2749 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2750 unsigned int firstChannel, unsigned int sampleRate,
\r
2751 RtAudioFormat format, unsigned int *bufferSize,
\r
2752 RtAudio::StreamOptions *options )
\r
2754 // For ASIO, a duplex stream MUST use the same driver.
\r
2755 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2756 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2760 char driverName[32];
\r
2761 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2762 if ( result != ASE_OK ) {
\r
2763 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2764 errorText_ = errorStream_.str();
\r
2768 // Only load the driver once for duplex stream.
\r
2769 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2770 // The getDeviceInfo() function will not work when a stream is open
\r
2771 // because ASIO does not allow multiple devices to run at the same
\r
2772 // time. Thus, we'll probe the system before opening a stream and
\r
2773 // save the results for use by getDeviceInfo().
\r
2774 this->saveDeviceInfo();
\r
2776 if ( !drivers.loadDriver( driverName ) ) {
\r
2777 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2782 result = ASIOInit( &driverInfo );
\r
2783 if ( result != ASE_OK ) {
\r
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2785 errorText_ = errorStream_.str();
\r
2790 // Check the device channel count.
\r
2791 long inputChannels, outputChannels;
\r
2792 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2793 if ( result != ASE_OK ) {
\r
2794 drivers.removeCurrentDriver();
\r
2795 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2800 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2801 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2802 drivers.removeCurrentDriver();
\r
2803 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2804 errorText_ = errorStream_.str();
\r
2807 stream_.nDeviceChannels[mode] = channels;
\r
2808 stream_.nUserChannels[mode] = channels;
\r
2809 stream_.channelOffset[mode] = firstChannel;
\r
2811 // Verify the sample rate is supported.
\r
2812 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2813 if ( result != ASE_OK ) {
\r
2814 drivers.removeCurrentDriver();
\r
2815 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2816 errorText_ = errorStream_.str();
\r
2820 // Get the current sample rate
\r
2821 ASIOSampleRate currentRate;
\r
2822 result = ASIOGetSampleRate( ¤tRate );
\r
2823 if ( result != ASE_OK ) {
\r
2824 drivers.removeCurrentDriver();
\r
2825 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2826 errorText_ = errorStream_.str();
\r
2830 // Set the sample rate only if necessary
\r
2831 if ( currentRate != sampleRate ) {
\r
2832 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2841 // Determine the driver data type.
\r
2842 ASIOChannelInfo channelInfo;
\r
2843 channelInfo.channel = 0;
\r
2844 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2845 else channelInfo.isInput = true;
\r
2846 result = ASIOGetChannelInfo( &channelInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 drivers.removeCurrentDriver();
\r
2849 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2850 errorText_ = errorStream_.str();
\r
2854 // Assuming WINDOWS host is always little-endian.
\r
2855 stream_.doByteSwap[mode] = false;
\r
2856 stream_.userFormat = format;
\r
2857 stream_.deviceFormat[mode] = 0;
\r
2858 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2860 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2862 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2863 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2864 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2866 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2867 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2868 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2870 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2871 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2872 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2874 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2875 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2876 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2879 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Set the buffer size. For a duplex stream, this will end up
\r
2887 // setting the buffer size based on the input constraints, which
\r
2889 long minSize, maxSize, preferSize, granularity;
\r
2890 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2891 if ( result != ASE_OK ) {
\r
2892 drivers.removeCurrentDriver();
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2894 errorText_ = errorStream_.str();
\r
2898 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2899 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2900 else if ( granularity == -1 ) {
\r
2901 // Make sure bufferSize is a power of two.
\r
2902 int log2_of_min_size = 0;
\r
2903 int log2_of_max_size = 0;
\r
2905 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2906 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2907 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2910 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2911 int min_delta_num = log2_of_min_size;
\r
2913 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2914 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2915 if (current_delta < min_delta) {
\r
2916 min_delta = current_delta;
\r
2917 min_delta_num = i;
\r
2921 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2922 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2923 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2925 else if ( granularity != 0 ) {
\r
2926 // Set to an even multiple of granularity, rounding up.
\r
2927 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2930 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2931 drivers.removeCurrentDriver();
\r
2932 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2936 stream_.bufferSize = *bufferSize;
\r
2937 stream_.nBuffers = 2;
\r
2939 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2940 else stream_.userInterleaved = true;
\r
2942 // ASIO always uses non-interleaved buffers.
\r
2943 stream_.deviceInterleaved[mode] = false;
\r
2945 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2946 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2947 if ( handle == 0 ) {
\r
2949 handle = new AsioHandle;
\r
2951 catch ( std::bad_alloc& ) {
\r
2952 //if ( handle == NULL ) {
\r
2953 drivers.removeCurrentDriver();
\r
2954 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2957 handle->bufferInfos = 0;
\r
2959 // Create a manual-reset event.
\r
2960 handle->condition = CreateEvent( NULL, // no security
\r
2961 TRUE, // manual-reset
\r
2962 FALSE, // non-signaled initially
\r
2963 NULL ); // unnamed
\r
2964 stream_.apiHandle = (void *) handle;
\r
2967 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2968 // and output separately, we'll have to dispose of previously
\r
2969 // created output buffers for a duplex stream.
\r
2970 long inputLatency, outputLatency;
\r
2971 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2972 ASIODisposeBuffers();
\r
2973 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2976 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2977 bool buffersAllocated = false;
\r
2978 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2979 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2980 if ( handle->bufferInfos == NULL ) {
\r
2981 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2982 errorText_ = errorStream_.str();
\r
2986 ASIOBufferInfo *infos;
\r
2987 infos = handle->bufferInfos;
\r
2988 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2989 infos->isInput = ASIOFalse;
\r
2990 infos->channelNum = i + stream_.channelOffset[0];
\r
2991 infos->buffers[0] = infos->buffers[1] = 0;
\r
2993 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2994 infos->isInput = ASIOTrue;
\r
2995 infos->channelNum = i + stream_.channelOffset[1];
\r
2996 infos->buffers[0] = infos->buffers[1] = 0;
\r
2999 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3000 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3001 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3002 asioCallbacks.asioMessage = &asioMessages;
\r
3003 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3004 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3005 if ( result != ASE_OK ) {
\r
3006 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3007 errorText_ = errorStream_.str();
\r
3010 buffersAllocated = true;
\r
3012 // Set flags for buffer conversion.
\r
3013 stream_.doConvertBuffer[mode] = false;
\r
3014 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3015 stream_.doConvertBuffer[mode] = true;
\r
3016 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3017 stream_.nUserChannels[mode] > 1 )
\r
3018 stream_.doConvertBuffer[mode] = true;
\r
3020 // Allocate necessary internal buffers
\r
3021 unsigned long bufferBytes;
\r
3022 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3023 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3024 if ( stream_.userBuffer[mode] == NULL ) {
\r
3025 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3029 if ( stream_.doConvertBuffer[mode] ) {
\r
3031 bool makeBuffer = true;
\r
3032 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3033 if ( mode == INPUT ) {
\r
3034 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3035 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3036 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3040 if ( makeBuffer ) {
\r
3041 bufferBytes *= *bufferSize;
\r
3042 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3043 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3044 if ( stream_.deviceBuffer == NULL ) {
\r
3045 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3051 stream_.sampleRate = sampleRate;
\r
3052 stream_.device[mode] = device;
\r
3053 stream_.state = STREAM_STOPPED;
\r
3054 asioCallbackInfo = &stream_.callbackInfo;
\r
3055 stream_.callbackInfo.object = (void *) this;
\r
3056 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3057 // We had already set up an output stream.
\r
3058 stream_.mode = DUPLEX;
\r
3060 stream_.mode = mode;
\r
3062 // Determine device latencies
\r
3063 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3064 if ( result != ASE_OK ) {
\r
3065 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3066 errorText_ = errorStream_.str();
\r
3067 error( RtError::WARNING); // warn but don't fail
\r
3070 stream_.latency[0] = outputLatency;
\r
3071 stream_.latency[1] = inputLatency;
\r
3074 // Setup the buffer conversion information structure. We don't use
\r
3075 // buffers to do channel offsets, so we override that parameter
\r
3077 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3082 if ( buffersAllocated )
\r
3083 ASIODisposeBuffers();
\r
3084 drivers.removeCurrentDriver();
\r
3087 CloseHandle( handle->condition );
\r
3088 if ( handle->bufferInfos )
\r
3089 free( handle->bufferInfos );
\r
3091 stream_.apiHandle = 0;
\r
3094 for ( int i=0; i<2; i++ ) {
\r
3095 if ( stream_.userBuffer[i] ) {
\r
3096 free( stream_.userBuffer[i] );
\r
3097 stream_.userBuffer[i] = 0;
\r
3101 if ( stream_.deviceBuffer ) {
\r
3102 free( stream_.deviceBuffer );
\r
3103 stream_.deviceBuffer = 0;
\r
3109 void RtApiAsio :: closeStream()
\r
3111 if ( stream_.state == STREAM_CLOSED ) {
\r
3112 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3113 error( RtError::WARNING );
\r
3117 if ( stream_.state == STREAM_RUNNING ) {
\r
3118 stream_.state = STREAM_STOPPED;
\r
3121 ASIODisposeBuffers();
\r
3122 drivers.removeCurrentDriver();
\r
3124 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3126 CloseHandle( handle->condition );
\r
3127 if ( handle->bufferInfos )
\r
3128 free( handle->bufferInfos );
\r
3130 stream_.apiHandle = 0;
\r
3133 for ( int i=0; i<2; i++ ) {
\r
3134 if ( stream_.userBuffer[i] ) {
\r
3135 free( stream_.userBuffer[i] );
\r
3136 stream_.userBuffer[i] = 0;
\r
3140 if ( stream_.deviceBuffer ) {
\r
3141 free( stream_.deviceBuffer );
\r
3142 stream_.deviceBuffer = 0;
\r
3145 stream_.mode = UNINITIALIZED;
\r
3146 stream_.state = STREAM_CLOSED;
\r
3149 bool stopThreadCalled = false;
\r
3151 void RtApiAsio :: startStream()
\r
3154 if ( stream_.state == STREAM_RUNNING ) {
\r
3155 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3156 error( RtError::WARNING );
\r
3160 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3161 ASIOError result = ASIOStart();
\r
3162 if ( result != ASE_OK ) {
\r
3163 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3164 errorText_ = errorStream_.str();
\r
3168 handle->drainCounter = 0;
\r
3169 handle->internalDrain = false;
\r
3170 ResetEvent( handle->condition );
\r
3171 stream_.state = STREAM_RUNNING;
\r
3175 stopThreadCalled = false;
\r
3177 if ( result == ASE_OK ) return;
\r
3178 error( RtError::SYSTEM_ERROR );
\r
3181 void RtApiAsio :: stopStream()
\r
3184 if ( stream_.state == STREAM_STOPPED ) {
\r
3185 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3186 error( RtError::WARNING );
\r
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3191 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3192 if ( handle->drainCounter == 0 ) {
\r
3193 handle->drainCounter = 2;
\r
3194 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3198 stream_.state = STREAM_STOPPED;
\r
3200 ASIOError result = ASIOStop();
\r
3201 if ( result != ASE_OK ) {
\r
3202 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3203 errorText_ = errorStream_.str();
\r
3206 if ( result == ASE_OK ) return;
\r
3207 error( RtError::SYSTEM_ERROR );
\r
3210 void RtApiAsio :: abortStream()
\r
3213 if ( stream_.state == STREAM_STOPPED ) {
\r
3214 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3215 error( RtError::WARNING );
\r
3219 // The following lines were commented-out because some behavior was
\r
3220 // noted where the device buffers need to be zeroed to avoid
\r
3221 // continuing sound, even when the device buffers are completely
\r
3222 // disposed. So now, calling abort is the same as calling stop.
\r
3223 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3224 // handle->drainCounter = 2;
\r
3228 // This function will be called by a spawned thread when the user
\r
3229 // callback function signals that the stream should be stopped or
\r
3230 // aborted. It is necessary to handle it this way because the
\r
3231 // callbackEvent() function must return before the ASIOStop()
\r
3232 // function will return.
\r
3233 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3235 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3236 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3238 object->stopStream();
\r
3239 _endthreadex( 0 );
\r
3243 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3245 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3246 if ( stream_.state == STREAM_CLOSED ) {
\r
3247 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3248 error( RtError::WARNING );
\r
3252 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3253 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 // Check if we were draining the stream and signal if finished.
\r
3256 if ( handle->drainCounter > 3 ) {
\r
3258 stream_.state = STREAM_STOPPING;
\r
3259 if ( handle->internalDrain == false )
\r
3260 SetEvent( handle->condition );
\r
3261 else { // spawn a thread to stop the stream
\r
3262 unsigned threadId;
\r
3263 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3264 &stream_.callbackInfo, 0, &threadId );
\r
3269 // Invoke user callback to get fresh output data UNLESS we are
\r
3270 // draining stream.
\r
3271 if ( handle->drainCounter == 0 ) {
\r
3272 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3273 double streamTime = getStreamTime();
\r
3274 RtAudioStreamStatus status = 0;
\r
3275 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3276 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3279 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3280 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3283 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3284 stream_.bufferSize, streamTime, status, info->userData );
\r
3285 if ( cbReturnValue == 2 ) {
\r
3286 stream_.state = STREAM_STOPPING;
\r
3287 handle->drainCounter = 2;
\r
3288 unsigned threadId;
\r
3289 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3290 &stream_.callbackInfo, 0, &threadId );
\r
3293 else if ( cbReturnValue == 1 ) {
\r
3294 handle->drainCounter = 1;
\r
3295 handle->internalDrain = true;
\r
3299 unsigned int nChannels, bufferBytes, i, j;
\r
3300 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3301 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3303 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3305 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3307 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3308 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3309 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3313 else if ( stream_.doConvertBuffer[0] ) {
\r
3315 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3316 if ( stream_.doByteSwap[0] )
\r
3317 byteSwapBuffer( stream_.deviceBuffer,
\r
3318 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3319 stream_.deviceFormat[0] );
\r
3321 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3322 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3323 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3324 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3330 if ( stream_.doByteSwap[0] )
\r
3331 byteSwapBuffer( stream_.userBuffer[0],
\r
3332 stream_.bufferSize * stream_.nUserChannels[0],
\r
3333 stream_.userFormat );
\r
3335 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3336 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3337 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3338 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3343 if ( handle->drainCounter ) {
\r
3344 handle->drainCounter++;
\r
3349 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3351 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3353 if (stream_.doConvertBuffer[1]) {
\r
3355 // Always interleave ASIO input data.
\r
3356 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3357 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3358 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3359 handle->bufferInfos[i].buffers[bufferIndex],
\r
3363 if ( stream_.doByteSwap[1] )
\r
3364 byteSwapBuffer( stream_.deviceBuffer,
\r
3365 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3366 stream_.deviceFormat[1] );
\r
3367 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3371 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3372 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3373 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3374 handle->bufferInfos[i].buffers[bufferIndex],
\r
3379 if ( stream_.doByteSwap[1] )
\r
3380 byteSwapBuffer( stream_.userBuffer[1],
\r
3381 stream_.bufferSize * stream_.nUserChannels[1],
\r
3382 stream_.userFormat );
\r
3387 // The following call was suggested by Malte Clasen. While the API
\r
3388 // documentation indicates it should not be required, some device
\r
3389 // drivers apparently do not function correctly without it.
\r
3390 ASIOOutputReady();
\r
3392 RtApi::tickStreamTime();
\r
3396 void sampleRateChanged( ASIOSampleRate sRate )
\r
3398 // The ASIO documentation says that this usually only happens during
\r
3399 // external sync. Audio processing is not stopped by the driver,
\r
3400 // actual sample rate might not have even changed, maybe only the
\r
3401 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3404 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3406 object->stopStream();
\r
3408 catch ( RtError &exception ) {
\r
3409 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3413 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3416 long asioMessages( long selector, long value, void* message, double* opt )
\r
3420 switch( selector ) {
\r
3421 case kAsioSelectorSupported:
\r
3422 if ( value == kAsioResetRequest
\r
3423 || value == kAsioEngineVersion
\r
3424 || value == kAsioResyncRequest
\r
3425 || value == kAsioLatenciesChanged
\r
3426 // The following three were added for ASIO 2.0, you don't
\r
3427 // necessarily have to support them.
\r
3428 || value == kAsioSupportsTimeInfo
\r
3429 || value == kAsioSupportsTimeCode
\r
3430 || value == kAsioSupportsInputMonitor)
\r
3433 case kAsioResetRequest:
\r
3434 // Defer the task and perform the reset of the driver during the
\r
3435 // next "safe" situation. You cannot reset the driver right now,
\r
3436 // as this code is called from the driver. Reset the driver is
\r
3437 // done by completely destruct is. I.e. ASIOStop(),
\r
3438 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3440 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3443 case kAsioResyncRequest:
\r
3444 // This informs the application that the driver encountered some
\r
3445 // non-fatal data loss. It is used for synchronization purposes
\r
3446 // of different media. Added mainly to work around the Win16Mutex
\r
3447 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3448 // which could lose data because the Mutex was held too long by
\r
3449 // another thread. However a driver can issue it in other
\r
3450 // situations, too.
\r
3451 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3455 case kAsioLatenciesChanged:
\r
3456 // This will inform the host application that the drivers were
\r
3457 // latencies changed. Beware, it this does not mean that the
\r
3458 // buffer sizes have changed! You might need to update internal
\r
3460 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3463 case kAsioEngineVersion:
\r
3464 // Return the supported ASIO version of the host application. If
\r
3465 // a host application does not implement this selector, ASIO 1.0
\r
3466 // is assumed by the driver.
\r
3469 case kAsioSupportsTimeInfo:
\r
3470 // Informs the driver whether the
\r
3471 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3472 // For compatibility with ASIO 1.0 drivers the host application
\r
3473 // should always support the "old" bufferSwitch method, too.
\r
3476 case kAsioSupportsTimeCode:
\r
3477 // Informs the driver whether application is interested in time
\r
3478 // code info. If an application does not need to know about time
\r
3479 // code, the driver has less work to do.
\r
3486 static const char* getAsioErrorString( ASIOError result )
\r
3491 const char*message;
\r
3494 static Messages m[] =
\r
3496 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3497 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3498 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3499 { ASE_InvalidMode, "Invalid mode." },
\r
3500 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3501 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3502 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3505 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3506 if ( m[i].value == result ) return m[i].message;
\r
3508 return "Unknown error.";
\r
3510 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3514 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3516 // Modified by Robin Davies, October 2005
\r
3517 // - Improvements to DirectX pointer chasing.
\r
3518 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3519 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3520 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3521 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3523 #include <dsound.h>
\r
3524 #include <assert.h>
\r
3525 #include <algorithm>
\r
3527 #if defined(__MINGW32__)
\r
3528 // missing from latest mingw winapi
\r
3529 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3530 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3531 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3532 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3535 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3537 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3538 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3541 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3543 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3544 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3545 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3546 return pointer >= earlierPointer && pointer < laterPointer;
\r
3549 // A structure to hold various information related to the DirectSound
\r
3550 // API implementation.
\r
3552 unsigned int drainCounter; // Tracks callback counts when draining
\r
3553 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3557 UINT bufferPointer[2];
\r
3558 DWORD dsBufferSize[2];
\r
3559 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3563 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3566 // Declarations for utility functions, callbacks, and structures
\r
3567 // specific to the DirectSound implementation.
\r
3568 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3569 LPCTSTR description,
\r
3571 LPVOID lpContext );
\r
3573 static const char* getErrorString( int code );
\r
3575 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3584 : found(false) { validId[0] = false; validId[1] = false; }
\r
3587 std::vector< DsDevice > dsDevices;
\r
3589 RtApiDs :: RtApiDs()
\r
3591 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3592 // accept whatever the mainline chose for a threading model.
\r
3593 coInitialized_ = false;
\r
3594 HRESULT hr = CoInitialize( NULL );
\r
3595 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3598 RtApiDs :: ~RtApiDs()
\r
3600 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3601 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3604 // The DirectSound default output is always the first device.
\r
3605 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3610 // The DirectSound default input is always the first input device,
\r
3611 // which is the first capture device enumerated.
\r
3612 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3617 unsigned int RtApiDs :: getDeviceCount( void )
\r
3619 // Set query flag for previously found devices to false, so that we
\r
3620 // can check for any devices that have disappeared.
\r
3621 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3622 dsDevices[i].found = false;
\r
3624 // Query DirectSound devices.
\r
3625 bool isInput = false;
\r
3626 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3627 if ( FAILED( result ) ) {
\r
3628 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3629 errorText_ = errorStream_.str();
\r
3630 error( RtError::WARNING );
\r
3633 // Query DirectSoundCapture devices.
\r
3635 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3636 if ( FAILED( result ) ) {
\r
3637 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3638 errorText_ = errorStream_.str();
\r
3639 error( RtError::WARNING );
\r
3642 // Clean out any devices that may have disappeared.
\r
3643 std::vector< int > indices;
\r
3644 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3645 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3646 unsigned int nErased = 0;
\r
3647 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3648 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3650 return dsDevices.size();
\r
3653 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3655 RtAudio::DeviceInfo info;
\r
3656 info.probed = false;
\r
3658 if ( dsDevices.size() == 0 ) {
\r
3659 // Force a query of all devices
\r
3661 if ( dsDevices.size() == 0 ) {
\r
3662 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3663 error( RtError::INVALID_USE );
\r
3667 if ( device >= dsDevices.size() ) {
\r
3668 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3669 error( RtError::INVALID_USE );
\r
3673 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3675 LPDIRECTSOUND output;
\r
3677 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3678 if ( FAILED( result ) ) {
\r
3679 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3680 errorText_ = errorStream_.str();
\r
3681 error( RtError::WARNING );
\r
3685 outCaps.dwSize = sizeof( outCaps );
\r
3686 result = output->GetCaps( &outCaps );
\r
3687 if ( FAILED( result ) ) {
\r
3688 output->Release();
\r
3689 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3690 errorText_ = errorStream_.str();
\r
3691 error( RtError::WARNING );
\r
3695 // Get output channel information.
\r
3696 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3698 // Get sample rate information.
\r
3699 info.sampleRates.clear();
\r
3700 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3701 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3702 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3703 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3706 // Get format information.
\r
3707 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3708 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3710 output->Release();
\r
3712 if ( getDefaultOutputDevice() == device )
\r
3713 info.isDefaultOutput = true;
\r
3715 if ( dsDevices[ device ].validId[1] == false ) {
\r
3716 info.name = dsDevices[ device ].name;
\r
3717 info.probed = true;
\r
3723 LPDIRECTSOUNDCAPTURE input;
\r
3724 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3725 if ( FAILED( result ) ) {
\r
3726 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3727 errorText_ = errorStream_.str();
\r
3728 error( RtError::WARNING );
\r
3733 inCaps.dwSize = sizeof( inCaps );
\r
3734 result = input->GetCaps( &inCaps );
\r
3735 if ( FAILED( result ) ) {
\r
3737 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3738 errorText_ = errorStream_.str();
\r
3739 error( RtError::WARNING );
\r
3743 // Get input channel information.
\r
3744 info.inputChannels = inCaps.dwChannels;
\r
3746 // Get sample rate and format information.
\r
3747 std::vector<unsigned int> rates;
\r
3748 if ( inCaps.dwChannels >= 2 ) {
\r
3749 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3750 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3751 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3752 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3753 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3754 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3755 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3756 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3758 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3759 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3760 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3761 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3762 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3764 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3765 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3766 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3767 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3768 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3771 else if ( inCaps.dwChannels == 1 ) {
\r
3772 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3773 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3774 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3775 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3776 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3777 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3778 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3779 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3781 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3782 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3783 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3784 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3785 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3787 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3788 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3789 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3790 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3791 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3794 else info.inputChannels = 0; // technically, this would be an error
\r
3798 if ( info.inputChannels == 0 ) return info;
\r
3800 // Copy the supported rates to the info structure but avoid duplication.
\r
3802 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3804 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3805 if ( rates[i] == info.sampleRates[j] ) {
\r
3810 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3812 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3814 // If device opens for both playback and capture, we determine the channels.
\r
3815 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3816 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3818 if ( device == 0 ) info.isDefaultInput = true;
\r
3820 // Copy name and return.
\r
3821 info.name = dsDevices[ device ].name;
\r
3822 info.probed = true;
\r
3826 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3827 unsigned int firstChannel, unsigned int sampleRate,
\r
3828 RtAudioFormat format, unsigned int *bufferSize,
\r
3829 RtAudio::StreamOptions *options )
\r
3831 if ( channels + firstChannel > 2 ) {
\r
3832 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3836 unsigned int nDevices = dsDevices.size();
\r
3837 if ( nDevices == 0 ) {
\r
3838 // This should not happen because a check is made before this function is called.
\r
3839 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3843 if ( device >= nDevices ) {
\r
3844 // This should not happen because a check is made before this function is called.
\r
3845 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3849 if ( mode == OUTPUT ) {
\r
3850 if ( dsDevices[ device ].validId[0] == false ) {
\r
3851 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3852 errorText_ = errorStream_.str();
\r
3856 else { // mode == INPUT
\r
3857 if ( dsDevices[ device ].validId[1] == false ) {
\r
3858 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3859 errorText_ = errorStream_.str();
\r
3864 // According to a note in PortAudio, using GetDesktopWindow()
\r
3865 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3866 // that occur when the application's window is not the foreground
\r
3867 // window. Also, if the application window closes before the
\r
3868 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3869 // problems when using GetDesktopWindow() but it seems fine now
\r
3870 // (January 2010). I'll leave it commented here.
\r
3871 // HWND hWnd = GetForegroundWindow();
\r
3872 HWND hWnd = GetDesktopWindow();
\r
3874 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3875 // two. This is a judgement call and a value of two is probably too
\r
3876 // low for capture, but it should work for playback.
\r
3878 if ( options ) nBuffers = options->numberOfBuffers;
\r
3879 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3880 if ( nBuffers < 2 ) nBuffers = 3;
\r
3882 // Check the lower range of the user-specified buffer size and set
\r
3883 // (arbitrarily) to a lower bound of 32.
\r
3884 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3886 // Create the wave format structure. The data format setting will
\r
3887 // be determined later.
\r
3888 WAVEFORMATEX waveFormat;
\r
3889 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3890 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3891 waveFormat.nChannels = channels + firstChannel;
\r
3892 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3894 // Determine the device buffer size. By default, we'll use the value
\r
3895 // defined above (32K), but we will grow it to make allowances for
\r
3896 // very large software buffer sizes.
\r
3897 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
3898 DWORD dsPointerLeadTime = 0;
\r
3900 void *ohandle = 0, *bhandle = 0;
\r
3902 if ( mode == OUTPUT ) {
\r
3904 LPDIRECTSOUND output;
\r
3905 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3906 if ( FAILED( result ) ) {
\r
3907 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3908 errorText_ = errorStream_.str();
\r
3913 outCaps.dwSize = sizeof( outCaps );
\r
3914 result = output->GetCaps( &outCaps );
\r
3915 if ( FAILED( result ) ) {
\r
3916 output->Release();
\r
3917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3918 errorText_ = errorStream_.str();
\r
3922 // Check channel information.
\r
3923 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3924 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3925 errorText_ = errorStream_.str();
\r
3929 // Check format information. Use 16-bit format unless not
\r
3930 // supported or user requests 8-bit.
\r
3931 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3932 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3933 waveFormat.wBitsPerSample = 16;
\r
3934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3937 waveFormat.wBitsPerSample = 8;
\r
3938 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3940 stream_.userFormat = format;
\r
3942 // Update wave format structure and buffer information.
\r
3943 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3944 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3945 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3947 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
3948 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
3949 dsBufferSize *= 2;
\r
3951 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
3952 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
3953 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
3954 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
3955 if ( FAILED( result ) ) {
\r
3956 output->Release();
\r
3957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
3958 errorText_ = errorStream_.str();
\r
3962 // Even though we will write to the secondary buffer, we need to
\r
3963 // access the primary buffer to set the correct output format
\r
3964 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
3965 // buffer description.
\r
3966 DSBUFFERDESC bufferDescription;
\r
3967 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3968 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3969 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
3971 // Obtain the primary buffer
\r
3972 LPDIRECTSOUNDBUFFER buffer;
\r
3973 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3974 if ( FAILED( result ) ) {
\r
3975 output->Release();
\r
3976 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
3977 errorText_ = errorStream_.str();
\r
3981 // Set the primary DS buffer sound format.
\r
3982 result = buffer->SetFormat( &waveFormat );
\r
3983 if ( FAILED( result ) ) {
\r
3984 output->Release();
\r
3985 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
3986 errorText_ = errorStream_.str();
\r
3990 // Setup the secondary DS buffer description.
\r
3991 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3992 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3993 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
3994 DSBCAPS_GLOBALFOCUS |
\r
3995 DSBCAPS_GETCURRENTPOSITION2 |
\r
3996 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
3997 bufferDescription.dwBufferBytes = dsBufferSize;
\r
3998 bufferDescription.lpwfxFormat = &waveFormat;
\r
4000 // Try to create the secondary DS buffer. If that doesn't work,
\r
4001 // try to use software mixing. Otherwise, there's a problem.
\r
4002 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4003 if ( FAILED( result ) ) {
\r
4004 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4005 DSBCAPS_GLOBALFOCUS |
\r
4006 DSBCAPS_GETCURRENTPOSITION2 |
\r
4007 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4008 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4009 if ( FAILED( result ) ) {
\r
4010 output->Release();
\r
4011 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4012 errorText_ = errorStream_.str();
\r
4017 // Get the buffer size ... might be different from what we specified.
\r
4019 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4020 result = buffer->GetCaps( &dsbcaps );
\r
4021 if ( FAILED( result ) ) {
\r
4022 output->Release();
\r
4023 buffer->Release();
\r
4024 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4025 errorText_ = errorStream_.str();
\r
4029 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4031 // Lock the DS buffer
\r
4034 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4035 if ( FAILED( result ) ) {
\r
4036 output->Release();
\r
4037 buffer->Release();
\r
4038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4039 errorText_ = errorStream_.str();
\r
4043 // Zero the DS buffer
\r
4044 ZeroMemory( audioPtr, dataLen );
\r
4046 // Unlock the DS buffer
\r
4047 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4048 if ( FAILED( result ) ) {
\r
4049 output->Release();
\r
4050 buffer->Release();
\r
4051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4052 errorText_ = errorStream_.str();
\r
4056 ohandle = (void *) output;
\r
4057 bhandle = (void *) buffer;
\r
4060 if ( mode == INPUT ) {
\r
4062 LPDIRECTSOUNDCAPTURE input;
\r
4063 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4064 if ( FAILED( result ) ) {
\r
4065 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4066 errorText_ = errorStream_.str();
\r
4071 inCaps.dwSize = sizeof( inCaps );
\r
4072 result = input->GetCaps( &inCaps );
\r
4073 if ( FAILED( result ) ) {
\r
4075 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4076 errorText_ = errorStream_.str();
\r
4080 // Check channel information.
\r
4081 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4082 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4086 // Check format information. Use 16-bit format unless user
\r
4087 // requests 8-bit.
\r
4088 DWORD deviceFormats;
\r
4089 if ( channels + firstChannel == 2 ) {
\r
4090 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4091 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4092 waveFormat.wBitsPerSample = 8;
\r
4093 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4095 else { // assume 16-bit is supported
\r
4096 waveFormat.wBitsPerSample = 16;
\r
4097 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4100 else { // channel == 1
\r
4101 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4102 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4103 waveFormat.wBitsPerSample = 8;
\r
4104 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4106 else { // assume 16-bit is supported
\r
4107 waveFormat.wBitsPerSample = 16;
\r
4108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4111 stream_.userFormat = format;
\r
4113 // Update wave format structure and buffer information.
\r
4114 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4115 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4116 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4118 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4119 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4120 dsBufferSize *= 2;
\r
4122 // Setup the secondary DS buffer description.
\r
4123 DSCBUFFERDESC bufferDescription;
\r
4124 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4125 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4126 bufferDescription.dwFlags = 0;
\r
4127 bufferDescription.dwReserved = 0;
\r
4128 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4129 bufferDescription.lpwfxFormat = &waveFormat;
\r
4131 // Create the capture buffer.
\r
4132 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4133 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4134 if ( FAILED( result ) ) {
\r
4136 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4137 errorText_ = errorStream_.str();
\r
4141 // Get the buffer size ... might be different from what we specified.
\r
4142 DSCBCAPS dscbcaps;
\r
4143 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4144 result = buffer->GetCaps( &dscbcaps );
\r
4145 if ( FAILED( result ) ) {
\r
4147 buffer->Release();
\r
4148 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4149 errorText_ = errorStream_.str();
\r
4153 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4155 // NOTE: We could have a problem here if this is a duplex stream
\r
4156 // and the play and capture hardware buffer sizes are different
\r
4157 // (I'm actually not sure if that is a problem or not).
\r
4158 // Currently, we are not verifying that.
\r
4160 // Lock the capture buffer
\r
4163 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4164 if ( FAILED( result ) ) {
\r
4166 buffer->Release();
\r
4167 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4168 errorText_ = errorStream_.str();
\r
4172 // Zero the buffer
\r
4173 ZeroMemory( audioPtr, dataLen );
\r
4175 // Unlock the buffer
\r
4176 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4177 if ( FAILED( result ) ) {
\r
4179 buffer->Release();
\r
4180 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4181 errorText_ = errorStream_.str();
\r
4185 ohandle = (void *) input;
\r
4186 bhandle = (void *) buffer;
\r
4189 // Set various stream parameters
\r
4190 DsHandle *handle = 0;
\r
4191 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4192 stream_.nUserChannels[mode] = channels;
\r
4193 stream_.bufferSize = *bufferSize;
\r
4194 stream_.channelOffset[mode] = firstChannel;
\r
4195 stream_.deviceInterleaved[mode] = true;
\r
4196 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4197 else stream_.userInterleaved = true;
\r
4199 // Set flag for buffer conversion
\r
4200 stream_.doConvertBuffer[mode] = false;
\r
4201 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4202 stream_.doConvertBuffer[mode] = true;
\r
4203 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4204 stream_.doConvertBuffer[mode] = true;
\r
4205 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4206 stream_.nUserChannels[mode] > 1 )
\r
4207 stream_.doConvertBuffer[mode] = true;
\r
4209 // Allocate necessary internal buffers
\r
4210 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4211 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4212 if ( stream_.userBuffer[mode] == NULL ) {
\r
4213 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4217 if ( stream_.doConvertBuffer[mode] ) {
\r
4219 bool makeBuffer = true;
\r
4220 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4221 if ( mode == INPUT ) {
\r
4222 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4223 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4224 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4228 if ( makeBuffer ) {
\r
4229 bufferBytes *= *bufferSize;
\r
4230 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4231 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4232 if ( stream_.deviceBuffer == NULL ) {
\r
4233 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4239 // Allocate our DsHandle structures for the stream.
\r
4240 if ( stream_.apiHandle == 0 ) {
\r
4242 handle = new DsHandle;
\r
4244 catch ( std::bad_alloc& ) {
\r
4245 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4249 // Create a manual-reset event.
\r
4250 handle->condition = CreateEvent( NULL, // no security
\r
4251 TRUE, // manual-reset
\r
4252 FALSE, // non-signaled initially
\r
4253 NULL ); // unnamed
\r
4254 stream_.apiHandle = (void *) handle;
\r
4257 handle = (DsHandle *) stream_.apiHandle;
\r
4258 handle->id[mode] = ohandle;
\r
4259 handle->buffer[mode] = bhandle;
\r
4260 handle->dsBufferSize[mode] = dsBufferSize;
\r
4261 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4263 stream_.device[mode] = device;
\r
4264 stream_.state = STREAM_STOPPED;
\r
4265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4266 // We had already set up an output stream.
\r
4267 stream_.mode = DUPLEX;
\r
4269 stream_.mode = mode;
\r
4270 stream_.nBuffers = nBuffers;
\r
4271 stream_.sampleRate = sampleRate;
\r
4273 // Setup the buffer conversion information structure.
\r
4274 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4276 // Setup the callback thread.
\r
4277 if ( stream_.callbackInfo.isRunning == false ) {
\r
4278 unsigned threadId;
\r
4279 stream_.callbackInfo.isRunning = true;
\r
4280 stream_.callbackInfo.object = (void *) this;
\r
4281 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4282 &stream_.callbackInfo, 0, &threadId );
\r
4283 if ( stream_.callbackInfo.thread == 0 ) {
\r
4284 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4288 // Boost DS thread priority
\r
4289 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4295 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4296 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4297 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4298 if ( buffer ) buffer->Release();
\r
4299 object->Release();
\r
4301 if ( handle->buffer[1] ) {
\r
4302 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4303 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4304 if ( buffer ) buffer->Release();
\r
4305 object->Release();
\r
4307 CloseHandle( handle->condition );
\r
4309 stream_.apiHandle = 0;
\r
4312 for ( int i=0; i<2; i++ ) {
\r
4313 if ( stream_.userBuffer[i] ) {
\r
4314 free( stream_.userBuffer[i] );
\r
4315 stream_.userBuffer[i] = 0;
\r
4319 if ( stream_.deviceBuffer ) {
\r
4320 free( stream_.deviceBuffer );
\r
4321 stream_.deviceBuffer = 0;
\r
4327 void RtApiDs :: closeStream()
\r
4329 if ( stream_.state == STREAM_CLOSED ) {
\r
4330 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4331 error( RtError::WARNING );
\r
4335 // Stop the callback thread.
\r
4336 stream_.callbackInfo.isRunning = false;
\r
4337 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4338 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4340 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4342 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4343 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4344 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4347 buffer->Release();
\r
4349 object->Release();
\r
4351 if ( handle->buffer[1] ) {
\r
4352 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4353 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4356 buffer->Release();
\r
4358 object->Release();
\r
4360 CloseHandle( handle->condition );
\r
4362 stream_.apiHandle = 0;
\r
4365 for ( int i=0; i<2; i++ ) {
\r
4366 if ( stream_.userBuffer[i] ) {
\r
4367 free( stream_.userBuffer[i] );
\r
4368 stream_.userBuffer[i] = 0;
\r
4372 if ( stream_.deviceBuffer ) {
\r
4373 free( stream_.deviceBuffer );
\r
4374 stream_.deviceBuffer = 0;
\r
4377 stream_.mode = UNINITIALIZED;
\r
4378 stream_.state = STREAM_CLOSED;
\r
4381 void RtApiDs :: startStream()
\r
4384 if ( stream_.state == STREAM_RUNNING ) {
\r
4385 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4386 error( RtError::WARNING );
\r
4390 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4392 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4393 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4394 // this is already in effect.
\r
4395 timeBeginPeriod( 1 );
\r
4397 buffersRolling = false;
\r
4398 duplexPrerollBytes = 0;
\r
4400 if ( stream_.mode == DUPLEX ) {
\r
4401 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4402 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4405 HRESULT result = 0;
\r
4406 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4408 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4409 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4410 if ( FAILED( result ) ) {
\r
4411 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4412 errorText_ = errorStream_.str();
\r
4417 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4419 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4420 result = buffer->Start( DSCBSTART_LOOPING );
\r
4421 if ( FAILED( result ) ) {
\r
4422 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4423 errorText_ = errorStream_.str();
\r
4428 handle->drainCounter = 0;
\r
4429 handle->internalDrain = false;
\r
4430 ResetEvent( handle->condition );
\r
4431 stream_.state = STREAM_RUNNING;
\r
4434 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4437 void RtApiDs :: stopStream()
\r
4440 if ( stream_.state == STREAM_STOPPED ) {
\r
4441 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4442 error( RtError::WARNING );
\r
4446 HRESULT result = 0;
\r
4449 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4451 if ( handle->drainCounter == 0 ) {
\r
4452 handle->drainCounter = 2;
\r
4453 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4456 stream_.state = STREAM_STOPPED;
\r
4458 // Stop the buffer and clear memory
\r
4459 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4460 result = buffer->Stop();
\r
4461 if ( FAILED( result ) ) {
\r
4462 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4463 errorText_ = errorStream_.str();
\r
4467 // Lock the buffer and clear it so that if we start to play again,
\r
4468 // we won't have old data playing.
\r
4469 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4470 if ( FAILED( result ) ) {
\r
4471 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4472 errorText_ = errorStream_.str();
\r
4476 // Zero the DS buffer
\r
4477 ZeroMemory( audioPtr, dataLen );
\r
4479 // Unlock the DS buffer
\r
4480 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4481 if ( FAILED( result ) ) {
\r
4482 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4483 errorText_ = errorStream_.str();
\r
4487 // If we start playing again, we must begin at beginning of buffer.
\r
4488 handle->bufferPointer[0] = 0;
\r
4491 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4492 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4496 stream_.state = STREAM_STOPPED;
\r
4498 result = buffer->Stop();
\r
4499 if ( FAILED( result ) ) {
\r
4500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4501 errorText_ = errorStream_.str();
\r
4505 // Lock the buffer and clear it so that if we start to play again,
\r
4506 // we won't have old data playing.
\r
4507 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4508 if ( FAILED( result ) ) {
\r
4509 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4510 errorText_ = errorStream_.str();
\r
4514 // Zero the DS buffer
\r
4515 ZeroMemory( audioPtr, dataLen );
\r
4517 // Unlock the DS buffer
\r
4518 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4519 if ( FAILED( result ) ) {
\r
4520 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4521 errorText_ = errorStream_.str();
\r
4525 // If we start recording again, we must begin at beginning of buffer.
\r
4526 handle->bufferPointer[1] = 0;
\r
4530 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4531 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4534 void RtApiDs :: abortStream()
\r
4537 if ( stream_.state == STREAM_STOPPED ) {
\r
4538 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4539 error( RtError::WARNING );
\r
4543 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4544 handle->drainCounter = 2;
\r
4549 void RtApiDs :: callbackEvent()
\r
4551 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4552 Sleep( 50 ); // sleep 50 milliseconds
\r
4556 if ( stream_.state == STREAM_CLOSED ) {
\r
4557 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4558 error( RtError::WARNING );
\r
4562 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4563 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4565 // Check if we were draining the stream and signal is finished.
\r
4566 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4568 stream_.state = STREAM_STOPPING;
\r
4569 if ( handle->internalDrain == false )
\r
4570 SetEvent( handle->condition );
\r
4576 // Invoke user callback to get fresh output data UNLESS we are
\r
4577 // draining stream.
\r
4578 if ( handle->drainCounter == 0 ) {
\r
4579 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4580 double streamTime = getStreamTime();
\r
4581 RtAudioStreamStatus status = 0;
\r
4582 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4583 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4584 handle->xrun[0] = false;
\r
4586 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4587 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4588 handle->xrun[1] = false;
\r
4590 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4591 stream_.bufferSize, streamTime, status, info->userData );
\r
4592 if ( cbReturnValue == 2 ) {
\r
4593 stream_.state = STREAM_STOPPING;
\r
4594 handle->drainCounter = 2;
\r
4598 else if ( cbReturnValue == 1 ) {
\r
4599 handle->drainCounter = 1;
\r
4600 handle->internalDrain = true;
\r
4605 DWORD currentWritePointer, safeWritePointer;
\r
4606 DWORD currentReadPointer, safeReadPointer;
\r
4607 UINT nextWritePointer;
\r
4609 LPVOID buffer1 = NULL;
\r
4610 LPVOID buffer2 = NULL;
\r
4611 DWORD bufferSize1 = 0;
\r
4612 DWORD bufferSize2 = 0;
\r
4617 if ( buffersRolling == false ) {
\r
4618 if ( stream_.mode == DUPLEX ) {
\r
4619 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4621 // It takes a while for the devices to get rolling. As a result,
\r
4622 // there's no guarantee that the capture and write device pointers
\r
4623 // will move in lockstep. Wait here for both devices to start
\r
4624 // rolling, and then set our buffer pointers accordingly.
\r
4625 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4626 // bytes later than the write buffer.
\r
4628 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4629 // take place between the two GetCurrentPosition calls... but I'm
\r
4630 // really not sure how to solve the problem. Temporarily boost to
\r
4631 // Realtime priority, maybe; but I'm not sure what priority the
\r
4632 // DirectSound service threads run at. We *should* be roughly
\r
4633 // within a ms or so of correct.
\r
4635 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4636 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4638 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4640 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4641 if ( FAILED( result ) ) {
\r
4642 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4643 errorText_ = errorStream_.str();
\r
4644 error( RtError::SYSTEM_ERROR );
\r
4646 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4647 if ( FAILED( result ) ) {
\r
4648 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4649 errorText_ = errorStream_.str();
\r
4650 error( RtError::SYSTEM_ERROR );
\r
4653 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4654 if ( FAILED( result ) ) {
\r
4655 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4656 errorText_ = errorStream_.str();
\r
4657 error( RtError::SYSTEM_ERROR );
\r
4659 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4660 if ( FAILED( result ) ) {
\r
4661 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4662 errorText_ = errorStream_.str();
\r
4663 error( RtError::SYSTEM_ERROR );
\r
4665 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4669 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4671 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4672 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4673 handle->bufferPointer[1] = safeReadPointer;
\r
4675 else if ( stream_.mode == OUTPUT ) {
\r
4677 // Set the proper nextWritePosition after initial startup.
\r
4678 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4679 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4680 if ( FAILED( result ) ) {
\r
4681 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4682 errorText_ = errorStream_.str();
\r
4683 error( RtError::SYSTEM_ERROR );
\r
4685 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4686 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4689 buffersRolling = true;
\r
4692 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4694 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4696 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4697 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4698 bufferBytes *= formatBytes( stream_.userFormat );
\r
4699 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4702 // Setup parameters and do buffer conversion if necessary.
\r
4703 if ( stream_.doConvertBuffer[0] ) {
\r
4704 buffer = stream_.deviceBuffer;
\r
4705 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4706 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4707 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4710 buffer = stream_.userBuffer[0];
\r
4711 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4712 bufferBytes *= formatBytes( stream_.userFormat );
\r
4715 // No byte swapping necessary in DirectSound implementation.
\r
4717 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4718 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4720 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4721 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4723 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4724 nextWritePointer = handle->bufferPointer[0];
\r
4726 DWORD endWrite, leadPointer;
\r
4728 // Find out where the read and "safe write" pointers are.
\r
4729 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4730 if ( FAILED( result ) ) {
\r
4731 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4732 errorText_ = errorStream_.str();
\r
4733 error( RtError::SYSTEM_ERROR );
\r
4736 // We will copy our output buffer into the region between
\r
4737 // safeWritePointer and leadPointer. If leadPointer is not
\r
4738 // beyond the next endWrite position, wait until it is.
\r
4739 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4740 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4741 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4742 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4743 endWrite = nextWritePointer + bufferBytes;
\r
4745 // Check whether the entire write region is behind the play pointer.
\r
4746 if ( leadPointer >= endWrite ) break;
\r
4748 // If we are here, then we must wait until the leadPointer advances
\r
4749 // beyond the end of our next write region. We use the
\r
4750 // Sleep() function to suspend operation until that happens.
\r
4751 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4752 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4753 if ( millis < 1.0 ) millis = 1.0;
\r
4754 Sleep( (DWORD) millis );
\r
4757 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4758 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4759 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4760 handle->xrun[0] = true;
\r
4761 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4762 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4763 handle->bufferPointer[0] = nextWritePointer;
\r
4764 endWrite = nextWritePointer + bufferBytes;
\r
4767 // Lock free space in the buffer
\r
4768 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4769 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4770 if ( FAILED( result ) ) {
\r
4771 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4772 errorText_ = errorStream_.str();
\r
4773 error( RtError::SYSTEM_ERROR );
\r
4776 // Copy our buffer into the DS buffer
\r
4777 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4778 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4780 // Update our buffer offset and unlock sound buffer
\r
4781 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4782 if ( FAILED( result ) ) {
\r
4783 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4784 errorText_ = errorStream_.str();
\r
4785 error( RtError::SYSTEM_ERROR );
\r
4787 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4788 handle->bufferPointer[0] = nextWritePointer;
\r
4790 if ( handle->drainCounter ) {
\r
4791 handle->drainCounter++;
\r
4796 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4798 // Setup parameters.
\r
4799 if ( stream_.doConvertBuffer[1] ) {
\r
4800 buffer = stream_.deviceBuffer;
\r
4801 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4802 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4805 buffer = stream_.userBuffer[1];
\r
4806 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4807 bufferBytes *= formatBytes( stream_.userFormat );
\r
4810 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4811 long nextReadPointer = handle->bufferPointer[1];
\r
4812 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4814 // Find out where the write and "safe read" pointers are.
\r
4815 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4816 if ( FAILED( result ) ) {
\r
4817 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4818 errorText_ = errorStream_.str();
\r
4819 error( RtError::SYSTEM_ERROR );
\r
4822 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4823 DWORD endRead = nextReadPointer + bufferBytes;
\r
4825 // Handling depends on whether we are INPUT or DUPLEX.
\r
4826 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4827 // then a wait here will drag the write pointers into the forbidden zone.
\r
4829 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4830 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4831 // practical way to sync up the read and write pointers reliably, given the
\r
4832 // the very complex relationship between phase and increment of the read and write
\r
4835 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4836 // provide a pre-roll period of 0.5 seconds in which we return
\r
4837 // zeros from the read buffer while the pointers sync up.
\r
4839 if ( stream_.mode == DUPLEX ) {
\r
4840 if ( safeReadPointer < endRead ) {
\r
4841 if ( duplexPrerollBytes <= 0 ) {
\r
4842 // Pre-roll time over. Be more agressive.
\r
4843 int adjustment = endRead-safeReadPointer;
\r
4845 handle->xrun[1] = true;
\r
4847 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4848 // and perform fine adjustments later.
\r
4849 // - small adjustments: back off by twice as much.
\r
4850 if ( adjustment >= 2*bufferBytes )
\r
4851 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4853 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4855 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4859 // In pre=roll time. Just do it.
\r
4860 nextReadPointer = safeReadPointer - bufferBytes;
\r
4861 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4863 endRead = nextReadPointer + bufferBytes;
\r
4866 else { // mode == INPUT
\r
4867 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4868 // See comments for playback.
\r
4869 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4870 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4871 if ( millis < 1.0 ) millis = 1.0;
\r
4872 Sleep( (DWORD) millis );
\r
4874 // Wake up and find out where we are now.
\r
4875 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4876 if ( FAILED( result ) ) {
\r
4877 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4878 errorText_ = errorStream_.str();
\r
4879 error( RtError::SYSTEM_ERROR );
\r
4882 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4886 // Lock free space in the buffer
\r
4887 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4888 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4889 if ( FAILED( result ) ) {
\r
4890 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4891 errorText_ = errorStream_.str();
\r
4892 error( RtError::SYSTEM_ERROR );
\r
4895 if ( duplexPrerollBytes <= 0 ) {
\r
4896 // Copy our buffer into the DS buffer
\r
4897 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4898 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4901 memset( buffer, 0, bufferSize1 );
\r
4902 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4903 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4906 // Update our buffer offset and unlock sound buffer
\r
4907 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4908 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4909 if ( FAILED( result ) ) {
\r
4910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4911 errorText_ = errorStream_.str();
\r
4912 error( RtError::SYSTEM_ERROR );
\r
4914 handle->bufferPointer[1] = nextReadPointer;
\r
4916 // No byte swapping necessary in DirectSound implementation.
\r
4918 // If necessary, convert 8-bit data from unsigned to signed.
\r
4919 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4920 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4922 // Do buffer conversion if necessary.
\r
4923 if ( stream_.doConvertBuffer[1] )
\r
4924 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4928 RtApi::tickStreamTime();
\r
4931 // Definitions for utility functions and callbacks
\r
4932 // specific to the DirectSound implementation.
\r
4934 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4936 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4937 RtApiDs *object = (RtApiDs *) info->object;
\r
4938 bool* isRunning = &info->isRunning;
\r
4940 while ( *isRunning == true ) {
\r
4941 object->callbackEvent();
\r
4944 _endthreadex( 0 );
\r
4948 #include "tchar.h"
\r
4950 std::string convertTChar( LPCTSTR name )
\r
4952 #if defined( UNICODE ) || defined( _UNICODE )
\r
4953 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4954 std::string s( length, 0 );
\r
4955 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
4957 std::string s( name );
\r
4963 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
4964 LPCTSTR description,
\r
4966 LPVOID lpContext )
\r
4968 bool *isInput = (bool *) lpContext;
\r
4971 bool validDevice = false;
\r
4972 if ( *isInput == true ) {
\r
4974 LPDIRECTSOUNDCAPTURE object;
\r
4976 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
4977 if ( hr != DS_OK ) return TRUE;
\r
4979 caps.dwSize = sizeof(caps);
\r
4980 hr = object->GetCaps( &caps );
\r
4981 if ( hr == DS_OK ) {
\r
4982 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
4983 validDevice = true;
\r
4985 object->Release();
\r
4989 LPDIRECTSOUND object;
\r
4990 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
4991 if ( hr != DS_OK ) return TRUE;
\r
4993 caps.dwSize = sizeof(caps);
\r
4994 hr = object->GetCaps( &caps );
\r
4995 if ( hr == DS_OK ) {
\r
4996 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
4997 validDevice = true;
\r
4999 object->Release();
\r
5002 // If good device, then save its name and guid.
\r
5003 std::string name = convertTChar( description );
\r
5004 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5005 name = "Default Device";
\r
5006 if ( validDevice ) {
\r
5007 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5008 if ( dsDevices[i].name == name ) {
\r
5009 dsDevices[i].found = true;
\r
5011 dsDevices[i].id[1] = lpguid;
\r
5012 dsDevices[i].validId[1] = true;
\r
5015 dsDevices[i].id[0] = lpguid;
\r
5016 dsDevices[i].validId[0] = true;
\r
5023 device.name = name;
\r
5024 device.found = true;
\r
5026 device.id[1] = lpguid;
\r
5027 device.validId[1] = true;
\r
5030 device.id[0] = lpguid;
\r
5031 device.validId[0] = true;
\r
5033 dsDevices.push_back( device );
\r
5039 static const char* getErrorString( int code )
\r
5043 case DSERR_ALLOCATED:
\r
5044 return "Already allocated";
\r
5046 case DSERR_CONTROLUNAVAIL:
\r
5047 return "Control unavailable";
\r
5049 case DSERR_INVALIDPARAM:
\r
5050 return "Invalid parameter";
\r
5052 case DSERR_INVALIDCALL:
\r
5053 return "Invalid call";
\r
5055 case DSERR_GENERIC:
\r
5056 return "Generic error";
\r
5058 case DSERR_PRIOLEVELNEEDED:
\r
5059 return "Priority level needed";
\r
5061 case DSERR_OUTOFMEMORY:
\r
5062 return "Out of memory";
\r
5064 case DSERR_BADFORMAT:
\r
5065 return "The sample rate or the channel format is not supported";
\r
5067 case DSERR_UNSUPPORTED:
\r
5068 return "Not supported";
\r
5070 case DSERR_NODRIVER:
\r
5071 return "No driver";
\r
5073 case DSERR_ALREADYINITIALIZED:
\r
5074 return "Already initialized";
\r
5076 case DSERR_NOAGGREGATION:
\r
5077 return "No aggregation";
\r
5079 case DSERR_BUFFERLOST:
\r
5080 return "Buffer lost";
\r
5082 case DSERR_OTHERAPPHASPRIO:
\r
5083 return "Another application already has priority";
\r
5085 case DSERR_UNINITIALIZED:
\r
5086 return "Uninitialized";
\r
5089 return "DirectSound unknown error";
\r
5092 //******************** End of __WINDOWS_DS__ *********************//
\r
5096 #if defined(__LINUX_ALSA__)
\r
5098 #include <alsa/asoundlib.h>
\r
5099 #include <unistd.h>
\r
5101 // A structure to hold various information related to the ALSA API
\r
5102 // implementation.
\r
5103 struct AlsaHandle {
\r
5104 snd_pcm_t *handles[2];
\r
5105 bool synchronized;
\r
5107 pthread_cond_t runnable_cv;
\r
5111 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5114 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5116 RtApiAlsa :: RtApiAlsa()
\r
5118 // Nothing to do here.
\r
5121 RtApiAlsa :: ~RtApiAlsa()
\r
5123 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5126 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5128 unsigned nDevices = 0;
\r
5129 int result, subdevice, card;
\r
5131 snd_ctl_t *handle;
\r
5133 // Count cards and devices
\r
5135 snd_card_next( &card );
\r
5136 while ( card >= 0 ) {
\r
5137 sprintf( name, "hw:%d", card );
\r
5138 result = snd_ctl_open( &handle, name, 0 );
\r
5139 if ( result < 0 ) {
\r
5140 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5141 errorText_ = errorStream_.str();
\r
5142 error( RtError::WARNING );
\r
5147 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5148 if ( result < 0 ) {
\r
5149 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5150 errorText_ = errorStream_.str();
\r
5151 error( RtError::WARNING );
\r
5154 if ( subdevice < 0 )
\r
5159 snd_ctl_close( handle );
\r
5160 snd_card_next( &card );
\r
5163 result = snd_ctl_open( &handle, "default", 0 );
\r
5164 if (result == 0) {
\r
5166 snd_ctl_close( handle );
\r
5172 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5174 RtAudio::DeviceInfo info;
\r
5175 info.probed = false;
\r
5177 unsigned nDevices = 0;
\r
5178 int result, subdevice, card;
\r
5180 snd_ctl_t *chandle;
\r
5182 // Count cards and devices
\r
5184 snd_card_next( &card );
\r
5185 while ( card >= 0 ) {
\r
5186 sprintf( name, "hw:%d", card );
\r
5187 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5188 if ( result < 0 ) {
\r
5189 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5190 errorText_ = errorStream_.str();
\r
5191 error( RtError::WARNING );
\r
5196 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5197 if ( result < 0 ) {
\r
5198 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5199 errorText_ = errorStream_.str();
\r
5200 error( RtError::WARNING );
\r
5203 if ( subdevice < 0 ) break;
\r
5204 if ( nDevices == device ) {
\r
5205 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5211 snd_ctl_close( chandle );
\r
5212 snd_card_next( &card );
\r
5215 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5216 if ( result == 0 ) {
\r
5217 if ( nDevices == device ) {
\r
5218 strcpy( name, "default" );
\r
5224 if ( nDevices == 0 ) {
\r
5225 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5226 error( RtError::INVALID_USE );
\r
5229 if ( device >= nDevices ) {
\r
5230 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5231 error( RtError::INVALID_USE );
\r
5236 // If a stream is already open, we cannot probe the stream devices.
\r
5237 // Thus, use the saved results.
\r
5238 if ( stream_.state != STREAM_CLOSED &&
\r
5239 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5240 snd_ctl_close( chandle );
\r
5241 if ( device >= devices_.size() ) {
\r
5242 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5243 error( RtError::WARNING );
\r
5246 return devices_[ device ];
\r
5249 int openMode = SND_PCM_ASYNC;
\r
5250 snd_pcm_stream_t stream;
\r
5251 snd_pcm_info_t *pcminfo;
\r
5252 snd_pcm_info_alloca( &pcminfo );
\r
5253 snd_pcm_t *phandle;
\r
5254 snd_pcm_hw_params_t *params;
\r
5255 snd_pcm_hw_params_alloca( ¶ms );
\r
5257 // First try for playback unless default device (which has subdev -1)
\r
5258 stream = SND_PCM_STREAM_PLAYBACK;
\r
5259 snd_pcm_info_set_stream( pcminfo, stream );
\r
5260 if ( subdevice != -1 ) {
\r
5261 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5262 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5264 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5265 if ( result < 0 ) {
\r
5266 // Device probably doesn't support playback.
\r
5267 goto captureProbe;
\r
5271 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5272 if ( result < 0 ) {
\r
5273 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5274 errorText_ = errorStream_.str();
\r
5275 error( RtError::WARNING );
\r
5276 goto captureProbe;
\r
5279 // The device is open ... fill the parameter structure.
\r
5280 result = snd_pcm_hw_params_any( phandle, params );
\r
5281 if ( result < 0 ) {
\r
5282 snd_pcm_close( phandle );
\r
5283 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5284 errorText_ = errorStream_.str();
\r
5285 error( RtError::WARNING );
\r
5286 goto captureProbe;
\r
5289 // Get output channel information.
\r
5290 unsigned int value;
\r
5291 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5292 if ( result < 0 ) {
\r
5293 snd_pcm_close( phandle );
\r
5294 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5295 errorText_ = errorStream_.str();
\r
5296 error( RtError::WARNING );
\r
5297 goto captureProbe;
\r
5299 info.outputChannels = value;
\r
5300 snd_pcm_close( phandle );
\r
5303 stream = SND_PCM_STREAM_CAPTURE;
\r
5304 snd_pcm_info_set_stream( pcminfo, stream );
\r
5306 // Now try for capture unless default device (with subdev = -1)
\r
5307 if ( subdevice != -1 ) {
\r
5308 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5309 snd_ctl_close( chandle );
\r
5310 if ( result < 0 ) {
\r
5311 // Device probably doesn't support capture.
\r
5312 if ( info.outputChannels == 0 ) return info;
\r
5313 goto probeParameters;
\r
5317 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5318 if ( result < 0 ) {
\r
5319 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5320 errorText_ = errorStream_.str();
\r
5321 error( RtError::WARNING );
\r
5322 if ( info.outputChannels == 0 ) return info;
\r
5323 goto probeParameters;
\r
5326 // The device is open ... fill the parameter structure.
\r
5327 result = snd_pcm_hw_params_any( phandle, params );
\r
5328 if ( result < 0 ) {
\r
5329 snd_pcm_close( phandle );
\r
5330 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5331 errorText_ = errorStream_.str();
\r
5332 error( RtError::WARNING );
\r
5333 if ( info.outputChannels == 0 ) return info;
\r
5334 goto probeParameters;
\r
5337 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5338 if ( result < 0 ) {
\r
5339 snd_pcm_close( phandle );
\r
5340 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5341 errorText_ = errorStream_.str();
\r
5342 error( RtError::WARNING );
\r
5343 if ( info.outputChannels == 0 ) return info;
\r
5344 goto probeParameters;
\r
5346 info.inputChannels = value;
\r
5347 snd_pcm_close( phandle );
\r
5349 // If device opens for both playback and capture, we determine the channels.
\r
5350 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5351 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5353 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5354 if ( device == 0 && info.outputChannels > 0 )
\r
5355 info.isDefaultOutput = true;
\r
5356 if ( device == 0 && info.inputChannels > 0 )
\r
5357 info.isDefaultInput = true;
\r
5360 // At this point, we just need to figure out the supported data
\r
5361 // formats and sample rates. We'll proceed by opening the device in
\r
5362 // the direction with the maximum number of channels, or playback if
\r
5363 // they are equal. This might limit our sample rate options, but so
\r
5366 if ( info.outputChannels >= info.inputChannels )
\r
5367 stream = SND_PCM_STREAM_PLAYBACK;
\r
5369 stream = SND_PCM_STREAM_CAPTURE;
\r
5370 snd_pcm_info_set_stream( pcminfo, stream );
\r
5372 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5373 if ( result < 0 ) {
\r
5374 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5375 errorText_ = errorStream_.str();
\r
5376 error( RtError::WARNING );
\r
5380 // The device is open ... fill the parameter structure.
\r
5381 result = snd_pcm_hw_params_any( phandle, params );
\r
5382 if ( result < 0 ) {
\r
5383 snd_pcm_close( phandle );
\r
5384 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5385 errorText_ = errorStream_.str();
\r
5386 error( RtError::WARNING );
\r
5390 // Test our discrete set of sample rate values.
\r
5391 info.sampleRates.clear();
\r
5392 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5393 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5394 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5396 if ( info.sampleRates.size() == 0 ) {
\r
5397 snd_pcm_close( phandle );
\r
5398 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5399 errorText_ = errorStream_.str();
\r
5400 error( RtError::WARNING );
\r
5404 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5405 snd_pcm_format_t format;
\r
5406 info.nativeFormats = 0;
\r
5407 format = SND_PCM_FORMAT_S8;
\r
5408 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5409 info.nativeFormats |= RTAUDIO_SINT8;
\r
5410 format = SND_PCM_FORMAT_S16;
\r
5411 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5412 info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 format = SND_PCM_FORMAT_S24;
\r
5414 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5415 info.nativeFormats |= RTAUDIO_SINT24;
\r
5416 format = SND_PCM_FORMAT_S32;
\r
5417 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5418 info.nativeFormats |= RTAUDIO_SINT32;
\r
5419 format = SND_PCM_FORMAT_FLOAT;
\r
5420 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5421 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5422 format = SND_PCM_FORMAT_FLOAT64;
\r
5423 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5424 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5426 // Check that we have at least one supported format
\r
5427 if ( info.nativeFormats == 0 ) {
\r
5428 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5429 errorText_ = errorStream_.str();
\r
5430 error( RtError::WARNING );
\r
5434 // Get the device name
\r
5436 result = snd_card_get_name( card, &cardname );
\r
5437 if ( result >= 0 )
\r
5438 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5441 // That's all ... close the device and return
\r
5442 snd_pcm_close( phandle );
\r
5443 info.probed = true;
\r
5447 void RtApiAlsa :: saveDeviceInfo( void )
\r
5451 unsigned int nDevices = getDeviceCount();
\r
5452 devices_.resize( nDevices );
\r
5453 for ( unsigned int i=0; i<nDevices; i++ )
\r
5454 devices_[i] = getDeviceInfo( i );
\r
5457 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5458 unsigned int firstChannel, unsigned int sampleRate,
\r
5459 RtAudioFormat format, unsigned int *bufferSize,
\r
5460 RtAudio::StreamOptions *options )
\r
5463 #if defined(__RTAUDIO_DEBUG__)
\r
5464 snd_output_t *out;
\r
5465 snd_output_stdio_attach(&out, stderr, 0);
\r
5468 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5470 unsigned nDevices = 0;
\r
5471 int result, subdevice, card;
\r
5473 snd_ctl_t *chandle;
\r
5475 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5476 snprintf(name, sizeof(name), "%s", "default");
\r
5478 // Count cards and devices
\r
5480 snd_card_next( &card );
\r
5481 while ( card >= 0 ) {
\r
5482 sprintf( name, "hw:%d", card );
\r
5483 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5484 if ( result < 0 ) {
\r
5485 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5486 errorText_ = errorStream_.str();
\r
5491 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5492 if ( result < 0 ) break;
\r
5493 if ( subdevice < 0 ) break;
\r
5494 if ( nDevices == device ) {
\r
5495 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5496 snd_ctl_close( chandle );
\r
5501 snd_ctl_close( chandle );
\r
5502 snd_card_next( &card );
\r
5505 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5506 if ( result == 0 ) {
\r
5507 if ( nDevices == device ) {
\r
5508 strcpy( name, "default" );
\r
5514 if ( nDevices == 0 ) {
\r
5515 // This should not happen because a check is made before this function is called.
\r
5516 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5520 if ( device >= nDevices ) {
\r
5521 // This should not happen because a check is made before this function is called.
\r
5522 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5529 // The getDeviceInfo() function will not work for a device that is
\r
5530 // already open. Thus, we'll probe the system before opening a
\r
5531 // stream and save the results for use by getDeviceInfo().
\r
5532 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5533 this->saveDeviceInfo();
\r
5535 snd_pcm_stream_t stream;
\r
5536 if ( mode == OUTPUT )
\r
5537 stream = SND_PCM_STREAM_PLAYBACK;
\r
5539 stream = SND_PCM_STREAM_CAPTURE;
\r
5541 snd_pcm_t *phandle;
\r
5542 int openMode = SND_PCM_ASYNC;
\r
5543 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5544 if ( result < 0 ) {
\r
5545 if ( mode == OUTPUT )
\r
5546 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5548 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5549 errorText_ = errorStream_.str();
\r
5553 // Fill the parameter structure.
\r
5554 snd_pcm_hw_params_t *hw_params;
\r
5555 snd_pcm_hw_params_alloca( &hw_params );
\r
5556 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5557 if ( result < 0 ) {
\r
5558 snd_pcm_close( phandle );
\r
5559 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5560 errorText_ = errorStream_.str();
\r
5564 #if defined(__RTAUDIO_DEBUG__)
\r
5565 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5566 snd_pcm_hw_params_dump( hw_params, out );
\r
5569 // Set access ... check user preference.
\r
5570 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5571 stream_.userInterleaved = false;
\r
5572 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5573 if ( result < 0 ) {
\r
5574 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5575 stream_.deviceInterleaved[mode] = true;
\r
5578 stream_.deviceInterleaved[mode] = false;
\r
5581 stream_.userInterleaved = true;
\r
5582 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5583 if ( result < 0 ) {
\r
5584 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5585 stream_.deviceInterleaved[mode] = false;
\r
5588 stream_.deviceInterleaved[mode] = true;
\r
5591 if ( result < 0 ) {
\r
5592 snd_pcm_close( phandle );
\r
5593 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5594 errorText_ = errorStream_.str();
\r
5598 // Determine how to set the device format.
\r
5599 stream_.userFormat = format;
\r
5600 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5602 if ( format == RTAUDIO_SINT8 )
\r
5603 deviceFormat = SND_PCM_FORMAT_S8;
\r
5604 else if ( format == RTAUDIO_SINT16 )
\r
5605 deviceFormat = SND_PCM_FORMAT_S16;
\r
5606 else if ( format == RTAUDIO_SINT24 )
\r
5607 deviceFormat = SND_PCM_FORMAT_S24;
\r
5608 else if ( format == RTAUDIO_SINT32 )
\r
5609 deviceFormat = SND_PCM_FORMAT_S32;
\r
5610 else if ( format == RTAUDIO_FLOAT32 )
\r
5611 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5612 else if ( format == RTAUDIO_FLOAT64 )
\r
5613 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5615 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5616 stream_.deviceFormat[mode] = format;
\r
5620 // The user requested format is not natively supported by the device.
\r
5621 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5622 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5623 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5627 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5628 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5629 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5633 deviceFormat = SND_PCM_FORMAT_S32;
\r
5634 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5635 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5639 deviceFormat = SND_PCM_FORMAT_S24;
\r
5640 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5641 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5645 deviceFormat = SND_PCM_FORMAT_S16;
\r
5646 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5647 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5651 deviceFormat = SND_PCM_FORMAT_S8;
\r
5652 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5653 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5657 // If we get here, no supported format was found.
\r
5658 snd_pcm_close( phandle );
\r
5659 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5660 errorText_ = errorStream_.str();
\r
5664 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5665 if ( result < 0 ) {
\r
5666 snd_pcm_close( phandle );
\r
5667 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5668 errorText_ = errorStream_.str();
\r
5672 // Determine whether byte-swaping is necessary.
\r
5673 stream_.doByteSwap[mode] = false;
\r
5674 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5675 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5676 if ( result == 0 )
\r
5677 stream_.doByteSwap[mode] = true;
\r
5678 else if (result < 0) {
\r
5679 snd_pcm_close( phandle );
\r
5680 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5681 errorText_ = errorStream_.str();
\r
5686 // Set the sample rate.
\r
5687 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5688 if ( result < 0 ) {
\r
5689 snd_pcm_close( phandle );
\r
5690 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5691 errorText_ = errorStream_.str();
\r
5695 // Determine the number of channels for this device. We support a possible
\r
5696 // minimum device channel number > than the value requested by the user.
\r
5697 stream_.nUserChannels[mode] = channels;
\r
5698 unsigned int value;
\r
5699 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5700 unsigned int deviceChannels = value;
\r
5701 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5702 snd_pcm_close( phandle );
\r
5703 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5704 errorText_ = errorStream_.str();
\r
5708 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5709 if ( result < 0 ) {
\r
5710 snd_pcm_close( phandle );
\r
5711 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5712 errorText_ = errorStream_.str();
\r
5715 deviceChannels = value;
\r
5716 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5717 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5719 // Set the device channels.
\r
5720 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5721 if ( result < 0 ) {
\r
5722 snd_pcm_close( phandle );
\r
5723 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5724 errorText_ = errorStream_.str();
\r
5728 // Set the buffer (or period) size.
\r
5730 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5731 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5732 if ( result < 0 ) {
\r
5733 snd_pcm_close( phandle );
\r
5734 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5735 errorText_ = errorStream_.str();
\r
5738 *bufferSize = periodSize;
\r
5740 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5741 unsigned int periods = 0;
\r
5742 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5743 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5744 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5745 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5746 if ( result < 0 ) {
\r
5747 snd_pcm_close( phandle );
\r
5748 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5749 errorText_ = errorStream_.str();
\r
5753 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5754 // MUST be the same in both directions!
\r
5755 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5756 snd_pcm_close( phandle );
\r
5757 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5758 errorText_ = errorStream_.str();
\r
5762 stream_.bufferSize = *bufferSize;
\r
5764 // Install the hardware configuration
\r
5765 result = snd_pcm_hw_params( phandle, hw_params );
\r
5766 if ( result < 0 ) {
\r
5767 snd_pcm_close( phandle );
\r
5768 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5769 errorText_ = errorStream_.str();
\r
5773 #if defined(__RTAUDIO_DEBUG__)
\r
5774 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5775 snd_pcm_hw_params_dump( hw_params, out );
\r
5778 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5779 snd_pcm_sw_params_t *sw_params = NULL;
\r
5780 snd_pcm_sw_params_alloca( &sw_params );
\r
5781 snd_pcm_sw_params_current( phandle, sw_params );
\r
5782 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5783 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5784 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5786 // The following two settings were suggested by Theo Veenker
\r
5787 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5788 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5790 // here are two options for a fix
\r
5791 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5792 snd_pcm_uframes_t val;
\r
5793 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5794 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5796 result = snd_pcm_sw_params( phandle, sw_params );
\r
5797 if ( result < 0 ) {
\r
5798 snd_pcm_close( phandle );
\r
5799 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5800 errorText_ = errorStream_.str();
\r
5804 #if defined(__RTAUDIO_DEBUG__)
\r
5805 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5806 snd_pcm_sw_params_dump( sw_params, out );
\r
5809 // Set flags for buffer conversion
\r
5810 stream_.doConvertBuffer[mode] = false;
\r
5811 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5812 stream_.doConvertBuffer[mode] = true;
\r
5813 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5814 stream_.doConvertBuffer[mode] = true;
\r
5815 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5816 stream_.nUserChannels[mode] > 1 )
\r
5817 stream_.doConvertBuffer[mode] = true;
\r
5819 // Allocate the ApiHandle if necessary and then save.
\r
5820 AlsaHandle *apiInfo = 0;
\r
5821 if ( stream_.apiHandle == 0 ) {
\r
5823 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5825 catch ( std::bad_alloc& ) {
\r
5826 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5830 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5831 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5835 stream_.apiHandle = (void *) apiInfo;
\r
5836 apiInfo->handles[0] = 0;
\r
5837 apiInfo->handles[1] = 0;
\r
5840 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5842 apiInfo->handles[mode] = phandle;
\r
5845 // Allocate necessary internal buffers.
\r
5846 unsigned long bufferBytes;
\r
5847 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5848 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5849 if ( stream_.userBuffer[mode] == NULL ) {
\r
5850 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5854 if ( stream_.doConvertBuffer[mode] ) {
\r
5856 bool makeBuffer = true;
\r
5857 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5858 if ( mode == INPUT ) {
\r
5859 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5860 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5861 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5865 if ( makeBuffer ) {
\r
5866 bufferBytes *= *bufferSize;
\r
5867 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5868 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5869 if ( stream_.deviceBuffer == NULL ) {
\r
5870 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5876 stream_.sampleRate = sampleRate;
\r
5877 stream_.nBuffers = periods;
\r
5878 stream_.device[mode] = device;
\r
5879 stream_.state = STREAM_STOPPED;
\r
5881 // Setup the buffer conversion information structure.
\r
5882 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5884 // Setup thread if necessary.
\r
5885 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5886 // We had already set up an output stream.
\r
5887 stream_.mode = DUPLEX;
\r
5888 // Link the streams if possible.
\r
5889 apiInfo->synchronized = false;
\r
5890 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5891 apiInfo->synchronized = true;
\r
5893 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5894 error( RtError::WARNING );
\r
5898 stream_.mode = mode;
\r
5900 // Setup callback thread.
\r
5901 stream_.callbackInfo.object = (void *) this;
\r
5903 // Set the thread attributes for joinable and realtime scheduling
\r
5904 // priority (optional). The higher priority will only take affect
\r
5905 // if the program is run as root or suid. Note, under Linux
\r
5906 // processes with CAP_SYS_NICE privilege, a user can change
\r
5907 // scheduling policy and priority (thus need not be root). See
\r
5908 // POSIX "capabilities".
\r
5909 pthread_attr_t attr;
\r
5910 pthread_attr_init( &attr );
\r
5911 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5913 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5914 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5915 // We previously attempted to increase the audio callback priority
\r
5916 // to SCHED_RR here via the attributes. However, while no errors
\r
5917 // were reported in doing so, it did not work. So, now this is
\r
5918 // done in the alsaCallbackHandler function.
\r
5919 stream_.callbackInfo.doRealtime = true;
\r
5920 int priority = options->priority;
\r
5921 int min = sched_get_priority_min( SCHED_RR );
\r
5922 int max = sched_get_priority_max( SCHED_RR );
\r
5923 if ( priority < min ) priority = min;
\r
5924 else if ( priority > max ) priority = max;
\r
5925 stream_.callbackInfo.priority = priority;
\r
5929 stream_.callbackInfo.isRunning = true;
\r
5930 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5931 pthread_attr_destroy( &attr );
\r
5933 stream_.callbackInfo.isRunning = false;
\r
5934 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5943 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5944 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5945 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5947 stream_.apiHandle = 0;
\r
5950 if ( phandle) snd_pcm_close( phandle );
\r
5952 for ( int i=0; i<2; i++ ) {
\r
5953 if ( stream_.userBuffer[i] ) {
\r
5954 free( stream_.userBuffer[i] );
\r
5955 stream_.userBuffer[i] = 0;
\r
5959 if ( stream_.deviceBuffer ) {
\r
5960 free( stream_.deviceBuffer );
\r
5961 stream_.deviceBuffer = 0;
\r
5967 void RtApiAlsa :: closeStream()
\r
5969 if ( stream_.state == STREAM_CLOSED ) {
\r
5970 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5971 error( RtError::WARNING );
\r
5975 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5976 stream_.callbackInfo.isRunning = false;
\r
5977 MUTEX_LOCK( &stream_.mutex );
\r
5978 if ( stream_.state == STREAM_STOPPED ) {
\r
5979 apiInfo->runnable = true;
\r
5980 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5982 MUTEX_UNLOCK( &stream_.mutex );
\r
5983 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5985 if ( stream_.state == STREAM_RUNNING ) {
\r
5986 stream_.state = STREAM_STOPPED;
\r
5987 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5988 snd_pcm_drop( apiInfo->handles[0] );
\r
5989 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5990 snd_pcm_drop( apiInfo->handles[1] );
\r
5994 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5995 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5996 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5998 stream_.apiHandle = 0;
\r
6001 for ( int i=0; i<2; i++ ) {
\r
6002 if ( stream_.userBuffer[i] ) {
\r
6003 free( stream_.userBuffer[i] );
\r
6004 stream_.userBuffer[i] = 0;
\r
6008 if ( stream_.deviceBuffer ) {
\r
6009 free( stream_.deviceBuffer );
\r
6010 stream_.deviceBuffer = 0;
\r
6013 stream_.mode = UNINITIALIZED;
\r
6014 stream_.state = STREAM_CLOSED;
\r
6017 void RtApiAlsa :: startStream()
\r
6019 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6022 if ( stream_.state == STREAM_RUNNING ) {
\r
6023 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6024 error( RtError::WARNING );
\r
6028 MUTEX_LOCK( &stream_.mutex );
\r
6031 snd_pcm_state_t state;
\r
6032 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6033 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6034 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6035 state = snd_pcm_state( handle[0] );
\r
6036 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6037 result = snd_pcm_prepare( handle[0] );
\r
6038 if ( result < 0 ) {
\r
6039 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6040 errorText_ = errorStream_.str();
\r
6046 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6047 state = snd_pcm_state( handle[1] );
\r
6048 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6049 result = snd_pcm_prepare( handle[1] );
\r
6050 if ( result < 0 ) {
\r
6051 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6052 errorText_ = errorStream_.str();
\r
6058 stream_.state = STREAM_RUNNING;
\r
6061 apiInfo->runnable = true;
\r
6062 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6063 MUTEX_UNLOCK( &stream_.mutex );
\r
6065 if ( result >= 0 ) return;
\r
6066 error( RtError::SYSTEM_ERROR );
\r
6069 void RtApiAlsa :: stopStream()
\r
6072 if ( stream_.state == STREAM_STOPPED ) {
\r
6073 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6074 error( RtError::WARNING );
\r
6078 stream_.state = STREAM_STOPPED;
\r
6079 MUTEX_LOCK( &stream_.mutex );
\r
6082 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6083 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6084 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6085 if ( apiInfo->synchronized )
\r
6086 result = snd_pcm_drop( handle[0] );
\r
6088 result = snd_pcm_drain( handle[0] );
\r
6089 if ( result < 0 ) {
\r
6090 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6091 errorText_ = errorStream_.str();
\r
6096 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6097 result = snd_pcm_drop( handle[1] );
\r
6098 if ( result < 0 ) {
\r
6099 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6100 errorText_ = errorStream_.str();
\r
6106 MUTEX_UNLOCK( &stream_.mutex );
\r
6108 if ( result >= 0 ) return;
\r
6109 error( RtError::SYSTEM_ERROR );
\r
6112 void RtApiAlsa :: abortStream()
\r
6115 if ( stream_.state == STREAM_STOPPED ) {
\r
6116 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6117 error( RtError::WARNING );
\r
6121 stream_.state = STREAM_STOPPED;
\r
6122 MUTEX_LOCK( &stream_.mutex );
\r
6125 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6126 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6127 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6128 result = snd_pcm_drop( handle[0] );
\r
6129 if ( result < 0 ) {
\r
6130 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6131 errorText_ = errorStream_.str();
\r
6136 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6137 result = snd_pcm_drop( handle[1] );
\r
6138 if ( result < 0 ) {
\r
6139 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6140 errorText_ = errorStream_.str();
\r
6146 MUTEX_UNLOCK( &stream_.mutex );
\r
6148 if ( result >= 0 ) return;
\r
6149 error( RtError::SYSTEM_ERROR );
\r
6152 void RtApiAlsa :: callbackEvent()
\r
6154 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6155 if ( stream_.state == STREAM_STOPPED ) {
\r
6156 MUTEX_LOCK( &stream_.mutex );
\r
6157 while ( !apiInfo->runnable )
\r
6158 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6160 if ( stream_.state != STREAM_RUNNING ) {
\r
6161 MUTEX_UNLOCK( &stream_.mutex );
\r
6164 MUTEX_UNLOCK( &stream_.mutex );
\r
6167 if ( stream_.state == STREAM_CLOSED ) {
\r
6168 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6169 error( RtError::WARNING );
\r
6173 int doStopStream = 0;
\r
6174 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6175 double streamTime = getStreamTime();
\r
6176 RtAudioStreamStatus status = 0;
\r
6177 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6178 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6179 apiInfo->xrun[0] = false;
\r
6181 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6182 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6183 apiInfo->xrun[1] = false;
\r
6185 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6186 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6188 if ( doStopStream == 2 ) {
\r
6193 MUTEX_LOCK( &stream_.mutex );
\r
6195 // The state might change while waiting on a mutex.
\r
6196 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6201 snd_pcm_t **handle;
\r
6202 snd_pcm_sframes_t frames;
\r
6203 RtAudioFormat format;
\r
6204 handle = (snd_pcm_t **) apiInfo->handles;
\r
6206 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6208 // Setup parameters.
\r
6209 if ( stream_.doConvertBuffer[1] ) {
\r
6210 buffer = stream_.deviceBuffer;
\r
6211 channels = stream_.nDeviceChannels[1];
\r
6212 format = stream_.deviceFormat[1];
\r
6215 buffer = stream_.userBuffer[1];
\r
6216 channels = stream_.nUserChannels[1];
\r
6217 format = stream_.userFormat;
\r
6220 // Read samples from device in interleaved/non-interleaved format.
\r
6221 if ( stream_.deviceInterleaved[1] )
\r
6222 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6224 void *bufs[channels];
\r
6225 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6226 for ( int i=0; i<channels; i++ )
\r
6227 bufs[i] = (void *) (buffer + (i * offset));
\r
6228 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6231 if ( result < (int) stream_.bufferSize ) {
\r
6232 // Either an error or overrun occured.
\r
6233 if ( result == -EPIPE ) {
\r
6234 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6235 if ( state == SND_PCM_STATE_XRUN ) {
\r
6236 apiInfo->xrun[1] = true;
\r
6237 result = snd_pcm_prepare( handle[1] );
\r
6238 if ( result < 0 ) {
\r
6239 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6240 errorText_ = errorStream_.str();
\r
6244 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6245 errorText_ = errorStream_.str();
\r
6249 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6250 errorText_ = errorStream_.str();
\r
6252 error( RtError::WARNING );
\r
6256 // Do byte swapping if necessary.
\r
6257 if ( stream_.doByteSwap[1] )
\r
6258 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6260 // Do buffer conversion if necessary.
\r
6261 if ( stream_.doConvertBuffer[1] )
\r
6262 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6264 // Check stream latency
\r
6265 result = snd_pcm_delay( handle[1], &frames );
\r
6266 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6271 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6273 // Setup parameters and do buffer conversion if necessary.
\r
6274 if ( stream_.doConvertBuffer[0] ) {
\r
6275 buffer = stream_.deviceBuffer;
\r
6276 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6277 channels = stream_.nDeviceChannels[0];
\r
6278 format = stream_.deviceFormat[0];
\r
6281 buffer = stream_.userBuffer[0];
\r
6282 channels = stream_.nUserChannels[0];
\r
6283 format = stream_.userFormat;
\r
6286 // Do byte swapping if necessary.
\r
6287 if ( stream_.doByteSwap[0] )
\r
6288 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6290 // Write samples to device in interleaved/non-interleaved format.
\r
6291 if ( stream_.deviceInterleaved[0] )
\r
6292 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6294 void *bufs[channels];
\r
6295 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6296 for ( int i=0; i<channels; i++ )
\r
6297 bufs[i] = (void *) (buffer + (i * offset));
\r
6298 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6301 if ( result < (int) stream_.bufferSize ) {
\r
6302 // Either an error or underrun occured.
\r
6303 if ( result == -EPIPE ) {
\r
6304 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6305 if ( state == SND_PCM_STATE_XRUN ) {
\r
6306 apiInfo->xrun[0] = true;
\r
6307 result = snd_pcm_prepare( handle[0] );
\r
6308 if ( result < 0 ) {
\r
6309 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6310 errorText_ = errorStream_.str();
\r
6314 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6315 errorText_ = errorStream_.str();
\r
6319 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6320 errorText_ = errorStream_.str();
\r
6322 error( RtError::WARNING );
\r
6326 // Check stream latency
\r
6327 result = snd_pcm_delay( handle[0], &frames );
\r
6328 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6332 MUTEX_UNLOCK( &stream_.mutex );
\r
6334 RtApi::tickStreamTime();
\r
6335 if ( doStopStream == 1 ) this->stopStream();
\r
6338 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6340 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6341 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6342 bool *isRunning = &info->isRunning;
\r
6344 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6345 if ( &info->doRealtime ) {
\r
6346 pthread_t tID = pthread_self(); // ID of this thread
\r
6347 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6348 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6352 while ( *isRunning == true ) {
\r
6353 pthread_testcancel();
\r
6354 object->callbackEvent();
\r
6357 pthread_exit( NULL );
\r
6360 //******************** End of __LINUX_ALSA__ *********************//
\r
6363 #if defined(__LINUX_PULSE__)
\r
6365 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6366 // and Tristan Matthews.
\r
6368 #include <pulse/error.h>
\r
6369 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend will accept, zero-terminated so
// callers can iterate without a separate length constant.
namespace {
const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                               44100, 48000, 96000, 0 };
}
\r
6376 struct rtaudio_pa_format_mapping_t {
\r
6377 RtAudioFormat rtaudio_format;
\r
6378 pa_sample_format_t pa_format;
\r
6381 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6382 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6383 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6384 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6385 {0, PA_SAMPLE_INVALID}};
\r
6387 struct PulseAudioHandle {
\r
6388 pa_simple *s_play;
\r
6391 pthread_cond_t runnable_cv;
\r
6393 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6396 RtApiPulse::~RtApiPulse()
\r
6398 if ( stream_.state != STREAM_CLOSED )
\r
6402 unsigned int RtApiPulse::getDeviceCount( void )
\r
6407 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6409 RtAudio::DeviceInfo info;
\r
6410 info.probed = true;
\r
6411 info.name = "PulseAudio";
\r
6412 info.outputChannels = 2;
\r
6413 info.inputChannels = 2;
\r
6414 info.duplexChannels = 2;
\r
6415 info.isDefaultOutput = true;
\r
6416 info.isDefaultInput = true;
\r
6418 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6419 info.sampleRates.push_back( *sr );
\r
6421 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6426 extern "C" void *pulseaudio_callback( void * user )
\r
6428 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6429 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6430 volatile bool *isRunning = &cbi->isRunning;
\r
6432 while ( *isRunning ) {
\r
6433 pthread_testcancel();
\r
6434 context->callbackEvent();
\r
6437 pthread_exit( NULL );
\r
6440 void RtApiPulse::closeStream( void )
\r
6442 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6444 stream_.callbackInfo.isRunning = false;
\r
6446 MUTEX_LOCK( &stream_.mutex );
\r
6447 if ( stream_.state == STREAM_STOPPED ) {
\r
6448 pah->runnable = true;
\r
6449 pthread_cond_signal( &pah->runnable_cv );
\r
6451 MUTEX_UNLOCK( &stream_.mutex );
\r
6453 pthread_join( pah->thread, 0 );
\r
6454 if ( pah->s_play ) {
\r
6455 pa_simple_flush( pah->s_play, NULL );
\r
6456 pa_simple_free( pah->s_play );
\r
6459 pa_simple_free( pah->s_rec );
\r
6461 pthread_cond_destroy( &pah->runnable_cv );
\r
6463 stream_.apiHandle = 0;
\r
6466 if ( stream_.userBuffer[0] ) {
\r
6467 free( stream_.userBuffer[0] );
\r
6468 stream_.userBuffer[0] = 0;
\r
6470 if ( stream_.userBuffer[1] ) {
\r
6471 free( stream_.userBuffer[1] );
\r
6472 stream_.userBuffer[1] = 0;
\r
6475 stream_.state = STREAM_CLOSED;
\r
6476 stream_.mode = UNINITIALIZED;
\r
6479 void RtApiPulse::callbackEvent( void )
\r
6481 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6483 if ( stream_.state == STREAM_STOPPED ) {
\r
6484 MUTEX_LOCK( &stream_.mutex );
\r
6485 while ( !pah->runnable )
\r
6486 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6488 if ( stream_.state != STREAM_RUNNING ) {
\r
6489 MUTEX_UNLOCK( &stream_.mutex );
\r
6492 MUTEX_UNLOCK( &stream_.mutex );
\r
6495 if ( stream_.state == STREAM_CLOSED ) {
\r
6496 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6497 "this shouldn't happen!";
\r
6498 error( RtError::WARNING );
\r
6502 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6503 double streamTime = getStreamTime();
\r
6504 RtAudioStreamStatus status = 0;
\r
6505 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6506 stream_.bufferSize, streamTime, status,
\r
6507 stream_.callbackInfo.userData );
\r
6509 if ( doStopStream == 2 ) {
\r
6514 MUTEX_LOCK( &stream_.mutex );
\r
6515 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6516 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6518 if ( stream_.state != STREAM_RUNNING )
\r
6523 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6524 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6525 convertBuffer( stream_.deviceBuffer,
\r
6526 stream_.userBuffer[OUTPUT],
\r
6527 stream_.convertInfo[OUTPUT] );
\r
6528 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6529 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6531 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6532 formatBytes( stream_.userFormat );
\r
6534 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6535 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6536 pa_strerror( pa_error ) << ".";
\r
6537 errorText_ = errorStream_.str();
\r
6538 error( RtError::WARNING );
\r
6542 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6543 if ( stream_.doConvertBuffer[INPUT] )
\r
6544 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6545 formatBytes( stream_.deviceFormat[INPUT] );
\r
6547 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6548 formatBytes( stream_.userFormat );
\r
6550 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6551 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6552 pa_strerror( pa_error ) << ".";
\r
6553 errorText_ = errorStream_.str();
\r
6554 error( RtError::WARNING );
\r
6556 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6557 convertBuffer( stream_.userBuffer[INPUT],
\r
6558 stream_.deviceBuffer,
\r
6559 stream_.convertInfo[INPUT] );
\r
6564 MUTEX_UNLOCK( &stream_.mutex );
\r
6565 RtApi::tickStreamTime();
\r
6567 if ( doStopStream == 1 )
\r
6571 void RtApiPulse::startStream( void )
\r
6573 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6575 if ( stream_.state == STREAM_CLOSED ) {
\r
6576 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6577 error( RtError::INVALID_USE );
\r
6580 if ( stream_.state == STREAM_RUNNING ) {
\r
6581 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6582 error( RtError::WARNING );
\r
6586 MUTEX_LOCK( &stream_.mutex );
\r
6588 stream_.state = STREAM_RUNNING;
\r
6590 pah->runnable = true;
\r
6591 pthread_cond_signal( &pah->runnable_cv );
\r
6592 MUTEX_UNLOCK( &stream_.mutex );
\r
6595 void RtApiPulse::stopStream( void )
\r
6597 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6599 if ( stream_.state == STREAM_CLOSED ) {
\r
6600 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6601 error( RtError::INVALID_USE );
\r
6604 if ( stream_.state == STREAM_STOPPED ) {
\r
6605 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6606 error( RtError::WARNING );
\r
6610 stream_.state = STREAM_STOPPED;
\r
6611 MUTEX_LOCK( &stream_.mutex );
\r
6613 if ( pah && pah->s_play ) {
\r
6615 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6616 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6617 pa_strerror( pa_error ) << ".";
\r
6618 errorText_ = errorStream_.str();
\r
6619 MUTEX_UNLOCK( &stream_.mutex );
\r
6620 error( RtError::SYSTEM_ERROR );
\r
6624 stream_.state = STREAM_STOPPED;
\r
6625 MUTEX_UNLOCK( &stream_.mutex );
\r
6628 void RtApiPulse::abortStream( void )
\r
6630 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6632 if ( stream_.state == STREAM_CLOSED ) {
\r
6633 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6634 error( RtError::INVALID_USE );
\r
6637 if ( stream_.state == STREAM_STOPPED ) {
\r
6638 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6639 error( RtError::WARNING );
\r
6643 stream_.state = STREAM_STOPPED;
\r
6644 MUTEX_LOCK( &stream_.mutex );
\r
6646 if ( pah && pah->s_play ) {
\r
6648 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6649 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6650 pa_strerror( pa_error ) << ".";
\r
6651 errorText_ = errorStream_.str();
\r
6652 MUTEX_UNLOCK( &stream_.mutex );
\r
6653 error( RtError::SYSTEM_ERROR );
\r
6657 stream_.state = STREAM_STOPPED;
\r
6658 MUTEX_UNLOCK( &stream_.mutex );
\r
6661 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6662 unsigned int channels, unsigned int firstChannel,
\r
6663 unsigned int sampleRate, RtAudioFormat format,
\r
6664 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6666 PulseAudioHandle *pah = 0;
\r
6667 unsigned long bufferBytes = 0;
\r
6668 pa_sample_spec ss;
\r
6670 if ( device != 0 ) return false;
\r
6671 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6672 if ( channels != 1 && channels != 2 ) {
\r
6673 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6676 ss.channels = channels;
\r
6678 if ( firstChannel != 0 ) return false;
\r
6680 bool sr_found = false;
\r
6681 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6682 if ( sampleRate == *sr ) {
\r
6684 stream_.sampleRate = sampleRate;
\r
6685 ss.rate = sampleRate;
\r
6689 if ( !sr_found ) {
\r
6690 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6694 bool sf_found = 0;
\r
6695 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6696 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6697 if ( format == sf->rtaudio_format ) {
\r
6699 stream_.userFormat = sf->rtaudio_format;
\r
6700 ss.format = sf->pa_format;
\r
6704 if ( !sf_found ) {
\r
6705 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6709 // Set interleaving parameters.
\r
6710 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6711 else stream_.userInterleaved = true;
\r
6712 stream_.deviceInterleaved[mode] = true;
\r
6713 stream_.nBuffers = 1;
\r
6714 stream_.doByteSwap[mode] = false;
\r
6715 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6716 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6717 stream_.nUserChannels[mode] = channels;
\r
6718 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6719 stream_.channelOffset[mode] = 0;
\r
6721 // Allocate necessary internal buffers.
\r
6722 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6723 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6724 if ( stream_.userBuffer[mode] == NULL ) {
\r
6725 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6728 stream_.bufferSize = *bufferSize;
\r
6730 if ( stream_.doConvertBuffer[mode] ) {
\r
6732 bool makeBuffer = true;
\r
6733 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6734 if ( mode == INPUT ) {
\r
6735 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6736 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6737 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6741 if ( makeBuffer ) {
\r
6742 bufferBytes *= *bufferSize;
\r
6743 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6744 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6745 if ( stream_.deviceBuffer == NULL ) {
\r
6746 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6752 stream_.device[mode] = device;
\r
6753 stream_.state = STREAM_STOPPED;
\r
6755 // Setup the buffer conversion information structure.
\r
6756 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6758 if ( !stream_.apiHandle ) {
\r
6759 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6761 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6765 stream_.apiHandle = pah;
\r
6766 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6767 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6771 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6776 pah->s_rec = pa_simple_new( NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error );
\r
6777 if ( !pah->s_rec ) {
\r
6778 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6783 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6784 if ( !pah->s_play ) {
\r
6785 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6793 if ( stream_.mode == UNINITIALIZED )
\r
6794 stream_.mode = mode;
\r
6795 else if ( stream_.mode == mode )
\r
6798 stream_.mode = DUPLEX;
\r
6800 stream_.state = STREAM_STOPPED;
\r
6802 if ( !stream_.callbackInfo.isRunning ) {
\r
6803 stream_.callbackInfo.object = this;
\r
6804 stream_.callbackInfo.isRunning = true;
\r
6805 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6806 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6817 //******************** End of __LINUX_PULSE__ *********************//
\r
6820 #if defined(__LINUX_OSS__)
\r
6822 #include <unistd.h>
\r
6823 #include <sys/ioctl.h>
\r
6824 #include <unistd.h>
\r
6825 #include <fcntl.h>
\r
6826 #include "soundcard.h"
\r
6827 #include <errno.h>
\r
6830 extern "C" void *ossCallbackHandler(void * ptr);
\r
6832 // A structure to hold various information related to the OSS API
\r
6833 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback, capture)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // whether i/o has been triggered
  pthread_cond_t runnable; // condition the callback thread waits on

  OssHandle()
    : triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6844 RtApiOss :: RtApiOss()
\r
6846 // Nothing to do here.
\r
6849 RtApiOss :: ~RtApiOss()
\r
6851 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6854 unsigned int RtApiOss :: getDeviceCount( void )
\r
6856 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6857 if ( mixerfd == -1 ) {
\r
6858 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6859 error( RtError::WARNING );
\r
6863 oss_sysinfo sysinfo;
\r
6864 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6866 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6867 error( RtError::WARNING );
\r
6872 return sysinfo.numaudios;
\r
6875 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6877 RtAudio::DeviceInfo info;
\r
6878 info.probed = false;
\r
6880 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6881 if ( mixerfd == -1 ) {
\r
6882 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6883 error( RtError::WARNING );
\r
6887 oss_sysinfo sysinfo;
\r
6888 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6889 if ( result == -1 ) {
\r
6891 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6892 error( RtError::WARNING );
\r
6896 unsigned nDevices = sysinfo.numaudios;
\r
6897 if ( nDevices == 0 ) {
\r
6899 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6900 error( RtError::INVALID_USE );
\r
6903 if ( device >= nDevices ) {
\r
6905 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6906 error( RtError::INVALID_USE );
\r
6909 oss_audioinfo ainfo;
\r
6910 ainfo.dev = device;
\r
6911 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6913 if ( result == -1 ) {
\r
6914 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6915 errorText_ = errorStream_.str();
\r
6916 error( RtError::WARNING );
\r
6921 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6922 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6923 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6924 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6925 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6928 // Probe data formats ... do for input
\r
6929 unsigned long mask = ainfo.iformats;
\r
6930 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6931 info.nativeFormats |= RTAUDIO_SINT16;
\r
6932 if ( mask & AFMT_S8 )
\r
6933 info.nativeFormats |= RTAUDIO_SINT8;
\r
6934 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6935 info.nativeFormats |= RTAUDIO_SINT32;
\r
6936 if ( mask & AFMT_FLOAT )
\r
6937 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6938 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6939 info.nativeFormats |= RTAUDIO_SINT24;
\r
6941 // Check that we have at least one supported format
\r
6942 if ( info.nativeFormats == 0 ) {
\r
6943 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6944 errorText_ = errorStream_.str();
\r
6945 error( RtError::WARNING );
\r
6949 // Probe the supported sample rates.
\r
6950 info.sampleRates.clear();
\r
6951 if ( ainfo.nrates ) {
\r
6952 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6953 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6954 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6955 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6962 // Check min and max rate values;
\r
6963 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6964 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6965 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6969 if ( info.sampleRates.size() == 0 ) {
\r
6970 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6971 errorText_ = errorStream_.str();
\r
6972 error( RtError::WARNING );
\r
6975 info.probed = true;
\r
6976 info.name = ainfo.name;
\r
6983 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6984 unsigned int firstChannel, unsigned int sampleRate,
\r
6985 RtAudioFormat format, unsigned int *bufferSize,
\r
6986 RtAudio::StreamOptions *options )
\r
6988 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6989 if ( mixerfd == -1 ) {
\r
6990 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6994 oss_sysinfo sysinfo;
\r
6995 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6996 if ( result == -1 ) {
\r
6998 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7002 unsigned nDevices = sysinfo.numaudios;
\r
7003 if ( nDevices == 0 ) {
\r
7004 // This should not happen because a check is made before this function is called.
\r
7006 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7010 if ( device >= nDevices ) {
\r
7011 // This should not happen because a check is made before this function is called.
\r
7013 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7017 oss_audioinfo ainfo;
\r
7018 ainfo.dev = device;
\r
7019 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7021 if ( result == -1 ) {
\r
7022 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7023 errorText_ = errorStream_.str();
\r
7027 // Check if device supports input or output
\r
7028 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7029 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7030 if ( mode == OUTPUT )
\r
7031 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7033 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7034 errorText_ = errorStream_.str();
\r
7039 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7040 if ( mode == OUTPUT )
\r
7041 flags |= O_WRONLY;
\r
7042 else { // mode == INPUT
\r
7043 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7044 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7045 close( handle->id[0] );
\r
7046 handle->id[0] = 0;
\r
7047 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7048 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7049 errorText_ = errorStream_.str();
\r
7052 // Check that the number previously set channels is the same.
\r
7053 if ( stream_.nUserChannels[0] != channels ) {
\r
7054 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7055 errorText_ = errorStream_.str();
\r
7061 flags |= O_RDONLY;
\r
7064 // Set exclusive access if specified.
\r
7065 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7067 // Try to open the device.
\r
7069 fd = open( ainfo.devnode, flags, 0 );
\r
7071 if ( errno == EBUSY )
\r
7072 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7074 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7075 errorText_ = errorStream_.str();
\r
7079 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7081 if ( flags | O_RDWR ) {
\r
7082 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7083 if ( result == -1) {
\r
7084 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7085 errorText_ = errorStream_.str();
\r
7091 // Check the device channel support.
\r
7092 stream_.nUserChannels[mode] = channels;
\r
7093 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7095 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7096 errorText_ = errorStream_.str();
\r
7100 // Set the number of channels.
\r
7101 int deviceChannels = channels + firstChannel;
\r
7102 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7103 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7105 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7106 errorText_ = errorStream_.str();
\r
7109 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7111 // Get the data format mask
\r
7113 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7114 if ( result == -1 ) {
\r
7116 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7117 errorText_ = errorStream_.str();
\r
7121 // Determine how to set the device format.
\r
7122 stream_.userFormat = format;
\r
7123 int deviceFormat = -1;
\r
7124 stream_.doByteSwap[mode] = false;
\r
7125 if ( format == RTAUDIO_SINT8 ) {
\r
7126 if ( mask & AFMT_S8 ) {
\r
7127 deviceFormat = AFMT_S8;
\r
7128 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7131 else if ( format == RTAUDIO_SINT16 ) {
\r
7132 if ( mask & AFMT_S16_NE ) {
\r
7133 deviceFormat = AFMT_S16_NE;
\r
7134 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7136 else if ( mask & AFMT_S16_OE ) {
\r
7137 deviceFormat = AFMT_S16_OE;
\r
7138 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7139 stream_.doByteSwap[mode] = true;
\r
7142 else if ( format == RTAUDIO_SINT24 ) {
\r
7143 if ( mask & AFMT_S24_NE ) {
\r
7144 deviceFormat = AFMT_S24_NE;
\r
7145 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7147 else if ( mask & AFMT_S24_OE ) {
\r
7148 deviceFormat = AFMT_S24_OE;
\r
7149 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7150 stream_.doByteSwap[mode] = true;
\r
7153 else if ( format == RTAUDIO_SINT32 ) {
\r
7154 if ( mask & AFMT_S32_NE ) {
\r
7155 deviceFormat = AFMT_S32_NE;
\r
7156 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7158 else if ( mask & AFMT_S32_OE ) {
\r
7159 deviceFormat = AFMT_S32_OE;
\r
7160 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7161 stream_.doByteSwap[mode] = true;
\r
7165 if ( deviceFormat == -1 ) {
\r
7166 // The user requested format is not natively supported by the device.
\r
7167 if ( mask & AFMT_S16_NE ) {
\r
7168 deviceFormat = AFMT_S16_NE;
\r
7169 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7171 else if ( mask & AFMT_S32_NE ) {
\r
7172 deviceFormat = AFMT_S32_NE;
\r
7173 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7175 else if ( mask & AFMT_S24_NE ) {
\r
7176 deviceFormat = AFMT_S24_NE;
\r
7177 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7179 else if ( mask & AFMT_S16_OE ) {
\r
7180 deviceFormat = AFMT_S16_OE;
\r
7181 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7182 stream_.doByteSwap[mode] = true;
\r
7184 else if ( mask & AFMT_S32_OE ) {
\r
7185 deviceFormat = AFMT_S32_OE;
\r
7186 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7187 stream_.doByteSwap[mode] = true;
\r
7189 else if ( mask & AFMT_S24_OE ) {
\r
7190 deviceFormat = AFMT_S24_OE;
\r
7191 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7192 stream_.doByteSwap[mode] = true;
\r
7194 else if ( mask & AFMT_S8) {
\r
7195 deviceFormat = AFMT_S8;
\r
7196 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7200 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7201 // This really shouldn't happen ...
\r
7203 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7204 errorText_ = errorStream_.str();
\r
7208 // Set the data format.
\r
7209 int temp = deviceFormat;
\r
7210 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7211 if ( result == -1 || deviceFormat != temp ) {
\r
7213 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7214 errorText_ = errorStream_.str();
\r
7218 // Attempt to set the buffer size. According to OSS, the minimum
\r
7219 // number of buffers is two. The supposed minimum buffer size is 16
\r
7220 // bytes, so that will be our lower bound. The argument to this
\r
7221 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7222 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7223 // We'll check the actual value used near the end of the setup
\r
7225 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7226 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7228 if ( options ) buffers = options->numberOfBuffers;
\r
7229 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7230 if ( buffers < 2 ) buffers = 3;
\r
7231 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7232 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7233 if ( result == -1 ) {
\r
7235 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7236 errorText_ = errorStream_.str();
\r
7239 stream_.nBuffers = buffers;
\r
7241 // Save buffer size (in sample frames).
\r
7242 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7243 stream_.bufferSize = *bufferSize;
\r
7245 // Set the sample rate.
\r
7246 int srate = sampleRate;
\r
7247 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7248 if ( result == -1 ) {
\r
7250 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7251 errorText_ = errorStream_.str();
\r
7255 // Verify the sample rate setup worked.
\r
7256 if ( abs( srate - sampleRate ) > 100 ) {
\r
7258 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7259 errorText_ = errorStream_.str();
\r
7262 stream_.sampleRate = sampleRate;
\r
7264 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7265 // We're doing duplex setup here.
\r
7266 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7267 stream_.nDeviceChannels[0] = deviceChannels;
\r
7270 // Set interleaving parameters.
\r
7271 stream_.userInterleaved = true;
\r
7272 stream_.deviceInterleaved[mode] = true;
\r
7273 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7274 stream_.userInterleaved = false;
\r
7276 // Set flags for buffer conversion
\r
7277 stream_.doConvertBuffer[mode] = false;
\r
7278 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7279 stream_.doConvertBuffer[mode] = true;
\r
7280 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7281 stream_.doConvertBuffer[mode] = true;
\r
7282 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7283 stream_.nUserChannels[mode] > 1 )
\r
7284 stream_.doConvertBuffer[mode] = true;
\r
7286 // Allocate the stream handles if necessary and then save.
\r
7287 if ( stream_.apiHandle == 0 ) {
\r
7289 handle = new OssHandle;
\r
7291 catch ( std::bad_alloc& ) {
\r
7292 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7296 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7297 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7301 stream_.apiHandle = (void *) handle;
\r
7304 handle = (OssHandle *) stream_.apiHandle;
\r
7306 handle->id[mode] = fd;
\r
7308 // Allocate necessary internal buffers.
\r
7309 unsigned long bufferBytes;
\r
7310 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7311 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7312 if ( stream_.userBuffer[mode] == NULL ) {
\r
7313 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7317 if ( stream_.doConvertBuffer[mode] ) {
\r
7319 bool makeBuffer = true;
\r
7320 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7321 if ( mode == INPUT ) {
\r
7322 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7323 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7324 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7328 if ( makeBuffer ) {
\r
7329 bufferBytes *= *bufferSize;
\r
7330 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7331 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7332 if ( stream_.deviceBuffer == NULL ) {
\r
7333 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7339 stream_.device[mode] = device;
\r
7340 stream_.state = STREAM_STOPPED;
\r
7342 // Setup the buffer conversion information structure.
\r
7343 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7345 // Setup thread if necessary.
\r
7346 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7347 // We had already set up an output stream.
\r
7348 stream_.mode = DUPLEX;
\r
7349 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7352 stream_.mode = mode;
\r
7354 // Setup callback thread.
\r
7355 stream_.callbackInfo.object = (void *) this;
\r
7357 // Set the thread attributes for joinable and realtime scheduling
\r
7358 // priority. The higher priority will only take affect if the
\r
7359 // program is run as root or suid.
\r
7360 pthread_attr_t attr;
\r
7361 pthread_attr_init( &attr );
\r
7362 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7363 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7364 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7365 struct sched_param param;
\r
7366 int priority = options->priority;
\r
7367 int min = sched_get_priority_min( SCHED_RR );
\r
7368 int max = sched_get_priority_max( SCHED_RR );
\r
7369 if ( priority < min ) priority = min;
\r
7370 else if ( priority > max ) priority = max;
\r
7371 param.sched_priority = priority;
\r
7372 pthread_attr_setschedparam( &attr, ¶m );
\r
7373 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7376 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7378 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7381 stream_.callbackInfo.isRunning = true;
\r
7382 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7383 pthread_attr_destroy( &attr );
\r
7385 stream_.callbackInfo.isRunning = false;
\r
7386 errorText_ = "RtApiOss::error creating callback thread!";
\r
7395 pthread_cond_destroy( &handle->runnable );
\r
7396 if ( handle->id[0] ) close( handle->id[0] );
\r
7397 if ( handle->id[1] ) close( handle->id[1] );
\r
7399 stream_.apiHandle = 0;
\r
7402 for ( int i=0; i<2; i++ ) {
\r
7403 if ( stream_.userBuffer[i] ) {
\r
7404 free( stream_.userBuffer[i] );
\r
7405 stream_.userBuffer[i] = 0;
\r
7409 if ( stream_.deviceBuffer ) {
\r
7410 free( stream_.deviceBuffer );
\r
7411 stream_.deviceBuffer = 0;
\r
7417 void RtApiOss :: closeStream()
\r
7419 if ( stream_.state == STREAM_CLOSED ) {
\r
7420 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7421 error( RtError::WARNING );
\r
7425 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7426 stream_.callbackInfo.isRunning = false;
\r
7427 MUTEX_LOCK( &stream_.mutex );
\r
7428 if ( stream_.state == STREAM_STOPPED )
\r
7429 pthread_cond_signal( &handle->runnable );
\r
7430 MUTEX_UNLOCK( &stream_.mutex );
\r
7431 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7433 if ( stream_.state == STREAM_RUNNING ) {
\r
7434 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7435 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7437 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7438 stream_.state = STREAM_STOPPED;
\r
7442 pthread_cond_destroy( &handle->runnable );
\r
7443 if ( handle->id[0] ) close( handle->id[0] );
\r
7444 if ( handle->id[1] ) close( handle->id[1] );
\r
7446 stream_.apiHandle = 0;
\r
7449 for ( int i=0; i<2; i++ ) {
\r
7450 if ( stream_.userBuffer[i] ) {
\r
7451 free( stream_.userBuffer[i] );
\r
7452 stream_.userBuffer[i] = 0;
\r
7456 if ( stream_.deviceBuffer ) {
\r
7457 free( stream_.deviceBuffer );
\r
7458 stream_.deviceBuffer = 0;
\r
7461 stream_.mode = UNINITIALIZED;
\r
7462 stream_.state = STREAM_CLOSED;
\r
7465 void RtApiOss :: startStream()
\r
7468 if ( stream_.state == STREAM_RUNNING ) {
\r
7469 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7470 error( RtError::WARNING );
\r
7474 MUTEX_LOCK( &stream_.mutex );
\r
7476 stream_.state = STREAM_RUNNING;
\r
7478 // No need to do anything else here ... OSS automatically starts
\r
7479 // when fed samples.
\r
7481 MUTEX_UNLOCK( &stream_.mutex );
\r
7483 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7484 pthread_cond_signal( &handle->runnable );
\r
7487 void RtApiOss :: stopStream()
\r
7490 if ( stream_.state == STREAM_STOPPED ) {
\r
7491 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7492 error( RtError::WARNING );
\r
7496 MUTEX_LOCK( &stream_.mutex );
\r
7498 // The state might change while waiting on a mutex.
\r
7499 if ( stream_.state == STREAM_STOPPED ) {
\r
7500 MUTEX_UNLOCK( &stream_.mutex );
\r
7505 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7506 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7508 // Flush the output with zeros a few times.
\r
7511 RtAudioFormat format;
\r
7513 if ( stream_.doConvertBuffer[0] ) {
\r
7514 buffer = stream_.deviceBuffer;
\r
7515 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7516 format = stream_.deviceFormat[0];
\r
7519 buffer = stream_.userBuffer[0];
\r
7520 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7521 format = stream_.userFormat;
\r
7524 memset( buffer, 0, samples * formatBytes(format) );
\r
7525 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7526 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7527 if ( result == -1 ) {
\r
7528 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7529 error( RtError::WARNING );
\r
7533 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7534 if ( result == -1 ) {
\r
7535 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7536 errorText_ = errorStream_.str();
\r
7539 handle->triggered = false;
\r
7542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7543 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7544 if ( result == -1 ) {
\r
7545 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7546 errorText_ = errorStream_.str();
\r
7552 stream_.state = STREAM_STOPPED;
\r
7553 MUTEX_UNLOCK( &stream_.mutex );
\r
7555 if ( result != -1 ) return;
\r
7556 error( RtError::SYSTEM_ERROR );
\r
7559 void RtApiOss :: abortStream()
\r
7562 if ( stream_.state == STREAM_STOPPED ) {
\r
7563 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7564 error( RtError::WARNING );
\r
7568 MUTEX_LOCK( &stream_.mutex );
\r
7570 // The state might change while waiting on a mutex.
\r
7571 if ( stream_.state == STREAM_STOPPED ) {
\r
7572 MUTEX_UNLOCK( &stream_.mutex );
\r
7577 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7578 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7579 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7580 if ( result == -1 ) {
\r
7581 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7582 errorText_ = errorStream_.str();
\r
7585 handle->triggered = false;
\r
7588 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7589 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7590 if ( result == -1 ) {
\r
7591 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7592 errorText_ = errorStream_.str();
\r
7598 stream_.state = STREAM_STOPPED;
\r
7599 MUTEX_UNLOCK( &stream_.mutex );
\r
7601 if ( result != -1 ) return;
\r
7602 error( RtError::SYSTEM_ERROR );
\r
7605 void RtApiOss :: callbackEvent()
\r
7607 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7608 if ( stream_.state == STREAM_STOPPED ) {
\r
7609 MUTEX_LOCK( &stream_.mutex );
\r
7610 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7611 if ( stream_.state != STREAM_RUNNING ) {
\r
7612 MUTEX_UNLOCK( &stream_.mutex );
\r
7615 MUTEX_UNLOCK( &stream_.mutex );
\r
7618 if ( stream_.state == STREAM_CLOSED ) {
\r
7619 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7620 error( RtError::WARNING );
\r
7624 // Invoke user callback to get fresh output data.
\r
7625 int doStopStream = 0;
\r
7626 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7627 double streamTime = getStreamTime();
\r
7628 RtAudioStreamStatus status = 0;
\r
7629 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7630 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7631 handle->xrun[0] = false;
\r
7633 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7634 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7635 handle->xrun[1] = false;
\r
7637 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7638 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7639 if ( doStopStream == 2 ) {
\r
7640 this->abortStream();
\r
7644 MUTEX_LOCK( &stream_.mutex );
\r
7646 // The state might change while waiting on a mutex.
\r
7647 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7652 RtAudioFormat format;
\r
7654 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7656 // Setup parameters and do buffer conversion if necessary.
\r
7657 if ( stream_.doConvertBuffer[0] ) {
\r
7658 buffer = stream_.deviceBuffer;
\r
7659 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7660 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7661 format = stream_.deviceFormat[0];
\r
7664 buffer = stream_.userBuffer[0];
\r
7665 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7666 format = stream_.userFormat;
\r
7669 // Do byte swapping if necessary.
\r
7670 if ( stream_.doByteSwap[0] )
\r
7671 byteSwapBuffer( buffer, samples, format );
\r
7673 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7675 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7676 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7677 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7678 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7679 handle->triggered = true;
\r
7682 // Write samples to device.
\r
7683 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7685 if ( result == -1 ) {
\r
7686 // We'll assume this is an underrun, though there isn't a
\r
7687 // specific means for determining that.
\r
7688 handle->xrun[0] = true;
\r
7689 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7690 error( RtError::WARNING );
\r
7691 // Continue on to input section.
\r
7695 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7697 // Setup parameters.
\r
7698 if ( stream_.doConvertBuffer[1] ) {
\r
7699 buffer = stream_.deviceBuffer;
\r
7700 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7701 format = stream_.deviceFormat[1];
\r
7704 buffer = stream_.userBuffer[1];
\r
7705 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7706 format = stream_.userFormat;
\r
7709 // Read samples from device.
\r
7710 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7712 if ( result == -1 ) {
\r
7713 // We'll assume this is an overrun, though there isn't a
\r
7714 // specific means for determining that.
\r
7715 handle->xrun[1] = true;
\r
7716 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7717 error( RtError::WARNING );
\r
7721 // Do byte swapping if necessary.
\r
7722 if ( stream_.doByteSwap[1] )
\r
7723 byteSwapBuffer( buffer, samples, format );
\r
7725 // Do buffer conversion if necessary.
\r
7726 if ( stream_.doConvertBuffer[1] )
\r
7727 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7731 MUTEX_UNLOCK( &stream_.mutex );
\r
7733 RtApi::tickStreamTime();
\r
7734 if ( doStopStream == 1 ) this->stopStream();
\r
7737 extern "C" void *ossCallbackHandler( void *ptr )
\r
7739 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7740 RtApiOss *object = (RtApiOss *) info->object;
\r
7741 bool *isRunning = &info->isRunning;
\r
7743 while ( *isRunning == true ) {
\r
7744 pthread_testcancel();
\r
7745 object->callbackEvent();
\r
7748 pthread_exit( NULL );
\r
7751 //******************** End of __LINUX_OSS__ *********************//
\r
7755 // *************************************************** //
\r
7757 // Protected common (OS-independent) RtAudio methods.
\r
7759 // *************************************************** //
\r
7761 // This method can be modified to control the behavior of error
\r
7762 // message printing.
\r
7763 void RtApi :: error( RtError::Type type )
\r
7765 errorStream_.str(""); // clear the ostringstream
\r
7766 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7767 std::cerr << '\n' << errorText_ << "\n\n";
\r
7768 else if ( type != RtError::WARNING )
\r
7769 throw( RtError( errorText_, type ) );
\r
7772 void RtApi :: verifyStream()
\r
7774 if ( stream_.state == STREAM_CLOSED ) {
\r
7775 errorText_ = "RtApi:: a stream is not open!";
\r
7776 error( RtError::INVALID_USE );
\r
7780 void RtApi :: clearStreamInfo()
\r
7782 stream_.mode = UNINITIALIZED;
\r
7783 stream_.state = STREAM_CLOSED;
\r
7784 stream_.sampleRate = 0;
\r
7785 stream_.bufferSize = 0;
\r
7786 stream_.nBuffers = 0;
\r
7787 stream_.userFormat = 0;
\r
7788 stream_.userInterleaved = true;
\r
7789 stream_.streamTime = 0.0;
\r
7790 stream_.apiHandle = 0;
\r
7791 stream_.deviceBuffer = 0;
\r
7792 stream_.callbackInfo.callback = 0;
\r
7793 stream_.callbackInfo.userData = 0;
\r
7794 stream_.callbackInfo.isRunning = false;
\r
7795 for ( int i=0; i<2; i++ ) {
\r
7796 stream_.device[i] = 11111;
\r
7797 stream_.doConvertBuffer[i] = false;
\r
7798 stream_.deviceInterleaved[i] = true;
\r
7799 stream_.doByteSwap[i] = false;
\r
7800 stream_.nUserChannels[i] = 0;
\r
7801 stream_.nDeviceChannels[i] = 0;
\r
7802 stream_.channelOffset[i] = 0;
\r
7803 stream_.deviceFormat[i] = 0;
\r
7804 stream_.latency[i] = 0;
\r
7805 stream_.userBuffer[i] = 0;
\r
7806 stream_.convertInfo[i].channels = 0;
\r
7807 stream_.convertInfo[i].inJump = 0;
\r
7808 stream_.convertInfo[i].outJump = 0;
\r
7809 stream_.convertInfo[i].inFormat = 0;
\r
7810 stream_.convertInfo[i].outFormat = 0;
\r
7811 stream_.convertInfo[i].inOffset.clear();
\r
7812 stream_.convertInfo[i].outOffset.clear();
\r
7816 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7818 if ( format == RTAUDIO_SINT16 )
\r
7820 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7822 else if ( format == RTAUDIO_FLOAT64 )
\r
7824 else if ( format == RTAUDIO_SINT24 )
\r
7826 else if ( format == RTAUDIO_SINT8 )
\r
7829 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7830 error( RtError::WARNING );
\r
7835 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7837 if ( mode == INPUT ) { // convert device to user buffer
\r
7838 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7839 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7840 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7841 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7843 else { // convert user to device buffer
\r
7844 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7845 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7846 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7847 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7850 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7851 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7853 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7855 // Set up the interleave/deinterleave offsets.
\r
7856 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7857 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7858 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7859 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7860 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7861 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7862 stream_.convertInfo[mode].inJump = 1;
\r
7866 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7867 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7868 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7869 stream_.convertInfo[mode].outJump = 1;
\r
7873 else { // no (de)interleaving
\r
7874 if ( stream_.userInterleaved ) {
\r
7875 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7876 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7877 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7881 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7882 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7883 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7884 stream_.convertInfo[mode].inJump = 1;
\r
7885 stream_.convertInfo[mode].outJump = 1;
\r
7890 // Add channel offset.
\r
7891 if ( firstChannel > 0 ) {
\r
7892 if ( stream_.deviceInterleaved[mode] ) {
\r
7893 if ( mode == OUTPUT ) {
\r
7894 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7895 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7898 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7899 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7903 if ( mode == OUTPUT ) {
\r
7904 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7905 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7908 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7909 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7915 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7917 // This function does format conversion, input/output channel compensation, and
\r
7918 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7919 // the lower three bytes of a 32-bit integer.
\r
7921 // Clear our device buffer when in/out duplex device channels are different
\r
7922 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7923 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7924 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7927 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7929 Float64 *out = (Float64 *)outBuffer;
\r
7931 if (info.inFormat == RTAUDIO_SINT8) {
\r
7932 signed char *in = (signed char *)inBuffer;
\r
7933 scale = 1.0 / 127.5;
\r
7934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7935 for (j=0; j<info.channels; j++) {
\r
7936 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7937 out[info.outOffset[j]] += 0.5;
\r
7938 out[info.outOffset[j]] *= scale;
\r
7940 in += info.inJump;
\r
7941 out += info.outJump;
\r
7944 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7945 Int16 *in = (Int16 *)inBuffer;
\r
7946 scale = 1.0 / 32767.5;
\r
7947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7948 for (j=0; j<info.channels; j++) {
\r
7949 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7950 out[info.outOffset[j]] += 0.5;
\r
7951 out[info.outOffset[j]] *= scale;
\r
7953 in += info.inJump;
\r
7954 out += info.outJump;
\r
7957 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7958 Int24 *in = (Int24 *)inBuffer;
\r
7959 scale = 1.0 / 8388607.5;
\r
7960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7961 for (j=0; j<info.channels; j++) {
\r
7962 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
7963 out[info.outOffset[j]] += 0.5;
\r
7964 out[info.outOffset[j]] *= scale;
\r
7966 in += info.inJump;
\r
7967 out += info.outJump;
\r
7970 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7971 Int32 *in = (Int32 *)inBuffer;
\r
7972 scale = 1.0 / 2147483647.5;
\r
7973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7974 for (j=0; j<info.channels; j++) {
\r
7975 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7976 out[info.outOffset[j]] += 0.5;
\r
7977 out[info.outOffset[j]] *= scale;
\r
7979 in += info.inJump;
\r
7980 out += info.outJump;
\r
7983 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7984 Float32 *in = (Float32 *)inBuffer;
\r
7985 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7986 for (j=0; j<info.channels; j++) {
\r
7987 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7989 in += info.inJump;
\r
7990 out += info.outJump;
\r
7993 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7994 // Channel compensation and/or (de)interleaving only.
\r
7995 Float64 *in = (Float64 *)inBuffer;
\r
7996 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7997 for (j=0; j<info.channels; j++) {
\r
7998 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8000 in += info.inJump;
\r
8001 out += info.outJump;
\r
8005 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8007 Float32 *out = (Float32 *)outBuffer;
\r
8009 if (info.inFormat == RTAUDIO_SINT8) {
\r
8010 signed char *in = (signed char *)inBuffer;
\r
8011 scale = (Float32) ( 1.0 / 127.5 );
\r
8012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8013 for (j=0; j<info.channels; j++) {
\r
8014 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8015 out[info.outOffset[j]] += 0.5;
\r
8016 out[info.outOffset[j]] *= scale;
\r
8018 in += info.inJump;
\r
8019 out += info.outJump;
\r
8022 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8023 Int16 *in = (Int16 *)inBuffer;
\r
8024 scale = (Float32) ( 1.0 / 32767.5 );
\r
8025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8026 for (j=0; j<info.channels; j++) {
\r
8027 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8028 out[info.outOffset[j]] += 0.5;
\r
8029 out[info.outOffset[j]] *= scale;
\r
8031 in += info.inJump;
\r
8032 out += info.outJump;
\r
8035 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8036 Int24 *in = (Int24 *)inBuffer;
\r
8037 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8039 for (j=0; j<info.channels; j++) {
\r
8040 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8041 out[info.outOffset[j]] += 0.5;
\r
8042 out[info.outOffset[j]] *= scale;
\r
8044 in += info.inJump;
\r
8045 out += info.outJump;
\r
8048 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8049 Int32 *in = (Int32 *)inBuffer;
\r
8050 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8052 for (j=0; j<info.channels; j++) {
\r
8053 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8054 out[info.outOffset[j]] += 0.5;
\r
8055 out[info.outOffset[j]] *= scale;
\r
8057 in += info.inJump;
\r
8058 out += info.outJump;
\r
8061 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8062 // Channel compensation and/or (de)interleaving only.
\r
8063 Float32 *in = (Float32 *)inBuffer;
\r
8064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8065 for (j=0; j<info.channels; j++) {
\r
8066 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8068 in += info.inJump;
\r
8069 out += info.outJump;
\r
8072 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8073 Float64 *in = (Float64 *)inBuffer;
\r
8074 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8075 for (j=0; j<info.channels; j++) {
\r
8076 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8078 in += info.inJump;
\r
8079 out += info.outJump;
\r
8083 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8084 Int32 *out = (Int32 *)outBuffer;
\r
8085 if (info.inFormat == RTAUDIO_SINT8) {
\r
8086 signed char *in = (signed char *)inBuffer;
\r
8087 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8088 for (j=0; j<info.channels; j++) {
\r
8089 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8090 out[info.outOffset[j]] <<= 24;
\r
8092 in += info.inJump;
\r
8093 out += info.outJump;
\r
8096 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8097 Int16 *in = (Int16 *)inBuffer;
\r
8098 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8099 for (j=0; j<info.channels; j++) {
\r
8100 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8101 out[info.outOffset[j]] <<= 16;
\r
8103 in += info.inJump;
\r
8104 out += info.outJump;
\r
8107 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8108 Int24 *in = (Int24 *)inBuffer;
\r
8109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8110 for (j=0; j<info.channels; j++) {
\r
8111 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8112 out[info.outOffset[j]] <<= 8;
\r
8114 in += info.inJump;
\r
8115 out += info.outJump;
\r
8118 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8119 // Channel compensation and/or (de)interleaving only.
\r
8120 Int32 *in = (Int32 *)inBuffer;
\r
8121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8122 for (j=0; j<info.channels; j++) {
\r
8123 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8125 in += info.inJump;
\r
8126 out += info.outJump;
\r
8129 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8130 Float32 *in = (Float32 *)inBuffer;
\r
8131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8132 for (j=0; j<info.channels; j++) {
\r
8133 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8135 in += info.inJump;
\r
8136 out += info.outJump;
\r
8139 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8140 Float64 *in = (Float64 *)inBuffer;
\r
8141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8142 for (j=0; j<info.channels; j++) {
\r
8143 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8145 in += info.inJump;
\r
8146 out += info.outJump;
\r
8150 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8151 Int24 *out = (Int24 *)outBuffer;
\r
8152 if (info.inFormat == RTAUDIO_SINT8) {
\r
8153 signed char *in = (signed char *)inBuffer;
\r
8154 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8155 for (j=0; j<info.channels; j++) {
\r
8156 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8157 //out[info.outOffset[j]] <<= 16;
\r
8159 in += info.inJump;
\r
8160 out += info.outJump;
\r
8163 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8164 Int16 *in = (Int16 *)inBuffer;
\r
8165 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8166 for (j=0; j<info.channels; j++) {
\r
8167 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8168 //out[info.outOffset[j]] <<= 8;
\r
8170 in += info.inJump;
\r
8171 out += info.outJump;
\r
8174 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8175 // Channel compensation and/or (de)interleaving only.
\r
8176 Int24 *in = (Int24 *)inBuffer;
\r
8177 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8178 for (j=0; j<info.channels; j++) {
\r
8179 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8181 in += info.inJump;
\r
8182 out += info.outJump;
\r
8185 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8186 Int32 *in = (Int32 *)inBuffer;
\r
8187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8188 for (j=0; j<info.channels; j++) {
\r
8189 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8190 //out[info.outOffset[j]] >>= 8;
\r
8192 in += info.inJump;
\r
8193 out += info.outJump;
\r
8196 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8197 Float32 *in = (Float32 *)inBuffer;
\r
8198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8199 for (j=0; j<info.channels; j++) {
\r
8200 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8202 in += info.inJump;
\r
8203 out += info.outJump;
\r
8206 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8207 Float64 *in = (Float64 *)inBuffer;
\r
8208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8209 for (j=0; j<info.channels; j++) {
\r
8210 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8212 in += info.inJump;
\r
8213 out += info.outJump;
\r
8217 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8218 Int16 *out = (Int16 *)outBuffer;
\r
8219 if (info.inFormat == RTAUDIO_SINT8) {
\r
8220 signed char *in = (signed char *)inBuffer;
\r
8221 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8222 for (j=0; j<info.channels; j++) {
\r
8223 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8224 out[info.outOffset[j]] <<= 8;
\r
8226 in += info.inJump;
\r
8227 out += info.outJump;
\r
8230 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8231 // Channel compensation and/or (de)interleaving only.
\r
8232 Int16 *in = (Int16 *)inBuffer;
\r
8233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8234 for (j=0; j<info.channels; j++) {
\r
8235 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8237 in += info.inJump;
\r
8238 out += info.outJump;
\r
8241 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8242 Int24 *in = (Int24 *)inBuffer;
\r
8243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8244 for (j=0; j<info.channels; j++) {
\r
8245 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8247 in += info.inJump;
\r
8248 out += info.outJump;
\r
8251 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8252 Int32 *in = (Int32 *)inBuffer;
\r
8253 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8254 for (j=0; j<info.channels; j++) {
\r
8255 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8257 in += info.inJump;
\r
8258 out += info.outJump;
\r
8261 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8262 Float32 *in = (Float32 *)inBuffer;
\r
8263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8264 for (j=0; j<info.channels; j++) {
\r
8265 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8267 in += info.inJump;
\r
8268 out += info.outJump;
\r
8271 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8272 Float64 *in = (Float64 *)inBuffer;
\r
8273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8274 for (j=0; j<info.channels; j++) {
\r
8275 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8277 in += info.inJump;
\r
8278 out += info.outJump;
\r
8282 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8283 signed char *out = (signed char *)outBuffer;
\r
8284 if (info.inFormat == RTAUDIO_SINT8) {
\r
8285 // Channel compensation and/or (de)interleaving only.
\r
8286 signed char *in = (signed char *)inBuffer;
\r
8287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8288 for (j=0; j<info.channels; j++) {
\r
8289 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8291 in += info.inJump;
\r
8292 out += info.outJump;
\r
8295 if (info.inFormat == RTAUDIO_SINT16) {
\r
8296 Int16 *in = (Int16 *)inBuffer;
\r
8297 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8298 for (j=0; j<info.channels; j++) {
\r
8299 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8301 in += info.inJump;
\r
8302 out += info.outJump;
\r
8305 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8306 Int24 *in = (Int24 *)inBuffer;
\r
8307 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8308 for (j=0; j<info.channels; j++) {
\r
8309 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8311 in += info.inJump;
\r
8312 out += info.outJump;
\r
8315 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8316 Int32 *in = (Int32 *)inBuffer;
\r
8317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8318 for (j=0; j<info.channels; j++) {
\r
8319 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8321 in += info.inJump;
\r
8322 out += info.outJump;
\r
8325 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8326 Float32 *in = (Float32 *)inBuffer;
\r
8327 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8328 for (j=0; j<info.channels; j++) {
\r
8329 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8331 in += info.inJump;
\r
8332 out += info.outJump;
\r
8335 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8336 Float64 *in = (Float64 *)inBuffer;
\r
8337 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8338 for (j=0; j<info.channels; j++) {
\r
8339 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8341 in += info.inJump;
\r
8342 out += info.outJump;
\r
8348 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8349 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8350 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8352 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8354 register char val;
\r
8355 register char *ptr;
\r
8358 if ( format == RTAUDIO_SINT16 ) {
\r
8359 for ( unsigned int i=0; i<samples; i++ ) {
\r
8360 // Swap 1st and 2nd bytes.
\r
8362 *(ptr) = *(ptr+1);
\r
8365 // Increment 2 bytes.
\r
8369 else if ( format == RTAUDIO_SINT32 ||
\r
8370 format == RTAUDIO_FLOAT32 ) {
\r
8371 for ( unsigned int i=0; i<samples; i++ ) {
\r
8372 // Swap 1st and 4th bytes.
\r
8374 *(ptr) = *(ptr+3);
\r
8377 // Swap 2nd and 3rd bytes.
\r
8380 *(ptr) = *(ptr+1);
\r
8383 // Increment 3 more bytes.
\r
8387 else if ( format == RTAUDIO_SINT24 ) {
\r
8388 for ( unsigned int i=0; i<samples; i++ ) {
\r
8389 // Swap 1st and 3rd bytes.
\r
8391 *(ptr) = *(ptr+2);
\r
8394 // Increment 2 more bytes.
\r
8398 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8399 for ( unsigned int i=0; i<samples; i++ ) {
\r
8400 // Swap 1st and 8th bytes
\r
8402 *(ptr) = *(ptr+7);
\r
8405 // Swap 2nd and 7th bytes
\r
8408 *(ptr) = *(ptr+5);
\r
8411 // Swap 3rd and 6th bytes
\r
8414 *(ptr) = *(ptr+3);
\r
8417 // Swap 4th and 5th bytes
\r
8420 *(ptr) = *(ptr+1);
\r
8423 // Increment 5 more bytes.
\r
8429 // Indentation settings for Vim and Emacs
\r
8431 // Local Variables:
\r
8432 // c-basic-offset: 2
\r
8433 // indent-tabs-mode: nil
\r
8436 // vim: et sts=2 sw=2
\r