1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2012 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.11
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no real-time API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options )
\r
194 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
195 sampleRate, bufferFrames, callback,
\r
196 userData, options );
\r
199 // *************************************************** //
\r
201 // Public RtApi definitions (see end of file for
\r
202 // private or protected utility functions).
\r
204 // *************************************************** //
\r
208 stream_.state = STREAM_CLOSED;
\r
209 stream_.mode = UNINITIALIZED;
\r
210 stream_.apiHandle = 0;
\r
211 stream_.userBuffer[0] = 0;
\r
212 stream_.userBuffer[1] = 0;
\r
213 MUTEX_INITIALIZE( &stream_.mutex );
\r
214 showWarnings_ = true;
\r
219 MUTEX_DESTROY( &stream_.mutex );
\r
222 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
223 RtAudio::StreamParameters *iParams,
\r
224 RtAudioFormat format, unsigned int sampleRate,
\r
225 unsigned int *bufferFrames,
\r
226 RtAudioCallback callback, void *userData,
\r
227 RtAudio::StreamOptions *options )
\r
229 if ( stream_.state != STREAM_CLOSED ) {
\r
230 errorText_ = "RtApi::openStream: a stream is already open!";
\r
231 error( RtError::INVALID_USE );
\r
234 if ( oParams && oParams->nChannels < 1 ) {
\r
235 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
236 error( RtError::INVALID_USE );
\r
239 if ( iParams && iParams->nChannels < 1 ) {
\r
240 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
241 error( RtError::INVALID_USE );
\r
244 if ( oParams == NULL && iParams == NULL ) {
\r
245 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
246 error( RtError::INVALID_USE );
\r
249 if ( formatBytes(format) == 0 ) {
\r
250 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
251 error( RtError::INVALID_USE );
\r
254 unsigned int nDevices = getDeviceCount();
\r
255 unsigned int oChannels = 0;
\r
257 oChannels = oParams->nChannels;
\r
258 if ( oParams->deviceId >= nDevices ) {
\r
259 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
260 error( RtError::INVALID_USE );
\r
264 unsigned int iChannels = 0;
\r
266 iChannels = iParams->nChannels;
\r
267 if ( iParams->deviceId >= nDevices ) {
\r
268 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
269 error( RtError::INVALID_USE );
\r
276 if ( oChannels > 0 ) {
\r
278 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
279 sampleRate, format, bufferFrames, options );
\r
280 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
283 if ( iChannels > 0 ) {
\r
285 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
286 sampleRate, format, bufferFrames, options );
\r
287 if ( result == false ) {
\r
288 if ( oChannels > 0 ) closeStream();
\r
289 error( RtError::SYSTEM_ERROR );
\r
293 stream_.callbackInfo.callback = (void *) callback;
\r
294 stream_.callbackInfo.userData = userData;
\r
296 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
297 stream_.state = STREAM_STOPPED;
\r
300 unsigned int RtApi :: getDefaultInputDevice( void )
\r
302 // Should be implemented in subclasses if possible.
\r
306 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
308 // Should be implemented in subclasses if possible.
\r
312 void RtApi :: closeStream( void )
\r
314 // MUST be implemented in subclasses!
\r
318 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
319 unsigned int firstChannel, unsigned int sampleRate,
\r
320 RtAudioFormat format, unsigned int *bufferSize,
\r
321 RtAudio::StreamOptions *options )
\r
323 // MUST be implemented in subclasses!
\r
327 void RtApi :: tickStreamTime( void )
\r
329 // Subclasses that do not provide their own implementation of
\r
330 // getStreamTime should call this function once per buffer I/O to
\r
331 // provide basic stream time support.
\r
333 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
335 #if defined( HAVE_GETTIMEOFDAY )
\r
336 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
340 long RtApi :: getStreamLatency( void )
\r
344 long totalLatency = 0;
\r
345 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
346 totalLatency = stream_.latency[0];
\r
347 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
348 totalLatency += stream_.latency[1];
\r
350 return totalLatency;
\r
353 double RtApi :: getStreamTime( void )
\r
357 #if defined( HAVE_GETTIMEOFDAY )
\r
358 // Return a very accurate estimate of the stream time by
\r
359 // adding in the elapsed time since the last tick.
\r
360 struct timeval then;
\r
361 struct timeval now;
\r
363 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
364 return stream_.streamTime;
\r
366 gettimeofday( &now, NULL );
\r
367 then = stream_.lastTickTimestamp;
\r
368 return stream_.streamTime +
\r
369 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
370 (then.tv_sec + 0.000001 * then.tv_usec));
\r
372 return stream_.streamTime;
\r
376 unsigned int RtApi :: getStreamSampleRate( void )
\r
380 return stream_.sampleRate;
\r
384 // *************************************************** //
\r
386 // OS/API-specific methods.
\r
388 // *************************************************** //
\r
390 #if defined(__MACOSX_CORE__)
\r
392 // The OS X CoreAudio API is designed to use a separate callback
\r
393 // procedure for each of its audio devices. A single RtAudio duplex
\r
394 // stream using two different devices is supported here, though it
\r
395 // cannot be guaranteed to always behave correctly because we cannot
\r
396 // synchronize these two callbacks.
\r
398 // A property listener is installed for over/underrun information.
\r
399 // However, no functionality is currently provided to allow property
\r
400 // listeners to trigger user handlers because it is unclear what could
\r
401 // be done if a critical stream parameter (buffer size, sample rate,
\r
402 // device disconnect) notification arrived. The listeners entail
\r
403 // quite a bit of extra code and most likely, a user program wouldn't
\r
404 // be prepared for the result anyway. However, we do provide a flag
\r
405 // to the client callback function to inform of an over/underrun.
\r
407 // A structure to hold various information related to the CoreAudio API
\r
409 struct CoreHandle {
\r
410 AudioDeviceID id[2]; // device ids
\r
411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
412 AudioDeviceIOProcID procId[2];
\r
414 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
415 UInt32 nStreams[2]; // number of streams to use
\r
417 char *deviceBuffer;
\r
418 pthread_cond_t condition;
\r
419 int drainCounter; // Tracks callback counts when draining
\r
420 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
423 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
426 ThreadHandle threadId;
\r
428 RtApiCore:: RtApiCore()
\r
430 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
431 // This is a largely undocumented but absolutely necessary
\r
432 // requirement starting with OS-X 10.6. If not called, queries and
\r
433 // updates to various audio device properties are not handled
\r
435 CFRunLoopRef theRunLoop = NULL;
\r
436 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
437 kAudioObjectPropertyScopeGlobal,
\r
438 kAudioObjectPropertyElementMaster };
\r
439 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
440 if ( result != noErr ) {
\r
441 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
442 error( RtError::WARNING );
\r
447 RtApiCore :: ~RtApiCore()
\r
449 // The subclass destructor gets called before the base class
\r
450 // destructor, so close an existing stream before deallocating
\r
451 // apiDeviceId memory.
\r
452 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
455 unsigned int RtApiCore :: getDeviceCount( void )
\r
457 // Find out how many audio devices there are, if any.
\r
459 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
460 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
461 if ( result != noErr ) {
\r
462 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
463 error( RtError::WARNING );
\r
467 return dataSize / sizeof( AudioDeviceID );
\r
470 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
472 unsigned int nDevices = getDeviceCount();
\r
473 if ( nDevices <= 1 ) return 0;
\r
476 UInt32 dataSize = sizeof( AudioDeviceID );
\r
477 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
478 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
479 if ( result != noErr ) {
\r
480 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
481 error( RtError::WARNING );
\r
485 dataSize *= nDevices;
\r
486 AudioDeviceID deviceList[ nDevices ];
\r
487 property.mSelector = kAudioHardwarePropertyDevices;
\r
488 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
489 if ( result != noErr ) {
\r
490 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
491 error( RtError::WARNING );
\r
495 for ( unsigned int i=0; i<nDevices; i++ )
\r
496 if ( id == deviceList[i] ) return i;
\r
498 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
499 error( RtError::WARNING );
\r
503 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
505 unsigned int nDevices = getDeviceCount();
\r
506 if ( nDevices <= 1 ) return 0;
\r
509 UInt32 dataSize = sizeof( AudioDeviceID );
\r
510 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
511 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
512 if ( result != noErr ) {
\r
513 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
514 error( RtError::WARNING );
\r
518 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
519 AudioDeviceID deviceList[ nDevices ];
\r
520 property.mSelector = kAudioHardwarePropertyDevices;
\r
521 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
522 if ( result != noErr ) {
\r
523 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
524 error( RtError::WARNING );
\r
528 for ( unsigned int i=0; i<nDevices; i++ )
\r
529 if ( id == deviceList[i] ) return i;
\r
531 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
532 error( RtError::WARNING );
\r
536 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
538 RtAudio::DeviceInfo info;
\r
539 info.probed = false;
\r
542 unsigned int nDevices = getDeviceCount();
\r
543 if ( nDevices == 0 ) {
\r
544 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
545 error( RtError::INVALID_USE );
\r
548 if ( device >= nDevices ) {
\r
549 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
550 error( RtError::INVALID_USE );
\r
553 AudioDeviceID deviceList[ nDevices ];
\r
554 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
555 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
556 kAudioObjectPropertyScopeGlobal,
\r
557 kAudioObjectPropertyElementMaster };
\r
558 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
559 0, NULL, &dataSize, (void *) &deviceList );
\r
560 if ( result != noErr ) {
\r
561 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
562 error( RtError::WARNING );
\r
566 AudioDeviceID id = deviceList[ device ];
\r
568 // Get the device name.
\r
570 CFStringRef cfname;
\r
571 dataSize = sizeof( CFStringRef );
\r
572 property.mSelector = kAudioObjectPropertyManufacturer;
\r
573 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
574 if ( result != noErr ) {
\r
575 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
576 errorText_ = errorStream_.str();
\r
577 error( RtError::WARNING );
\r
581 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
582 int length = CFStringGetLength(cfname);
\r
583 char *mname = (char *)malloc(length * 3 + 1);
\r
584 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
585 info.name.append( (const char *)mname, strlen(mname) );
\r
586 info.name.append( ": " );
\r
587 CFRelease( cfname );
\r
590 property.mSelector = kAudioObjectPropertyName;
\r
591 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
592 if ( result != noErr ) {
\r
593 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
594 errorText_ = errorStream_.str();
\r
595 error( RtError::WARNING );
\r
599 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
600 length = CFStringGetLength(cfname);
\r
601 char *name = (char *)malloc(length * 3 + 1);
\r
602 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
603 info.name.append( (const char *)name, strlen(name) );
\r
604 CFRelease( cfname );
\r
607 // Get the output stream "configuration".
\r
608 AudioBufferList *bufferList = nil;
\r
609 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
610 property.mScope = kAudioDevicePropertyScopeOutput;
\r
611 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
613 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
614 if ( result != noErr || dataSize == 0 ) {
\r
615 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtError::WARNING );
\r
621 // Allocate the AudioBufferList.
\r
622 bufferList = (AudioBufferList *) malloc( dataSize );
\r
623 if ( bufferList == NULL ) {
\r
624 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
625 error( RtError::WARNING );
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
630 if ( result != noErr || dataSize == 0 ) {
\r
631 free( bufferList );
\r
632 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
633 errorText_ = errorStream_.str();
\r
634 error( RtError::WARNING );
\r
638 // Get output channel information.
\r
639 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
640 for ( i=0; i<nStreams; i++ )
\r
641 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
642 free( bufferList );
\r
644 // Get the input stream "configuration".
\r
645 property.mScope = kAudioDevicePropertyScopeInput;
\r
646 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
647 if ( result != noErr || dataSize == 0 ) {
\r
648 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
649 errorText_ = errorStream_.str();
\r
650 error( RtError::WARNING );
\r
654 // Allocate the AudioBufferList.
\r
655 bufferList = (AudioBufferList *) malloc( dataSize );
\r
656 if ( bufferList == NULL ) {
\r
657 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
658 error( RtError::WARNING );
\r
662 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
663 if (result != noErr || dataSize == 0) {
\r
664 free( bufferList );
\r
665 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
666 errorText_ = errorStream_.str();
\r
667 error( RtError::WARNING );
\r
671 // Get input channel information.
\r
672 nStreams = bufferList->mNumberBuffers;
\r
673 for ( i=0; i<nStreams; i++ )
\r
674 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
675 free( bufferList );
\r
677 // If device opens for both playback and capture, we determine the channels.
\r
678 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
679 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
681 // Probe the device sample rates.
\r
682 bool isInput = false;
\r
683 if ( info.outputChannels == 0 ) isInput = true;
\r
685 // Determine the supported sample rates.
\r
686 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
687 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
688 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
689 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
690 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
691 errorText_ = errorStream_.str();
\r
692 error( RtError::WARNING );
\r
696 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
697 AudioValueRange rangeList[ nRanges ];
\r
698 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
699 if ( result != kAudioHardwareNoError ) {
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtError::WARNING );
\r
706 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
707 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
708 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
709 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
712 info.sampleRates.clear();
\r
713 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
714 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
715 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
718 if ( info.sampleRates.size() == 0 ) {
\r
719 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
720 errorText_ = errorStream_.str();
\r
721 error( RtError::WARNING );
\r
725 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
726 // Thus, any other "physical" formats supported by the device are of
\r
727 // no interest to the client.
\r
728 info.nativeFormats = RTAUDIO_FLOAT32;
\r
730 if ( info.outputChannels > 0 )
\r
731 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
732 if ( info.inputChannels > 0 )
\r
733 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
735 info.probed = true;
\r
739 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
740 const AudioTimeStamp* inNow,
\r
741 const AudioBufferList* inInputData,
\r
742 const AudioTimeStamp* inInputTime,
\r
743 AudioBufferList* outOutputData,
\r
744 const AudioTimeStamp* inOutputTime,
\r
745 void* infoPointer )
\r
747 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
749 RtApiCore *object = (RtApiCore *) info->object;
\r
750 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
751 return kAudioHardwareUnspecifiedError;
\r
753 return kAudioHardwareNoError;
\r
756 OSStatus xrunListener( AudioObjectID inDevice,
\r
758 const AudioObjectPropertyAddress properties[],
\r
759 void* handlePointer )
\r
761 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
762 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
763 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
764 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
765 handle->xrun[1] = true;
\r
767 handle->xrun[0] = true;
\r
771 return kAudioHardwareNoError;
\r
774 OSStatus rateListener( AudioObjectID inDevice,
\r
776 const AudioObjectPropertyAddress properties[],
\r
777 void* ratePointer )
\r
780 Float64 *rate = (Float64 *) ratePointer;
\r
781 UInt32 dataSize = sizeof( Float64 );
\r
782 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
783 kAudioObjectPropertyScopeGlobal,
\r
784 kAudioObjectPropertyElementMaster };
\r
785 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
786 return kAudioHardwareNoError;
\r
789 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
790 unsigned int firstChannel, unsigned int sampleRate,
\r
791 RtAudioFormat format, unsigned int *bufferSize,
\r
792 RtAudio::StreamOptions *options )
\r
795 unsigned int nDevices = getDeviceCount();
\r
796 if ( nDevices == 0 ) {
\r
797 // This should not happen because a check is made before this function is called.
\r
798 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
802 if ( device >= nDevices ) {
\r
803 // This should not happen because a check is made before this function is called.
\r
804 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
808 AudioDeviceID deviceList[ nDevices ];
\r
809 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
810 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
811 kAudioObjectPropertyScopeGlobal,
\r
812 kAudioObjectPropertyElementMaster };
\r
813 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
814 0, NULL, &dataSize, (void *) &deviceList );
\r
815 if ( result != noErr ) {
\r
816 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
820 AudioDeviceID id = deviceList[ device ];
\r
822 // Setup for stream mode.
\r
823 bool isInput = false;
\r
824 if ( mode == INPUT ) {
\r
826 property.mScope = kAudioDevicePropertyScopeInput;
\r
829 property.mScope = kAudioDevicePropertyScopeOutput;
\r
831 // Get the stream "configuration".
\r
832 AudioBufferList *bufferList = nil;
\r
834 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
835 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
836 if ( result != noErr || dataSize == 0 ) {
\r
837 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
838 errorText_ = errorStream_.str();
\r
842 // Allocate the AudioBufferList.
\r
843 bufferList = (AudioBufferList *) malloc( dataSize );
\r
844 if ( bufferList == NULL ) {
\r
845 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
849 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
850 if (result != noErr || dataSize == 0) {
\r
851 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
852 errorText_ = errorStream_.str();
\r
856 // Search for one or more streams that contain the desired number of
\r
857 // channels. CoreAudio devices can have an arbitrary number of
\r
858 // streams and each stream can have an arbitrary number of channels.
\r
859 // For each stream, a single buffer of interleaved samples is
\r
860 // provided. RtAudio prefers the use of one stream of interleaved
\r
861 // data or multiple consecutive single-channel streams. However, we
\r
862 // now support multiple consecutive multi-channel streams of
\r
863 // interleaved data as well.
\r
864 UInt32 iStream, offsetCounter = firstChannel;
\r
865 UInt32 nStreams = bufferList->mNumberBuffers;
\r
866 bool monoMode = false;
\r
867 bool foundStream = false;
\r
869 // First check that the device supports the requested number of
\r
871 UInt32 deviceChannels = 0;
\r
872 for ( iStream=0; iStream<nStreams; iStream++ )
\r
873 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
875 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
876 free( bufferList );
\r
877 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
878 errorText_ = errorStream_.str();
\r
882 // Look for a single stream meeting our needs.
\r
883 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
884 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
885 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
886 if ( streamChannels >= channels + offsetCounter ) {
\r
887 firstStream = iStream;
\r
888 channelOffset = offsetCounter;
\r
889 foundStream = true;
\r
892 if ( streamChannels > offsetCounter ) break;
\r
893 offsetCounter -= streamChannels;
\r
896 // If we didn't find a single stream above, then we should be able
\r
897 // to meet the channel specification with multiple streams.
\r
898 if ( foundStream == false ) {
\r
900 offsetCounter = firstChannel;
\r
901 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
902 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
903 if ( streamChannels > offsetCounter ) break;
\r
904 offsetCounter -= streamChannels;
\r
907 firstStream = iStream;
\r
908 channelOffset = offsetCounter;
\r
909 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
911 if ( streamChannels > 1 ) monoMode = false;
\r
912 while ( channelCounter > 0 ) {
\r
913 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
914 if ( streamChannels > 1 ) monoMode = false;
\r
915 channelCounter -= streamChannels;
\r
920 free( bufferList );
\r
922 // Determine the buffer size.
\r
923 AudioValueRange bufferRange;
\r
924 dataSize = sizeof( AudioValueRange );
\r
925 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
926 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
928 if ( result != noErr ) {
\r
929 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
930 errorText_ = errorStream_.str();
\r
934 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
935 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
936 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
938 // Set the buffer size. For multiple streams, I'm assuming we only
\r
939 // need to make this setting for the master channel.
\r
940 UInt32 theSize = (UInt32) *bufferSize;
\r
941 dataSize = sizeof( UInt32 );
\r
942 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
943 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
945 if ( result != noErr ) {
\r
946 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
947 errorText_ = errorStream_.str();
\r
951 // If attempting to setup a duplex stream, the bufferSize parameter
\r
952 // MUST be the same in both directions!
\r
953 *bufferSize = theSize;
\r
954 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
955 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
956 errorText_ = errorStream_.str();
\r
960 stream_.bufferSize = *bufferSize;
\r
961 stream_.nBuffers = 1;
\r
963 // Try to set "hog" mode ... it's not clear to me this is working.
\r
964 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
966 dataSize = sizeof( hog_pid );
\r
967 property.mSelector = kAudioDevicePropertyHogMode;
\r
968 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
969 if ( result != noErr ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
971 errorText_ = errorStream_.str();
\r
975 if ( hog_pid != getpid() ) {
\r
976 hog_pid = getpid();
\r
977 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
978 if ( result != noErr ) {
\r
979 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
980 errorText_ = errorStream_.str();
\r
986 // Check and if necessary, change the sample rate for the device.
\r
987 Float64 nominalRate;
\r
988 dataSize = sizeof( Float64 );
\r
989 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
990 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
994 errorText_ = errorStream_.str();
\r
998 // Only change the sample rate if off by more than 1 Hz.
\r
999 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1001 // Set a property listener for the sample rate change
\r
1002 Float64 reportedRate = 0.0;
\r
1003 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1004 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1005 if ( result != noErr ) {
\r
1006 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1007 errorText_ = errorStream_.str();
\r
1011 nominalRate = (Float64) sampleRate;
\r
1012 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1014 if ( result != noErr ) {
\r
1015 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1016 errorText_ = errorStream_.str();
\r
1020 // Now wait until the reported nominal rate is what we just set.
\r
1021 UInt32 microCounter = 0;
\r
1022 while ( reportedRate != nominalRate ) {
\r
1023 microCounter += 5000;
\r
1024 if ( microCounter > 5000000 ) break;
\r
1028 // Remove the property listener.
\r
1029 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1031 if ( microCounter > 5000000 ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1038 // Now set the stream format for all streams. Also, check the
\r
1039 // physical format of the device and change that if necessary.
\r
1040 AudioStreamBasicDescription description;
\r
1041 dataSize = sizeof( AudioStreamBasicDescription );
\r
1042 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1043 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1044 if ( result != noErr ) {
\r
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1046 errorText_ = errorStream_.str();
\r
1050 // Set the sample rate and data format id. However, only make the
\r
1051 // change if the sample rate is not within 1.0 of the desired
\r
1052 // rate and the format is not linear pcm.
\r
1053 bool updateFormat = false;
\r
1054 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1055 description.mSampleRate = (Float64) sampleRate;
\r
1056 updateFormat = true;
\r
1059 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1060 description.mFormatID = kAudioFormatLinearPCM;
\r
1061 updateFormat = true;
\r
1064 if ( updateFormat ) {
\r
1065 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1066 if ( result != noErr ) {
\r
1067 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1068 errorText_ = errorStream_.str();
\r
1073 // Now check the physical format.
\r
1074 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1075 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1076 if ( result != noErr ) {
\r
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1078 errorText_ = errorStream_.str();
\r
1082 //std::cout << "Current physical stream format:" << std::endl;
\r
1083 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1084 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1085 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1086 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1088 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1089 description.mFormatID = kAudioFormatLinearPCM;
\r
1090 //description.mSampleRate = (Float64) sampleRate;
\r
1091 AudioStreamBasicDescription testDescription = description;
\r
1092 UInt32 formatFlags;
\r
1094 // We'll try higher bit rates first and then work our way down.
\r
1095 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1096 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1097 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1098 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1099 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1100 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1101 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1102 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1103 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1104 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1105 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1106 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1107 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1109 bool setPhysicalFormat = false;
\r
1110 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1111 testDescription = description;
\r
1112 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1113 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1114 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1115 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1117 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1118 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1119 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1120 if ( result == noErr ) {
\r
1121 setPhysicalFormat = true;
\r
1122 //std::cout << "Updated physical stream format:" << std::endl;
\r
1123 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1124 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1125 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1126 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1131 if ( !setPhysicalFormat ) {
\r
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1133 errorText_ = errorStream_.str();
\r
1136 } // done setting virtual/physical formats.
\r
1138 // Get the stream / device latency.
\r
1140 dataSize = sizeof( UInt32 );
\r
1141 property.mSelector = kAudioDevicePropertyLatency;
\r
1142 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1144 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1147 errorText_ = errorStream_.str();
\r
1148 error( RtError::WARNING );
\r
1152 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1153 // always be presented in native-endian format, so we should never
\r
1154 // need to byte swap.
\r
1155 stream_.doByteSwap[mode] = false;
\r
1157 // From the CoreAudio documentation, PCM data must be supplied as
\r
1159 stream_.userFormat = format;
\r
1160 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1162 if ( streamCount == 1 )
\r
1163 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1164 else // multiple streams
\r
1165 stream_.nDeviceChannels[mode] = channels;
\r
1166 stream_.nUserChannels[mode] = channels;
\r
1167 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1168 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1169 else stream_.userInterleaved = true;
\r
1170 stream_.deviceInterleaved[mode] = true;
\r
1171 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1173 // Set flags for buffer conversion.
\r
1174 stream_.doConvertBuffer[mode] = false;
\r
1175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1176 stream_.doConvertBuffer[mode] = true;
\r
1177 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1178 stream_.doConvertBuffer[mode] = true;
\r
1179 if ( streamCount == 1 ) {
\r
1180 if ( stream_.nUserChannels[mode] > 1 &&
\r
1181 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1182 stream_.doConvertBuffer[mode] = true;
\r
1184 else if ( monoMode && stream_.userInterleaved )
\r
1185 stream_.doConvertBuffer[mode] = true;
\r
1187 // Allocate our CoreHandle structure for the stream.
\r
1188 CoreHandle *handle = 0;
\r
1189 if ( stream_.apiHandle == 0 ) {
\r
1191 handle = new CoreHandle;
\r
1193 catch ( std::bad_alloc& ) {
\r
1194 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1198 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1199 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1202 stream_.apiHandle = (void *) handle;
\r
1205 handle = (CoreHandle *) stream_.apiHandle;
\r
1206 handle->iStream[mode] = firstStream;
\r
1207 handle->nStreams[mode] = streamCount;
\r
1208 handle->id[mode] = id;
\r
1210 // Allocate necessary internal buffers.
\r
1211 unsigned long bufferBytes;
\r
1212 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1213 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1214 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1215 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1216 if ( stream_.userBuffer[mode] == NULL ) {
\r
1217 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1221 // If possible, we will make use of the CoreAudio stream buffers as
\r
1222 // "device buffers". However, we can't do this if using multiple
\r
1224 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1226 bool makeBuffer = true;
\r
1227 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1228 if ( mode == INPUT ) {
\r
1229 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1230 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1231 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1235 if ( makeBuffer ) {
\r
1236 bufferBytes *= *bufferSize;
\r
1237 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1238 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1239 if ( stream_.deviceBuffer == NULL ) {
\r
1240 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1246 stream_.sampleRate = sampleRate;
\r
1247 stream_.device[mode] = device;
\r
1248 stream_.state = STREAM_STOPPED;
\r
1249 stream_.callbackInfo.object = (void *) this;
\r
1251 // Setup the buffer conversion information structure.
\r
1252 if ( stream_.doConvertBuffer[mode] ) {
\r
1253 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1254 else setConvertInfo( mode, channelOffset );
\r
1257 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1258 // Only one callback procedure per device.
\r
1259 stream_.mode = DUPLEX;
\r
1261 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1262 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1264 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1265 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1267 if ( result != noErr ) {
\r
1268 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1269 errorText_ = errorStream_.str();
\r
1272 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1273 stream_.mode = DUPLEX;
\r
1275 stream_.mode = mode;
\r
1278 // Setup the device property listener for over/underload.
\r
1279 property.mSelector = kAudioDeviceProcessorOverload;
\r
1280 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1286 pthread_cond_destroy( &handle->condition );
\r
1288 stream_.apiHandle = 0;
\r
1291 for ( int i=0; i<2; i++ ) {
\r
1292 if ( stream_.userBuffer[i] ) {
\r
1293 free( stream_.userBuffer[i] );
\r
1294 stream_.userBuffer[i] = 0;
\r
1298 if ( stream_.deviceBuffer ) {
\r
1299 free( stream_.deviceBuffer );
\r
1300 stream_.deviceBuffer = 0;
\r
1306 void RtApiCore :: closeStream( void )
\r
1308 if ( stream_.state == STREAM_CLOSED ) {
\r
1309 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1310 error( RtError::WARNING );
\r
1314 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1315 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1316 if ( stream_.state == STREAM_RUNNING )
\r
1317 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1321 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1322 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1326 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1327 if ( stream_.state == STREAM_RUNNING )
\r
1328 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1329 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1330 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1332 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1333 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1337 for ( int i=0; i<2; i++ ) {
\r
1338 if ( stream_.userBuffer[i] ) {
\r
1339 free( stream_.userBuffer[i] );
\r
1340 stream_.userBuffer[i] = 0;
\r
1344 if ( stream_.deviceBuffer ) {
\r
1345 free( stream_.deviceBuffer );
\r
1346 stream_.deviceBuffer = 0;
\r
1349 // Destroy pthread condition variable.
\r
1350 pthread_cond_destroy( &handle->condition );
\r
1352 stream_.apiHandle = 0;
\r
1354 stream_.mode = UNINITIALIZED;
\r
1355 stream_.state = STREAM_CLOSED;
\r
1358 void RtApiCore :: startStream( void )
\r
1361 if ( stream_.state == STREAM_RUNNING ) {
\r
1362 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1363 error( RtError::WARNING );
\r
1367 OSStatus result = noErr;
\r
1368 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1369 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1371 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1372 if ( result != noErr ) {
\r
1373 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1374 errorText_ = errorStream_.str();
\r
1379 if ( stream_.mode == INPUT ||
\r
1380 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1382 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1383 if ( result != noErr ) {
\r
1384 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1385 errorText_ = errorStream_.str();
\r
1390 handle->drainCounter = 0;
\r
1391 handle->internalDrain = false;
\r
1392 stream_.state = STREAM_RUNNING;
\r
1395 if ( result == noErr ) return;
\r
1396 error( RtError::SYSTEM_ERROR );
\r
1399 void RtApiCore :: stopStream( void )
\r
1402 if ( stream_.state == STREAM_STOPPED ) {
\r
1403 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1404 error( RtError::WARNING );
\r
1408 OSStatus result = noErr;
\r
1409 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1412 if ( handle->drainCounter == 0 ) {
\r
1413 handle->drainCounter = 2;
\r
1414 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1417 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1418 if ( result != noErr ) {
\r
1419 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1420 errorText_ = errorStream_.str();
\r
1425 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1427 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1428 if ( result != noErr ) {
\r
1429 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1430 errorText_ = errorStream_.str();
\r
1435 stream_.state = STREAM_STOPPED;
\r
1438 if ( result == noErr ) return;
\r
1439 error( RtError::SYSTEM_ERROR );
\r
1442 void RtApiCore :: abortStream( void )
\r
1445 if ( stream_.state == STREAM_STOPPED ) {
\r
1446 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1447 error( RtError::WARNING );
\r
1451 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1452 handle->drainCounter = 2;
\r
1457 // This function will be called by a spawned thread when the user
\r
1458 // callback function signals that the stream should be stopped or
\r
1459 // aborted. It is better to handle it this way because the
\r
1460 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1461 // function is called.
\r
1462 extern "C" void *coreStopStream( void *ptr )
\r
1464 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1465 RtApiCore *object = (RtApiCore *) info->object;
\r
1467 object->stopStream();
\r
1468 pthread_exit( NULL );
\r
1471 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1472 const AudioBufferList *inBufferList,
\r
1473 const AudioBufferList *outBufferList )
\r
1475 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1476 if ( stream_.state == STREAM_CLOSED ) {
\r
1477 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1478 error( RtError::WARNING );
\r
1482 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1483 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1485 // Check if we were draining the stream and signal is finished.
\r
1486 if ( handle->drainCounter > 3 ) {
\r
1488 stream_.state = STREAM_STOPPING;
\r
1489 if ( handle->internalDrain == true )
\r
1490 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1491 else // external call to stopStream()
\r
1492 pthread_cond_signal( &handle->condition );
\r
1496 AudioDeviceID outputDevice = handle->id[0];
\r
1498 // Invoke user callback to get fresh output data UNLESS we are
\r
1499 // draining stream or duplex mode AND the input/output devices are
\r
1500 // different AND this function is called for the input device.
\r
1501 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1502 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1503 double streamTime = getStreamTime();
\r
1504 RtAudioStreamStatus status = 0;
\r
1505 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1506 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1507 handle->xrun[0] = false;
\r
1509 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1510 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1511 handle->xrun[1] = false;
\r
1514 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1515 stream_.bufferSize, streamTime, status, info->userData );
\r
1516 if ( cbReturnValue == 2 ) {
\r
1517 stream_.state = STREAM_STOPPING;
\r
1518 handle->drainCounter = 2;
\r
1522 else if ( cbReturnValue == 1 ) {
\r
1523 handle->drainCounter = 1;
\r
1524 handle->internalDrain = true;
\r
1528 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1530 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1532 if ( handle->nStreams[0] == 1 ) {
\r
1533 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1535 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1537 else { // fill multiple streams with zeros
\r
1538 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1539 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1541 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1545 else if ( handle->nStreams[0] == 1 ) {
\r
1546 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1547 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1548 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1550 else { // copy from user buffer
\r
1551 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1552 stream_.userBuffer[0],
\r
1553 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1556 else { // fill multiple streams
\r
1557 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1558 if ( stream_.doConvertBuffer[0] ) {
\r
1559 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1560 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1563 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1564 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1565 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1566 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1567 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1570 else { // fill multiple multi-channel streams with interleaved data
\r
1571 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1572 Float32 *out, *in;
\r
1574 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1575 UInt32 inChannels = stream_.nUserChannels[0];
\r
1576 if ( stream_.doConvertBuffer[0] ) {
\r
1577 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1578 inChannels = stream_.nDeviceChannels[0];
\r
1581 if ( inInterleaved ) inOffset = 1;
\r
1582 else inOffset = stream_.bufferSize;
\r
1584 channelsLeft = inChannels;
\r
1585 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1587 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1588 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1591 // Account for possible channel offset in first stream
\r
1592 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1593 streamChannels -= stream_.channelOffset[0];
\r
1594 outJump = stream_.channelOffset[0];
\r
1598 // Account for possible unfilled channels at end of the last stream
\r
1599 if ( streamChannels > channelsLeft ) {
\r
1600 outJump = streamChannels - channelsLeft;
\r
1601 streamChannels = channelsLeft;
\r
1604 // Determine input buffer offsets and skips
\r
1605 if ( inInterleaved ) {
\r
1606 inJump = inChannels;
\r
1607 in += inChannels - channelsLeft;
\r
1611 in += (inChannels - channelsLeft) * inOffset;
\r
1614 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1615 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1616 *out++ = in[j*inOffset];
\r
1621 channelsLeft -= streamChannels;
\r
1626 if ( handle->drainCounter ) {
\r
1627 handle->drainCounter++;
\r
1632 AudioDeviceID inputDevice;
\r
1633 inputDevice = handle->id[1];
\r
1634 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1636 if ( handle->nStreams[1] == 1 ) {
\r
1637 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1638 convertBuffer( stream_.userBuffer[1],
\r
1639 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1640 stream_.convertInfo[1] );
\r
1642 else { // copy to user buffer
\r
1643 memcpy( stream_.userBuffer[1],
\r
1644 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1645 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1648 else { // read from multiple streams
\r
1649 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1650 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1652 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1653 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1655 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1656 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1659 else { // read from multiple multi-channel streams
\r
1660 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1661 Float32 *out, *in;
\r
1663 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1664 UInt32 outChannels = stream_.nUserChannels[1];
\r
1665 if ( stream_.doConvertBuffer[1] ) {
\r
1666 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1667 outChannels = stream_.nDeviceChannels[1];
\r
1670 if ( outInterleaved ) outOffset = 1;
\r
1671 else outOffset = stream_.bufferSize;
\r
1673 channelsLeft = outChannels;
\r
1674 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1676 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1677 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1680 // Account for possible channel offset in first stream
\r
1681 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1682 streamChannels -= stream_.channelOffset[1];
\r
1683 inJump = stream_.channelOffset[1];
\r
1687 // Account for possible unread channels at end of the last stream
\r
1688 if ( streamChannels > channelsLeft ) {
\r
1689 inJump = streamChannels - channelsLeft;
\r
1690 streamChannels = channelsLeft;
\r
1693 // Determine output buffer offsets and skips
\r
1694 if ( outInterleaved ) {
\r
1695 outJump = outChannels;
\r
1696 out += outChannels - channelsLeft;
\r
1700 out += (outChannels - channelsLeft) * outOffset;
\r
1703 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1704 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1705 out[j*outOffset] = *in++;
\r
1710 channelsLeft -= streamChannels;
\r
1714 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1715 convertBuffer( stream_.userBuffer[1],
\r
1716 stream_.deviceBuffer,
\r
1717 stream_.convertInfo[1] );
\r
1723 //MUTEX_UNLOCK( &stream_.mutex );
\r
1725 RtApi::tickStreamTime();
\r
1729 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1733 case kAudioHardwareNotRunningError:
\r
1734 return "kAudioHardwareNotRunningError";
\r
1736 case kAudioHardwareUnspecifiedError:
\r
1737 return "kAudioHardwareUnspecifiedError";
\r
1739 case kAudioHardwareUnknownPropertyError:
\r
1740 return "kAudioHardwareUnknownPropertyError";
\r
1742 case kAudioHardwareBadPropertySizeError:
\r
1743 return "kAudioHardwareBadPropertySizeError";
\r
1745 case kAudioHardwareIllegalOperationError:
\r
1746 return "kAudioHardwareIllegalOperationError";
\r
1748 case kAudioHardwareBadObjectError:
\r
1749 return "kAudioHardwareBadObjectError";
\r
1751 case kAudioHardwareBadDeviceError:
\r
1752 return "kAudioHardwareBadDeviceError";
\r
1754 case kAudioHardwareBadStreamError:
\r
1755 return "kAudioHardwareBadStreamError";
\r
1757 case kAudioHardwareUnsupportedOperationError:
\r
1758 return "kAudioHardwareUnsupportedOperationError";
\r
1760 case kAudioDeviceUnsupportedFormatError:
\r
1761 return "kAudioDeviceUnsupportedFormatError";
\r
1763 case kAudioDevicePermissionsError:
\r
1764 return "kAudioDevicePermissionsError";
\r
1767 return "CoreAudio unknown error";
\r
1771 //******************** End of __MACOSX_CORE__ *********************//
\r
1774 #if defined(__UNIX_JACK__)
\r
1776 // JACK is a low-latency audio server, originally written for the
\r
1777 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1778 // connect a number of different applications to an audio device, as
\r
1779 // well as allowing them to share audio between themselves.
\r
1781 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1782 // have ports connected to the server. The JACK server is typically
\r
1783 // started in a terminal as follows:
\r
1785 // .jackd -d alsa -d hw:0
\r
1787 // or through an interface program such as qjackctl. Many of the
\r
1788 // parameters normally set for a stream are fixed by the JACK server
\r
1789 // and can be specified when the JACK server is started. In
\r
1792 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1794 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1795 // frames, and number of buffers = 4. Once the server is running, it
\r
1796 // is not possible to override these values. If the values are not
\r
1797 // specified in the command-line, the JACK server uses default values.
\r
1799 // The JACK server does not have to be running when an instance of
\r
1800 // RtApiJack is created, though the function getDeviceCount() will
\r
1801 // report 0 devices found until JACK has been started. When no
\r
1802 // devices are available (i.e., the JACK server is not running), a
\r
1803 // stream cannot be opened.
\r
1805 #include <jack/jack.h>
\r
1806 #include <unistd.h>
\r
1809 // A structure to hold various information related to the Jack API
\r
1810 // implementation.
\r
1811 struct JackHandle {
\r
1812 jack_client_t *client;
\r
1813 jack_port_t **ports[2];
\r
1814 std::string deviceName[2];
\r
1816 pthread_cond_t condition;
\r
1817 int drainCounter; // Tracks callback counts when draining
\r
1818 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1821 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1824 ThreadHandle threadId;
\r
1825 void jackSilentError( const char * ) {};
\r
1827 RtApiJack :: RtApiJack()
\r
1829 // Nothing to do here.
\r
1830 #if !defined(__RTAUDIO_DEBUG__)
\r
1831 // Turn off Jack's internal error reporting.
\r
1832 jack_set_error_function( &jackSilentError );
\r
1836 RtApiJack :: ~RtApiJack()
\r
1838 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1841 unsigned int RtApiJack :: getDeviceCount( void )
\r
1843 // See if we can become a jack client.
\r
1844 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1845 jack_status_t *status = NULL;
\r
1846 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1847 if ( client == 0 ) return 0;
\r
1849 const char **ports;
\r
1850 std::string port, previousPort;
\r
1851 unsigned int nChannels = 0, nDevices = 0;
\r
1852 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1854 // Parse the port names up to the first colon (:).
\r
1855 size_t iColon = 0;
\r
1857 port = (char *) ports[ nChannels ];
\r
1858 iColon = port.find(":");
\r
1859 if ( iColon != std::string::npos ) {
\r
1860 port = port.substr( 0, iColon + 1 );
\r
1861 if ( port != previousPort ) {
\r
1863 previousPort = port;
\r
1866 } while ( ports[++nChannels] );
\r
1870 jack_client_close( client );
\r
1874 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1876 RtAudio::DeviceInfo info;
\r
1877 info.probed = false;
\r
1879 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1880 jack_status_t *status = NULL;
\r
1881 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1882 if ( client == 0 ) {
\r
1883 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1884 error( RtError::WARNING );
\r
1888 const char **ports;
\r
1889 std::string port, previousPort;
\r
1890 unsigned int nPorts = 0, nDevices = 0;
\r
1891 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1893 // Parse the port names up to the first colon (:).
\r
1894 size_t iColon = 0;
\r
1896 port = (char *) ports[ nPorts ];
\r
1897 iColon = port.find(":");
\r
1898 if ( iColon != std::string::npos ) {
\r
1899 port = port.substr( 0, iColon );
\r
1900 if ( port != previousPort ) {
\r
1901 if ( nDevices == device ) info.name = port;
\r
1903 previousPort = port;
\r
1906 } while ( ports[++nPorts] );
\r
1910 if ( device >= nDevices ) {
\r
1911 jack_client_close( client );
\r
1912 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1913 error( RtError::INVALID_USE );
\r
1916 // Get the current jack server sample rate.
\r
1917 info.sampleRates.clear();
\r
1918 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1920 // Count the available ports containing the client name as device
\r
1921 // channels. Jack "input ports" equal RtAudio output channels.
\r
1922 unsigned int nChannels = 0;
\r
1923 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1925 while ( ports[ nChannels ] ) nChannels++;
\r
1927 info.outputChannels = nChannels;
\r
1930 // Jack "output ports" equal RtAudio input channels.
\r
1932 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1934 while ( ports[ nChannels ] ) nChannels++;
\r
1936 info.inputChannels = nChannels;
\r
1939 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1940 jack_client_close(client);
\r
1941 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1942 error( RtError::WARNING );
\r
1946 // If device opens for both playback and capture, we determine the channels.
\r
1947 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1948 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1950 // Jack always uses 32-bit floats.
\r
1951 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1953 // Jack doesn't provide default devices so we'll use the first available one.
\r
1954 if ( device == 0 && info.outputChannels > 0 )
\r
1955 info.isDefaultOutput = true;
\r
1956 if ( device == 0 && info.inputChannels > 0 )
\r
1957 info.isDefaultInput = true;
\r
1959 jack_client_close(client);
\r
1960 info.probed = true;
\r
1964 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1966 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1968 RtApiJack *object = (RtApiJack *) info->object;
\r
1969 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1974 // This function will be called by a spawned thread when the Jack
\r
1975 // server signals that it is shutting down. It is necessary to handle
\r
1976 // it this way because the jackShutdown() function must return before
\r
1977 // the jack_deactivate() function (in closeStream()) will return.
\r
1978 extern "C" void *jackCloseStream( void *ptr )
\r
1980 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1981 RtApiJack *object = (RtApiJack *) info->object;
\r
1983 object->closeStream();
\r
1985 pthread_exit( NULL );
\r
1987 void jackShutdown( void *infoPointer )
\r
1989 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1990 RtApiJack *object = (RtApiJack *) info->object;
\r
1992 // Check current stream state. If stopped, then we'll assume this
\r
1993 // was called as a result of a call to RtApiJack::stopStream (the
\r
1994 // deactivation of a client handle causes this function to be called).
\r
1995 // If not, we'll assume the Jack server is shutting down or some
\r
1996 // other problem occurred and we should close the stream.
\r
1997 if ( object->isStreamRunning() == false ) return;
\r
1999 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2000 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2003 int jackXrun( void *infoPointer )
\r
2005 JackHandle *handle = (JackHandle *) infoPointer;
\r
2007 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2008 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2013 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2014 unsigned int firstChannel, unsigned int sampleRate,
\r
2015 RtAudioFormat format, unsigned int *bufferSize,
\r
2016 RtAudio::StreamOptions *options )
\r
2018 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2020 // Look for jack server and try to become a client (only do once per stream).
\r
2021 jack_client_t *client = 0;
\r
2022 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2023 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2024 jack_status_t *status = NULL;
\r
2025 if ( options && !options->streamName.empty() )
\r
2026 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2028 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2029 if ( client == 0 ) {
\r
2030 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2031 error( RtError::WARNING );
\r
2036 // The handle must have been created on an earlier pass.
\r
2037 client = handle->client;
\r
2040 const char **ports;
\r
2041 std::string port, previousPort, deviceName;
\r
2042 unsigned int nPorts = 0, nDevices = 0;
\r
2043 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2045 // Parse the port names up to the first colon (:).
\r
2046 size_t iColon = 0;
\r
2048 port = (char *) ports[ nPorts ];
\r
2049 iColon = port.find(":");
\r
2050 if ( iColon != std::string::npos ) {
\r
2051 port = port.substr( 0, iColon );
\r
2052 if ( port != previousPort ) {
\r
2053 if ( nDevices == device ) deviceName = port;
\r
2055 previousPort = port;
\r
2058 } while ( ports[++nPorts] );
\r
2062 if ( device >= nDevices ) {
\r
2063 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2067 // Count the available ports containing the client name as device
\r
2068 // channels. Jack "input ports" equal RtAudio output channels.
\r
2069 unsigned int nChannels = 0;
\r
2070 unsigned long flag = JackPortIsInput;
\r
2071 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2072 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2074 while ( ports[ nChannels ] ) nChannels++;
\r
2078 // Compare the jack ports for specified client to the requested number of channels.
\r
2079 if ( nChannels < (channels + firstChannel) ) {
\r
2080 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2081 errorText_ = errorStream_.str();
\r
2085 // Check the jack server sample rate.
\r
2086 unsigned int jackRate = jack_get_sample_rate( client );
\r
2087 if ( sampleRate != jackRate ) {
\r
2088 jack_client_close( client );
\r
2089 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2090 errorText_ = errorStream_.str();
\r
2093 stream_.sampleRate = jackRate;
\r
2095 // Get the latency of the JACK port.
\r
2096 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2097 if ( ports[ firstChannel ] ) {
\r
2098 // Added by Ge Wang
\r
2099 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2100 // the range (usually the min and max are equal)
\r
2101 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2102 // get the latency range
\r
2103 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2104 // be optimistic, use the min!
\r
2105 stream_.latency[mode] = latrange.min;
\r
2106 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2110 // The jack server always uses 32-bit floating-point data.
\r
2111 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2112 stream_.userFormat = format;
\r
2114 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2115 else stream_.userInterleaved = true;
\r
2117 // Jack always uses non-interleaved buffers.
\r
2118 stream_.deviceInterleaved[mode] = false;
\r
2120 // Jack always provides host byte-ordered data.
\r
2121 stream_.doByteSwap[mode] = false;
\r
2123 // Get the buffer size. The buffer size and number of buffers
\r
2124 // (periods) is set when the jack server is started.
\r
2125 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2126 *bufferSize = stream_.bufferSize;
\r
2128 stream_.nDeviceChannels[mode] = channels;
\r
2129 stream_.nUserChannels[mode] = channels;
\r
2131 // Set flags for buffer conversion.
\r
2132 stream_.doConvertBuffer[mode] = false;
\r
2133 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2134 stream_.doConvertBuffer[mode] = true;
\r
2135 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2136 stream_.nUserChannels[mode] > 1 )
\r
2137 stream_.doConvertBuffer[mode] = true;
\r
2139 // Allocate our JackHandle structure for the stream.
\r
2140 if ( handle == 0 ) {
\r
2142 handle = new JackHandle;
\r
2144 catch ( std::bad_alloc& ) {
\r
2145 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2149 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2150 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2153 stream_.apiHandle = (void *) handle;
\r
2154 handle->client = client;
\r
2156 handle->deviceName[mode] = deviceName;
\r
2158 // Allocate necessary internal buffers.
\r
2159 unsigned long bufferBytes;
\r
2160 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2161 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2162 if ( stream_.userBuffer[mode] == NULL ) {
\r
2163 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2167 if ( stream_.doConvertBuffer[mode] ) {
\r
2169 bool makeBuffer = true;
\r
2170 if ( mode == OUTPUT )
\r
2171 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2172 else { // mode == INPUT
\r
2173 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2174 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2175 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2176 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2180 if ( makeBuffer ) {
\r
2181 bufferBytes *= *bufferSize;
\r
2182 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2183 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2184 if ( stream_.deviceBuffer == NULL ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2191 // Allocate memory for the Jack ports (channels) identifiers.
\r
2192 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2193 if ( handle->ports[mode] == NULL ) {
\r
2194 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2198 stream_.device[mode] = device;
\r
2199 stream_.channelOffset[mode] = firstChannel;
\r
2200 stream_.state = STREAM_STOPPED;
\r
2201 stream_.callbackInfo.object = (void *) this;
\r
2203 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2204 // We had already set up the stream for output.
\r
2205 stream_.mode = DUPLEX;
\r
2207 stream_.mode = mode;
\r
2208 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2209 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2210 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2213 // Register our ports.
\r
2215 if ( mode == OUTPUT ) {
\r
2216 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2217 snprintf( label, 64, "outport %d", i );
\r
2218 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2219 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2223 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2224 snprintf( label, 64, "inport %d", i );
\r
2225 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2226 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2230 // Setup the buffer conversion information structure. We don't use
\r
2231 // buffers to do channel offsets, so we override that parameter
\r
2233 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2239 pthread_cond_destroy( &handle->condition );
\r
2240 jack_client_close( handle->client );
\r
2242 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2243 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2246 stream_.apiHandle = 0;
\r
2249 for ( int i=0; i<2; i++ ) {
\r
2250 if ( stream_.userBuffer[i] ) {
\r
2251 free( stream_.userBuffer[i] );
\r
2252 stream_.userBuffer[i] = 0;
\r
2256 if ( stream_.deviceBuffer ) {
\r
2257 free( stream_.deviceBuffer );
\r
2258 stream_.deviceBuffer = 0;
\r
2264 void RtApiJack :: closeStream( void )
\r
2266 if ( stream_.state == STREAM_CLOSED ) {
\r
2267 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2268 error( RtError::WARNING );
\r
2272 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2275 if ( stream_.state == STREAM_RUNNING )
\r
2276 jack_deactivate( handle->client );
\r
2278 jack_client_close( handle->client );
\r
2282 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2283 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2284 pthread_cond_destroy( &handle->condition );
\r
2286 stream_.apiHandle = 0;
\r
2289 for ( int i=0; i<2; i++ ) {
\r
2290 if ( stream_.userBuffer[i] ) {
\r
2291 free( stream_.userBuffer[i] );
\r
2292 stream_.userBuffer[i] = 0;
\r
2296 if ( stream_.deviceBuffer ) {
\r
2297 free( stream_.deviceBuffer );
\r
2298 stream_.deviceBuffer = 0;
\r
2301 stream_.mode = UNINITIALIZED;
\r
2302 stream_.state = STREAM_CLOSED;
\r
2305 void RtApiJack :: startStream( void )
\r
2308 if ( stream_.state == STREAM_RUNNING ) {
\r
2309 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2310 error( RtError::WARNING );
\r
2314 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2315 int result = jack_activate( handle->client );
\r
2317 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2321 const char **ports;
\r
2323 // Get the list of available ports.
\r
2324 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2326 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2327 if ( ports == NULL) {
\r
2328 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2332 // Now make the port connections. Since RtAudio wasn't designed to
\r
2333 // allow the user to select particular channels of a device, we'll
\r
2334 // just open the first "nChannels" ports with offset.
\r
2335 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2337 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2338 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2341 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2348 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2350 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2351 if ( ports == NULL) {
\r
2352 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2356 // Now make the port connections. See note above.
\r
2357 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2359 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2360 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2363 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2370 handle->drainCounter = 0;
\r
2371 handle->internalDrain = false;
\r
2372 stream_.state = STREAM_RUNNING;
\r
2375 if ( result == 0 ) return;
\r
2376 error( RtError::SYSTEM_ERROR );
\r
2379 void RtApiJack :: stopStream( void )
\r
2382 if ( stream_.state == STREAM_STOPPED ) {
\r
2383 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2384 error( RtError::WARNING );
\r
2388 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2389 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2391 if ( handle->drainCounter == 0 ) {
\r
2392 handle->drainCounter = 2;
\r
2393 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2397 jack_deactivate( handle->client );
\r
2398 stream_.state = STREAM_STOPPED;
\r
2401 void RtApiJack :: abortStream( void )
\r
2404 if ( stream_.state == STREAM_STOPPED ) {
\r
2405 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2406 error( RtError::WARNING );
\r
2410 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2411 handle->drainCounter = 2;
\r
2416 // This function will be called by a spawned thread when the user
\r
2417 // callback function signals that the stream should be stopped or
\r
2418 // aborted. It is necessary to handle it this way because the
\r
2419 // callbackEvent() function must return before the jack_deactivate()
\r
2420 // function will return.
\r
2421 extern "C" void *jackStopStream( void *ptr )
\r
2423 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2424 RtApiJack *object = (RtApiJack *) info->object;
\r
2426 object->stopStream();
\r
2427 pthread_exit( NULL );
\r
2430 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2432 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2433 if ( stream_.state == STREAM_CLOSED ) {
\r
2434 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2435 error( RtError::WARNING );
\r
2438 if ( stream_.bufferSize != nframes ) {
\r
2439 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2440 error( RtError::WARNING );
\r
2444 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2445 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2447 // Check if we were draining the stream and signal is finished.
\r
2448 if ( handle->drainCounter > 3 ) {
\r
2450 stream_.state = STREAM_STOPPING;
\r
2451 if ( handle->internalDrain == true )
\r
2452 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2454 pthread_cond_signal( &handle->condition );
\r
2458 // Invoke user callback first, to get fresh output data.
\r
2459 if ( handle->drainCounter == 0 ) {
\r
2460 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2461 double streamTime = getStreamTime();
\r
2462 RtAudioStreamStatus status = 0;
\r
2463 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2464 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2465 handle->xrun[0] = false;
\r
2467 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2468 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2469 handle->xrun[1] = false;
\r
2471 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2472 stream_.bufferSize, streamTime, status, info->userData );
\r
2473 if ( cbReturnValue == 2 ) {
\r
2474 stream_.state = STREAM_STOPPING;
\r
2475 handle->drainCounter = 2;
\r
2477 pthread_create( &id, NULL, jackStopStream, info );
\r
2480 else if ( cbReturnValue == 1 ) {
\r
2481 handle->drainCounter = 1;
\r
2482 handle->internalDrain = true;
\r
2486 jack_default_audio_sample_t *jackbuffer;
\r
2487 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2490 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2492 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2493 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2494 memset( jackbuffer, 0, bufferBytes );
\r
2498 else if ( stream_.doConvertBuffer[0] ) {
\r
2500 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2502 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2503 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2504 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2507 else { // no buffer conversion
\r
2508 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2509 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2510 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2514 if ( handle->drainCounter ) {
\r
2515 handle->drainCounter++;
\r
2520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2522 if ( stream_.doConvertBuffer[1] ) {
\r
2523 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2524 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2525 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2529 else { // no buffer conversion
\r
2530 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2531 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2532 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2538 RtApi::tickStreamTime();
\r
2541 //******************** End of __UNIX_JACK__ *********************//
\r
2544 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2546 // The ASIO API is designed around a callback scheme, so this
\r
2547 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2548 // Jack. The primary constraint with ASIO is that it only allows
\r
2549 // access to a single driver at a time. Thus, it is not possible to
\r
2550 // have more than one simultaneous RtAudio stream.
\r
2552 // This implementation also requires a number of external ASIO files
\r
2553 // and a few global variables. The ASIO callback scheme does not
\r
2554 // allow for the passing of user data, so we must create a global
\r
2555 // pointer to our callbackInfo structure.
\r
2557 // On unix systems, we make use of a pthread condition variable.
\r
2558 // Since there is no equivalent in Windows, I hacked something based
\r
2559 // on information found in
\r
2560 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2562 #include "asiosys.h"
\r
2564 #include "iasiothiscallresolver.h"
\r
2565 #include "asiodrivers.h"
\r
2568 AsioDrivers drivers;
\r
2569 ASIOCallbacks asioCallbacks;
\r
2570 ASIODriverInfo driverInfo;
\r
2571 CallbackInfo *asioCallbackInfo;
\r
2574 struct AsioHandle {
\r
2575 int drainCounter; // Tracks callback counts when draining
\r
2576 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2577 ASIOBufferInfo *bufferInfos;
\r
2581 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2584 // Function declarations (definitions at end of section)
\r
2585 static const char* getAsioErrorString( ASIOError result );
\r
2586 void sampleRateChanged( ASIOSampleRate sRate );
\r
2587 long asioMessages( long selector, long value, void* message, double* opt );
\r
2589 RtApiAsio :: RtApiAsio()
\r
2591 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2592 // CoInitialize beforehand, but it must be for appartment threading
\r
2593 // (in which case, CoInitilialize will return S_FALSE here).
\r
2594 coInitialized_ = false;
\r
2595 HRESULT hr = CoInitialize( NULL );
\r
2596 if ( FAILED(hr) ) {
\r
2597 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2598 error( RtError::WARNING );
\r
2600 coInitialized_ = true;
\r
2602 drivers.removeCurrentDriver();
\r
2603 driverInfo.asioVersion = 2;
\r
2605 // See note in DirectSound implementation about GetDesktopWindow().
\r
2606 driverInfo.sysRef = GetForegroundWindow();
\r
2609 RtApiAsio :: ~RtApiAsio()
\r
2611 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2612 if ( coInitialized_ ) CoUninitialize();
\r
2615 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2617 return (unsigned int) drivers.asioGetNumDev();
\r
2620 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2622 RtAudio::DeviceInfo info;
\r
2623 info.probed = false;
\r
2626 unsigned int nDevices = getDeviceCount();
\r
2627 if ( nDevices == 0 ) {
\r
2628 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2629 error( RtError::INVALID_USE );
\r
2632 if ( device >= nDevices ) {
\r
2633 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2634 error( RtError::INVALID_USE );
\r
2637 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2638 if ( stream_.state != STREAM_CLOSED ) {
\r
2639 if ( device >= devices_.size() ) {
\r
2640 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2641 error( RtError::WARNING );
\r
2644 return devices_[ device ];
\r
2647 char driverName[32];
\r
2648 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2649 if ( result != ASE_OK ) {
\r
2650 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2651 errorText_ = errorStream_.str();
\r
2652 error( RtError::WARNING );
\r
2656 info.name = driverName;
\r
2658 if ( !drivers.loadDriver( driverName ) ) {
\r
2659 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2660 errorText_ = errorStream_.str();
\r
2661 error( RtError::WARNING );
\r
2665 result = ASIOInit( &driverInfo );
\r
2666 if ( result != ASE_OK ) {
\r
2667 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2668 errorText_ = errorStream_.str();
\r
2669 error( RtError::WARNING );
\r
2673 // Determine the device channel information.
\r
2674 long inputChannels, outputChannels;
\r
2675 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2676 if ( result != ASE_OK ) {
\r
2677 drivers.removeCurrentDriver();
\r
2678 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2679 errorText_ = errorStream_.str();
\r
2680 error( RtError::WARNING );
\r
2684 info.outputChannels = outputChannels;
\r
2685 info.inputChannels = inputChannels;
\r
2686 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2687 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2689 // Determine the supported sample rates.
\r
2690 info.sampleRates.clear();
\r
2691 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2692 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2693 if ( result == ASE_OK )
\r
2694 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2697 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2698 ASIOChannelInfo channelInfo;
\r
2699 channelInfo.channel = 0;
\r
2700 channelInfo.isInput = true;
\r
2701 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2702 result = ASIOGetChannelInfo( &channelInfo );
\r
2703 if ( result != ASE_OK ) {
\r
2704 drivers.removeCurrentDriver();
\r
2705 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2706 errorText_ = errorStream_.str();
\r
2707 error( RtError::WARNING );
\r
2711 info.nativeFormats = 0;
\r
2712 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2713 info.nativeFormats |= RTAUDIO_SINT16;
\r
2714 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2715 info.nativeFormats |= RTAUDIO_SINT32;
\r
2716 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2717 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2718 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2719 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2720 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2721 info.nativeFormats |= RTAUDIO_SINT24;
\r
2723 if ( info.outputChannels > 0 )
\r
2724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2725 if ( info.inputChannels > 0 )
\r
2726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2728 info.probed = true;
\r
2729 drivers.removeCurrentDriver();
\r
// ASIO driver callback: invoked by the driver each time a buffer half
// (index 0 or 1) is ready. Forwards to the RtApiAsio instance stored in
// the file-scope asioCallbackInfo pointer. processNow is unused here.
// NOTE(review): the function's enclosing braces were lost in extraction.
void bufferSwitch( long index, ASIOBool processNow )
RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
object->callbackEvent( index );
\r
// Probe every ASIO device and cache the results in devices_.  Called
// before a stream is opened because getDeviceInfo() cannot probe other
// drivers while one is loaded (ASIO allows only one active driver).
// NOTE(review): the function's enclosing braces were lost in extraction.
void RtApiAsio :: saveDeviceInfo( void )
unsigned int nDevices = getDeviceCount();
devices_.resize( nDevices );
for ( unsigned int i=0; i<nDevices; i++ )
  devices_[i] = getDeviceInfo( i );
\r
// Configure one direction (OUTPUT or INPUT) of an ASIO stream on the
// given device: load/init the driver, validate channels and sample
// rate, negotiate the buffer size, create the ASIO buffers, and fill in
// the stream_ bookkeeping.  For duplex operation this is called twice
// (OUTPUT first, then INPUT) and both calls must name the same device.
// Returns SUCCESS/FAILURE.
// NOTE(review): several error-path "return FAILURE;" lines, closing
// braces, and the trailing "error:" cleanup label were lost when this
// copy was extracted; code text below is preserved exactly as found,
// with comments added only.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
// For ASIO, a duplex stream MUST use the same driver.
if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
  errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

char driverName[32];
ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
if ( result != ASE_OK ) {
  errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
  errorText_ = errorStream_.str();

// Only load the driver once for duplex stream.
if ( mode != INPUT || stream_.mode != OUTPUT ) {
  // The getDeviceInfo() function will not work when a stream is open
  // because ASIO does not allow multiple devices to run at the same
  // time. Thus, we'll probe the system before opening a stream and
  // save the results for use by getDeviceInfo().
  this->saveDeviceInfo();

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();

// Check the device channel count.
long inputChannels, outputChannels;
result = ASIOGetChannels( &inputChannels, &outputChannels );
if ( result != ASE_OK ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
  errorText_ = errorStream_.str();

// Requested channel count plus the first-channel offset must fit in
// the direction being opened.
if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
     ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
  errorText_ = errorStream_.str();

stream_.nDeviceChannels[mode] = channels;
stream_.nUserChannels[mode] = channels;
stream_.channelOffset[mode] = firstChannel;

// Verify the sample rate is supported.
result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
if ( result != ASE_OK ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
  errorText_ = errorStream_.str();

// Get the current sample rate
ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is HTML-entity mojibake ("&curren"
// swallowed the ampersand) for "&currentRate" — this line must read
// ASIOGetSampleRate( &currentRate ); fix the encoding corruption.
result = ASIOGetSampleRate( ¤tRate );
if ( result != ASE_OK ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
  errorText_ = errorStream_.str();

// Set the sample rate only if necessary
if ( currentRate != sampleRate ) {
  result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();

// Determine the driver data type.
// Only the first channel of the requested direction is queried; the
// rest are assumed to use the same format.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
if ( mode == OUTPUT ) channelInfo.isInput = false;
else channelInfo.isInput = true;
result = ASIOGetChannelInfo( &channelInfo );
if ( result != ASE_OK ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
  errorText_ = errorStream_.str();

// Assuming WINDOWS host is always little-endian.
// Map the ASIO sample type to an RtAudio format; MSB variants need a
// byte swap on this (little-endian) host.
stream_.doByteSwap[mode] = false;
stream_.userFormat = format;
stream_.deviceFormat[mode] = 0;
if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
  stream_.deviceFormat[mode] = RTAUDIO_SINT16;
  if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
  stream_.deviceFormat[mode] = RTAUDIO_SINT32;
  if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
  if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
  stream_.deviceFormat[mode] = RTAUDIO_SINT24;
  if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;

// No mapping matched above => driver format is unsupported.
if ( stream_.deviceFormat[mode] == 0 ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
  errorText_ = errorStream_.str();

// Set the buffer size. For a duplex stream, this will end up
// setting the buffer size based on the input constraints, which
// should be ok.
long minSize, maxSize, preferSize, granularity;
result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
if ( result != ASE_OK ) {
  drivers.removeCurrentDriver();
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
  errorText_ = errorStream_.str();

// Clamp the requested buffer size into [minSize, maxSize]; a
// granularity of -1 means only power-of-two sizes are allowed.
if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
else if ( granularity == -1 ) {
  // Make sure bufferSize is a power of two.
  int log2_of_min_size = 0;
  int log2_of_max_size = 0;

  // Find the highest set bit of minSize and maxSize.
  for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
    if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
    if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

  // Choose the power of two closest to the requested size ...
  long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
  int min_delta_num = log2_of_min_size;

  for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
    long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
    if (current_delta < min_delta) {
      min_delta = current_delta;
      min_delta_num = i;

  // ... then re-clamp to the driver's limits.
  *bufferSize = ( (unsigned int)1 << min_delta_num );
  if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
  else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

else if ( granularity != 0 ) {
  // Set to an even multiple of granularity, rounding up.
  *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

// For duplex, the INPUT pass must end up with the same buffer size the
// OUTPUT pass already committed to.
if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
  drivers.removeCurrentDriver();
  errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

stream_.bufferSize = *bufferSize;
stream_.nBuffers = 2;

if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
else stream_.userInterleaved = true;

// ASIO always uses non-interleaved buffers.
stream_.deviceInterleaved[mode] = false;

// Allocate, if necessary, our AsioHandle structure for the stream.
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
if ( handle == 0 ) {
  handle = new AsioHandle;
  catch ( std::bad_alloc& ) {
  //if ( handle == NULL ) {
  drivers.removeCurrentDriver();
  errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";

handle->bufferInfos = 0;

// Create a manual-reset event.
// Used by stopStream() to block until the callback signals the drain
// is complete.
handle->condition = CreateEvent( NULL, // no security
                                 TRUE, // manual-reset
                                 FALSE, // non-signaled initially
                                 NULL ); // unnamed
stream_.apiHandle = (void *) handle;

// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
long inputLatency, outputLatency;
if ( mode == INPUT && stream_.mode == OUTPUT ) {
  ASIODisposeBuffers();
  if ( handle->bufferInfos ) free( handle->bufferInfos );

// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
bool buffersAllocated = false;
unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
if ( handle->bufferInfos == NULL ) {
  errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
  errorText_ = errorStream_.str();

// Output channels first (isInput false), then input channels, each
// offset by the caller's firstChannel for that direction.
ASIOBufferInfo *infos;
infos = handle->bufferInfos;
for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
  infos->isInput = ASIOFalse;
  infos->channelNum = i + stream_.channelOffset[0];
  infos->buffers[0] = infos->buffers[1] = 0;
for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
  infos->isInput = ASIOTrue;
  infos->channelNum = i + stream_.channelOffset[1];
  infos->buffers[0] = infos->buffers[1] = 0;

// Set up the ASIO callback structure and create the ASIO data buffers.
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = NULL;
result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
if ( result != ASE_OK ) {
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
  errorText_ = errorStream_.str();
buffersAllocated = true;

// Set flags for buffer conversion.
stream_.doConvertBuffer[mode] = false;
if ( stream_.userFormat != stream_.deviceFormat[mode] )
  stream_.doConvertBuffer[mode] = true;
if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
     stream_.nUserChannels[mode] > 1 )
  stream_.doConvertBuffer[mode] = true;

// Allocate necessary internal buffers
unsigned long bufferBytes;
bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
if ( stream_.userBuffer[mode] == NULL ) {
  errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

if ( stream_.doConvertBuffer[mode] ) {

  bool makeBuffer = true;
  bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
  if ( mode == INPUT ) {
    // For duplex, reuse the output-pass device buffer if it is large
    // enough for the input side too.
    if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
      unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
      if ( bufferBytes <= bytesOut ) makeBuffer = false;

  if ( makeBuffer ) {
    bufferBytes *= *bufferSize;
    if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
    stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
    if ( stream_.deviceBuffer == NULL ) {
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

// Commit stream bookkeeping.
stream_.sampleRate = sampleRate;
stream_.device[mode] = device;
stream_.state = STREAM_STOPPED;
asioCallbackInfo = &stream_.callbackInfo;
stream_.callbackInfo.object = (void *) this;
if ( stream_.mode == OUTPUT && mode == INPUT )
  // We had already set up an output stream.
  stream_.mode = DUPLEX;
  stream_.mode = mode;

// Determine device latencies
result = ASIOGetLatencies( &inputLatency, &outputLatency );
if ( result != ASE_OK ) {
  errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
  errorText_ = errorStream_.str();
  error( RtError::WARNING); // warn but don't fail
stream_.latency[0] = outputLatency;
stream_.latency[1] = inputLatency;

// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

// Failure-cleanup section (original "error:" label lost in
// extraction): undo everything allocated above.
if ( buffersAllocated )
  ASIODisposeBuffers();
drivers.removeCurrentDriver();

CloseHandle( handle->condition );
if ( handle->bufferInfos )
  free( handle->bufferInfos );

stream_.apiHandle = 0;

for ( int i=0; i<2; i++ ) {
  if ( stream_.userBuffer[i] ) {
    free( stream_.userBuffer[i] );
    stream_.userBuffer[i] = 0;

if ( stream_.deviceBuffer ) {
  free( stream_.deviceBuffer );
  stream_.deviceBuffer = 0;
\r
// Tear down the open stream: stop it if running, dispose the ASIO
// buffers, unload the driver, free the AsioHandle (and its Win32
// event), and release all user/device buffers.  Warns (no-op) if no
// stream is open.
// NOTE(review): ASIOStop() call, closing braces and NULL-checks lost
// in extraction; code preserved as found.
void RtApiAsio :: closeStream()
if ( stream_.state == STREAM_CLOSED ) {
  errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
  error( RtError::WARNING );

if ( stream_.state == STREAM_RUNNING ) {
  stream_.state = STREAM_STOPPED;

ASIODisposeBuffers();
drivers.removeCurrentDriver();

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
CloseHandle( handle->condition );
if ( handle->bufferInfos )
  free( handle->bufferInfos );
stream_.apiHandle = 0;

// Release per-direction user buffers.
for ( int i=0; i<2; i++ ) {
  if ( stream_.userBuffer[i] ) {
    free( stream_.userBuffer[i] );
    stream_.userBuffer[i] = 0;

if ( stream_.deviceBuffer ) {
  free( stream_.deviceBuffer );
  stream_.deviceBuffer = 0;

stream_.mode = UNINITIALIZED;
stream_.state = STREAM_CLOSED;
\r
// File-scope flag shared by the ASIO start/stop logic; reset to false
// in startStream().  (Its setters are not visible in this excerpt —
// presumably set when a stop is requested from the callback thread;
// TODO confirm against the full file.)
bool stopThreadCalled = false;
\r
// Start the open stream via ASIOStart(), reset the drain bookkeeping
// and the stop-signalling event, and mark the stream RUNNING.  Warns
// (no-op) if the stream is already running; raises SYSTEM_ERROR if the
// driver refuses to start.
void RtApiAsio :: startStream()
if ( stream_.state == STREAM_RUNNING ) {
  errorText_ = "RtApiAsio::startStream(): the stream is already running!";
  error( RtError::WARNING );

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
ASIOError result = ASIOStart();
if ( result != ASE_OK ) {
  errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
  errorText_ = errorStream_.str();

// Fresh drain state: the event must be non-signaled so stopStream()
// can block on it.
handle->drainCounter = 0;
handle->internalDrain = false;
ResetEvent( handle->condition );
stream_.state = STREAM_RUNNING;

stopThreadCalled = false;

if ( result == ASE_OK ) return;
error( RtError::SYSTEM_ERROR );
\r
// Stop the stream.  If output is active, request a drain (drainCounter
// = 2) and block on the handle's event until callbackEvent() signals
// that the final buffers have been played, then call ASIOStop().
// Warns (no-op) if already stopped; raises SYSTEM_ERROR on driver
// failure.
void RtApiAsio :: stopStream()
if ( stream_.state == STREAM_STOPPED ) {
  errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
  error( RtError::WARNING );

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
  if ( handle->drainCounter == 0 ) {
    handle->drainCounter = 2;
    WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

stream_.state = STREAM_STOPPED;

ASIOError result = ASIOStop();
if ( result != ASE_OK ) {
  errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
  errorText_ = errorStream_.str();

if ( result == ASE_OK ) return;
error( RtError::SYSTEM_ERROR );
\r
// Abort the stream.  Deliberately identical to stopStream() (see the
// comment below); the delegating stopStream() call at the end of this
// function was lost in extraction.
void RtApiAsio :: abortStream()
if ( stream_.state == STREAM_STOPPED ) {
  errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
  error( RtError::WARNING );

// The following lines were commented-out because some behavior was
// noted where the device buffers need to be zeroed to avoid
// continuing sound, even when the device buffers are completely
// disposed. So now, calling abort is the same as calling stop.
// AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
// handle->drainCounter = 2;
\r
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
// NOTE(review): enclosing braces and the trailing "return 0;" were
// lost in extraction.
extern "C" unsigned __stdcall asioStopStream( void *ptr )
CallbackInfo *info = (CallbackInfo *) ptr;
RtApiAsio *object = (RtApiAsio *) info->object;

object->stopStream();
_endthreadex( 0 );
\r
// Per-buffer workhorse, called from bufferSwitch() for each ready
// buffer half (bufferIndex 0/1).  Runs the user callback, converts /
// byte-swaps / de-interleaves data between the user buffers and the
// per-channel ASIO buffers, and manages the drain protocol used by
// stopStream()/abortStream().
// NOTE(review): numerous closing braces, "return SUCCESS;" lines and
// an "else {" were lost in extraction; code preserved as found.
bool RtApiAsio :: callbackEvent( long bufferIndex )
if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
if ( stream_.state == STREAM_CLOSED ) {
  errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
  error( RtError::WARNING );

CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

// Check if we were draining the stream and signal if finished.
if ( handle->drainCounter > 3 ) {

  stream_.state = STREAM_STOPPING;
  if ( handle->internalDrain == false )
    // External stop request: wake the thread blocked in stopStream().
    SetEvent( handle->condition );
  else { // spawn a thread to stop the stream
    unsigned threadId;
    stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                  &stream_.callbackInfo, 0, &threadId );

// Invoke user callback to get fresh output data UNLESS we are
// draining stream.
if ( handle->drainCounter == 0 ) {
  RtAudioCallback callback = (RtAudioCallback) info->callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report any over/underflow flagged by the driver since last call.
  if ( stream_.mode != INPUT && asioXRun == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
  if ( stream_.mode != OUTPUT && asioXRun == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
  int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                stream_.bufferSize, streamTime, status, info->userData );
  // Return value 2 = abort immediately (spawn stop thread);
  // 1 = drain remaining output, then stop from inside the callback.
  if ( cbReturnValue == 2 ) {
    stream_.state = STREAM_STOPPING;
    handle->drainCounter = 2;
    unsigned threadId;
    stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                  &stream_.callbackInfo, 0, &threadId );
  else if ( cbReturnValue == 1 ) {
    handle->drainCounter = 1;
    handle->internalDrain = true;

unsigned int nChannels, bufferBytes, i, j;
nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

  bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

  if ( handle->drainCounter > 1 ) { // write zeros to the output stream

    for ( i=0, j=0; i<nChannels; i++ ) {
      if ( handle->bufferInfos[i].isInput != ASIOTrue )
        memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

  else if ( stream_.doConvertBuffer[0] ) {

    // Convert user format/interleaving into the device buffer, then
    // scatter it channel-by-channel into the ASIO output buffers.
    convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( stream_.deviceBuffer,
                      stream_.bufferSize * stream_.nDeviceChannels[0],
                      stream_.deviceFormat[0] );

    for ( i=0, j=0; i<nChannels; i++ ) {
      if ( handle->bufferInfos[i].isInput != ASIOTrue )
        memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

    // No conversion needed: copy straight from the user buffer.
    // (The "else {" opening this branch was lost in extraction.)
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( stream_.userBuffer[0],
                      stream_.bufferSize * stream_.nUserChannels[0],
                      stream_.userFormat );

    for ( i=0, j=0; i<nChannels; i++ ) {
      if ( handle->bufferInfos[i].isInput != ASIOTrue )
        memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

  // Count drained callback passes; >3 triggers the stop logic above.
  if ( handle->drainCounter ) {
    handle->drainCounter++;

if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

  bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

  if (stream_.doConvertBuffer[1]) {

    // Always interleave ASIO input data.
    for ( i=0, j=0; i<nChannels; i++ ) {
      if ( handle->bufferInfos[i].isInput == ASIOTrue )
        memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                handle->bufferInfos[i].buffers[bufferIndex],
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( stream_.deviceBuffer,
                      stream_.bufferSize * stream_.nDeviceChannels[1],
                      stream_.deviceFormat[1] );
    convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // No conversion: copy device channels directly to the user buffer.
    for ( i=0, j=0; i<nChannels; i++ ) {
      if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
        memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                handle->bufferInfos[i].buffers[bufferIndex],

    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( stream_.userBuffer[1],
                      stream_.bufferSize * stream_.nUserChannels[1],
                      stream_.userFormat );

// The following call was suggested by Malte Clasen. While the API
// documentation indicates it should not be required, some device
// drivers apparently do not function correctly without it.
ASIOOutputReady();

RtApi::tickStreamTime();
\r
// ASIO driver callback: the driver reports a sample rate change
// (typically under external sync).  Response here is to stop the
// stream and inform the user via stderr.
// NOTE(review): the "try {" opening the exception guard around
// stopStream() was lost in extraction.
void sampleRateChanged( ASIOSampleRate sRate )
// The ASIO documentation says that this usually only happens during
// external sync. Audio processing is not stopped by the driver,
// actual sample rate might not have even changed, maybe only the
// sample rate status of an AES/EBU or S/PDIF digital input at the
// audio device.

RtApi *object = (RtApi *) asioCallbackInfo->object;
object->stopStream();
catch ( RtError &exception ) {
  std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO driver message callback.  Answers the driver's capability
// queries and informational notices; by convention returns non-zero
// for handled/supported selectors.
// NOTE(review): the "long ret = 0;", per-case "ret = ...; break;"
// lines and the final "return ret;" were lost in extraction; code
// preserved as found.
long asioMessages( long selector, long value, void* message, double* opt )
switch( selector ) {
case kAsioSelectorSupported:
  // Driver asks whether we support the selector passed in "value".
  if ( value == kAsioResetRequest
       || value == kAsioEngineVersion
       || value == kAsioResyncRequest
       || value == kAsioLatenciesChanged
       // The following three were added for ASIO 2.0, you don't
       // necessarily have to support them.
       || value == kAsioSupportsTimeInfo
       || value == kAsioSupportsTimeCode
       || value == kAsioSupportsInputMonitor)
case kAsioResetRequest:
  // Defer the task and perform the reset of the driver during the
  // next "safe" situation. You cannot reset the driver right now,
  // as this code is called from the driver. Reset the driver is
  // done by completely destruct is. I.e. ASIOStop(),
  // ASIODisposeBuffers(), Destruction Afterwards you initialize the
  // driver again.
  std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
case kAsioResyncRequest:
  // This informs the application that the driver encountered some
  // non-fatal data loss. It is used for synchronization purposes
  // of different media. Added mainly to work around the Win16Mutex
  // problems in Windows 95/98 with the Windows Multimedia system,
  // which could lose data because the Mutex was held too long by
  // another thread. However a driver can issue it in other
  // situations, too.
  // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
case kAsioLatenciesChanged:
  // This will inform the host application that the drivers were
  // latencies changed. Beware, it this does not mean that the
  // buffer sizes have changed! You might need to update internal
  // delay data.
  std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
case kAsioEngineVersion:
  // Return the supported ASIO version of the host application. If
  // a host application does not implement this selector, ASIO 1.0
  // is assumed by the driver.
case kAsioSupportsTimeInfo:
  // Informs the driver whether the
  // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
  // For compatibility with ASIO 1.0 drivers the host application
  // should always support the "old" bufferSwitch method, too.
case kAsioSupportsTimeCode:
  // Informs the driver whether application is interested in time
  // code info. If an application does not need to know about time
  // code, the driver has less work to do.
\r
// Map an ASIOError code to a human-readable message; returns a static
// string ("Unknown error." for unrecognized codes).
// NOTE(review): the local "struct Messages { ASIOError value; const
// char* message; };" definition and the array's enclosing braces were
// lost in extraction; the "message" local below is unused in the
// visible code.
static const char* getAsioErrorString( ASIOError result )
const char*message;

static Messages m[] = 
  { ASE_NotPresent, "Hardware input or output is not present or available." },
  { ASE_HWMalfunction, "Hardware is malfunctioning." },
  { ASE_InvalidParameter, "Invalid input parameter." },
  { ASE_InvalidMode, "Invalid mode." },
  { ASE_SPNotAdvancing, "Sample position not advancing." },
  { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
  { ASE_NoMemory, "Not enough memory to complete the request." }

// Linear scan of the fixed table; fall through to a generic message.
for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
  if ( m[i].value == result ) return m[i].message;

return "Unknown error.";
\r
3510 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3514 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3516 // Modified by Robin Davies, October 2005
\r
3517 // - Improvements to DirectX pointer chasing.
\r
3518 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3519 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3520 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3521 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3523 #include <dsound.h>
\r
3524 #include <assert.h>
\r
3525 #include <algorithm>
\r
3527 #if defined(__MINGW32__)
\r
3528 // missing from latest mingw winapi
\r
3529 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3530 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3531 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3532 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3535 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3537 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3538 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3541 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3543 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3544 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3545 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3546 return pointer >= earlierPointer && pointer < laterPointer;
\r
// A structure to hold various information related to the DirectSound
// API implementation.
// NOTE(review): the "struct DsHandle {" header and several member
// declarations (device ids, buffer pointers, xrun flags) were lost in
// extraction; only the members below and the constructor initializer
// survive.
unsigned int drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
UINT bufferPointer[2]; // per-direction play/write cursor bookkeeping
DWORD dsBufferSize[2]; // per-direction DirectSound buffer sizes (bytes)
DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor: zero-initialize all per-direction state.
:drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.
// NOTE(review): a parameter line of deviceQueryCallback and the
// "struct DsDevice {" header (its members found/validId/id/name) were
// lost in extraction.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPVOID lpContext );

static const char* getErrorString( int code );

extern "C" unsigned __stdcall callbackHandler( void *ptr );

// DsDevice constructor fragment: mark device not-yet-found and both
// direction ids invalid.
: found(false) { validId[0] = false; validId[1] = false; }

// File-scope registry of enumerated DirectSound devices.
std::vector< DsDevice > dsDevices;
\r
3589 RtApiDs :: RtApiDs()
\r
3591 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3592 // accept whatever the mainline chose for a threading model.
\r
3593 coInitialized_ = false;
\r
3594 HRESULT hr = CoInitialize( NULL );
\r
3595 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize (if it succeeded)
// and make sure any still-open stream is torn down before the API
// object disappears.
3598 RtApiDs :: ~RtApiDs()
3600   if ( coInitialized_ ) CoUninitialize(); // balanced call.
3601   if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3604 // The DirectSound default output is always the first device.
\r
3605 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3610 // The DirectSound default input is always the first input device,
\r
3611 // which is the first capture device enumerated.
\r
3612 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3617 unsigned int RtApiDs :: getDeviceCount( void )
\r
3619 // Set query flag for previously found devices to false, so that we
\r
3620 // can check for any devices that have disappeared.
\r
3621 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3622 dsDevices[i].found = false;
\r
3624 // Query DirectSound devices.
\r
3625 bool isInput = false;
\r
3626 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3627 if ( FAILED( result ) ) {
\r
3628 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3629 errorText_ = errorStream_.str();
\r
3630 error( RtError::WARNING );
\r
3633 // Query DirectSoundCapture devices.
\r
3635 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3636 if ( FAILED( result ) ) {
\r
3637 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3638 errorText_ = errorStream_.str();
\r
3639 error( RtError::WARNING );
\r
3642 // Clean out any devices that may have disappeared.
\r
3643 std::vector< int > indices;
\r
3644 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3645 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3646 unsigned int nErased = 0;
\r
3647 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3648 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3650 return dsDevices.size();
\r
// Probe capability information (channel counts, supported sample rates,
// native formats, name) for the DirectSound device at the given index.
// On any probe failure, a DeviceInfo with probed == false is returned
// through the WARNING error path.
// NOTE(review): this listing has elided lines (early returns / closing
// braces / some declarations are missing from the extraction); the
// comments below describe only the visible logic.
3653 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
3655 RtAudio::DeviceInfo info;
3656 info.probed = false; // pessimistic default until a probe succeeds
// Lazily (re)enumerate if the cached device list is empty.
3658 if ( dsDevices.size() == 0 ) {
3659 // Force a query of all devices
3661 if ( dsDevices.size() == 0 ) {
3662 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
3663 error( RtError::INVALID_USE );
3667 if ( device >= dsDevices.size() ) {
3668 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
3669 error( RtError::INVALID_USE );
// Devices without a valid output (render) GUID are capture-only; jump
// straight to the input probe below.
3673 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
3675 LPDIRECTSOUND output;
3677 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
3678 if ( FAILED( result ) ) {
3679 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
3680 errorText_ = errorStream_.str();
3681 error( RtError::WARNING );
3685 outCaps.dwSize = sizeof( outCaps );
3686 result = output->GetCaps( &outCaps );
3687 if ( FAILED( result ) ) {
3688 output->Release();
3689 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
3690 errorText_ = errorStream_.str();
3691 error( RtError::WARNING );
3695 // Get output channel information.
3696 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
3698 // Get sample rate information.
// Keep only the fixed candidate rates that fall inside the device's
// reported secondary-buffer rate range.
3699 info.sampleRates.clear();
3700 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
3701 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
3702 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
3703 info.sampleRates.push_back( SAMPLE_RATES[k] );
3706 // Get format information.
3707 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
3708 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
3710 output->Release();
3712 if ( getDefaultOutputDevice() == device )
3713 info.isDefaultOutput = true;
// No capture GUID: this is an output-only device — finish up here.
3715 if ( dsDevices[ device ].validId[1] == false ) {
3716 info.name = dsDevices[ device ].name;
3717 info.probed = true;
// Capture-side probe (also the goto target for capture-only devices).
3723 LPDIRECTSOUNDCAPTURE input;
3724 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
3725 if ( FAILED( result ) ) {
3726 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
3727 errorText_ = errorStream_.str();
3728 error( RtError::WARNING );
3733 inCaps.dwSize = sizeof( inCaps );
3734 result = input->GetCaps( &inCaps );
3735 if ( FAILED( result ) ) {
3737 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
3738 errorText_ = errorStream_.str();
3739 error( RtError::WARNING );
3743 // Get input channel information.
3744 info.inputChannels = inCaps.dwChannels;
3746 // Get sample rate and format information.
// DSCCAPS reports capture support as WAVE_FORMAT_* bit flags, one bit
// per (rate, channels, width) combination; decode them into the
// nativeFormats mask and a list of candidate rates.
3747 std::vector<unsigned int> rates;
3748 if ( inCaps.dwChannels >= 2 ) {
3749 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3750 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3751 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3752 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
3753 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3754 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3755 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
3756 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Prefer 16-bit rates when 16-bit capture exists at all.
3758 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
3759 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
3760 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
3761 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
3762 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
3764 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
3765 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
3766 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
3767 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
3768 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
3771 else if ( inCaps.dwChannels == 1 ) {
3772 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3773 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3774 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3775 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
3776 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3777 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3778 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3779 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
3781 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
3782 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
3783 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
3784 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
3785 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
3787 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
3788 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
3789 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
3790 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
3791 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
3794 else info.inputChannels = 0; // technically, this would be an error
3798 if ( info.inputChannels == 0 ) return info;
3800 // Copy the supported rates to the info structure but avoid duplication.
3802 for ( unsigned int i=0; i<rates.size(); i++ ) {
3804 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
3805 if ( rates[i] == info.sampleRates[j] ) {
3810 if ( found == false ) info.sampleRates.push_back( rates[i] );
3812 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
3814 // If device opens for both playback and capture, we determine the channels.
3815 if ( info.outputChannels > 0 && info.inputChannels > 0 )
3816 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// The first enumerated capture device is the system default input.
3818 if ( device == 0 ) info.isDefaultInput = true;
3820 // Copy name and return.
3821 info.name = dsDevices[ device ].name;
3822 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on the
// given device: validates parameters, creates the DirectSound (or
// DirectSoundCapture) object and its secondary buffer, sizes and zeros
// the device ring buffer, allocates user/device conversion buffers, and
// (on the first call) creates the DsHandle and callback thread.
// Returns false (via the FAILURE/error paths) if any step fails.
// NOTE(review): this listing has elided lines (returns, closing braces,
// a few declarations such as DSCAPS/DSBCAPS and the error-unwind label);
// comments describe only the visible logic.
3826 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3827 unsigned int firstChannel, unsigned int sampleRate,
3828 RtAudioFormat format, unsigned int *bufferSize,
3829 RtAudio::StreamOptions *options )
// DirectSound buffers are at most stereo; firstChannel acts as an offset
// within that 2-channel buffer.
3831 if ( channels + firstChannel > 2 ) {
3832 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
3836 unsigned int nDevices = dsDevices.size();
3837 if ( nDevices == 0 ) {
3838 // This should not happen because a check is made before this function is called.
3839 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
3843 if ( device >= nDevices ) {
3844 // This should not happen because a check is made before this function is called.
3845 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
3849 if ( mode == OUTPUT ) {
3850 if ( dsDevices[ device ].validId[0] == false ) {
3851 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
3852 errorText_ = errorStream_.str();
3856 else { // mode == INPUT
3857 if ( dsDevices[ device ].validId[1] == false ) {
3858 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
3859 errorText_ = errorStream_.str();
3864 // According to a note in PortAudio, using GetDesktopWindow()
3865 // instead of GetForegroundWindow() is supposed to avoid problems
3866 // that occur when the application's window is not the foreground
3867 // window. Also, if the application window closes before the
3868 // DirectSound buffer, DirectSound can crash. In the past, I had
3869 // problems when using GetDesktopWindow() but it seems fine now
3870 // (January 2010). I'll leave it commented here.
3871 // HWND hWnd = GetForegroundWindow();
3872 HWND hWnd = GetDesktopWindow();
3874 // Check the numberOfBuffers parameter and limit the lowest value to
3875 // two. This is a judgement call and a value of two is probably too
3876 // low for capture, but it should work for playback.
3878 if ( options ) nBuffers = options->numberOfBuffers;
3879 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
3880 if ( nBuffers < 2 ) nBuffers = 3;
3882 // Check the lower range of the user-specified buffer size and set
3883 // (arbitrarily) to a lower bound of 32.
3884 if ( *bufferSize < 32 ) *bufferSize = 32;
3886 // Create the wave format structure. The data format setting will
3887 // be determined later.
3888 WAVEFORMATEX waveFormat;
3889 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
3890 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
3891 waveFormat.nChannels = channels + firstChannel;
3892 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
3894 // Determine the device buffer size. By default, we'll use the value
3895 // defined above (32K), but we will grow it to make allowances for
3896 // very large software buffer sizes.
3897 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
3898 DWORD dsPointerLeadTime = 0;
3900 void *ohandle = 0, *bhandle = 0;
3902 if ( mode == OUTPUT ) {
3904 LPDIRECTSOUND output;
3905 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
3906 if ( FAILED( result ) ) {
3907 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
3908 errorText_ = errorStream_.str();
3913 outCaps.dwSize = sizeof( outCaps );
3914 result = output->GetCaps( &outCaps );
3915 if ( FAILED( result ) ) {
3916 output->Release();
3917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
3918 errorText_ = errorStream_.str();
3922 // Check channel information.
3923 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
3924 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
3925 errorText_ = errorStream_.str();
3929 // Check format information. Use 16-bit format unless not
3930 // supported or user requests 8-bit.
3931 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
3932 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
3933 waveFormat.wBitsPerSample = 16;
3934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3937 waveFormat.wBitsPerSample = 8;
3938 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
3940 stream_.userFormat = format;
3942 // Update wave format structure and buffer information.
3943 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
3944 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Bytes the write cursor is allowed to lead the play cursor by.
3945 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
3947 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
3948 while ( dsPointerLeadTime * 2U > dsBufferSize )
3949 dsBufferSize *= 2;
3951 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
3952 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
3953 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
3954 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
3955 if ( FAILED( result ) ) {
3956 output->Release();
3957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
3958 errorText_ = errorStream_.str();
3962 // Even though we will write to the secondary buffer, we need to
3963 // access the primary buffer to set the correct output format
3964 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
3965 // buffer description.
3966 DSBUFFERDESC bufferDescription;
3967 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
3968 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
3969 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
3971 // Obtain the primary buffer
3972 LPDIRECTSOUNDBUFFER buffer;
3973 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
3974 if ( FAILED( result ) ) {
3975 output->Release();
3976 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
3977 errorText_ = errorStream_.str();
3981 // Set the primary DS buffer sound format.
3982 result = buffer->SetFormat( &waveFormat );
3983 if ( FAILED( result ) ) {
3984 output->Release();
3985 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
3986 errorText_ = errorStream_.str();
3990 // Setup the secondary DS buffer description.
3991 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
3992 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
3993 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
3994 DSBCAPS_GLOBALFOCUS |
3995 DSBCAPS_GETCURRENTPOSITION2 |
3996 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
3997 bufferDescription.dwBufferBytes = dsBufferSize;
3998 bufferDescription.lpwfxFormat = &waveFormat;
4000 // Try to create the secondary DS buffer. If that doesn't work,
4001 // try to use software mixing. Otherwise, there's a problem.
4002 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
4003 if ( FAILED( result ) ) {
4004 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
4005 DSBCAPS_GLOBALFOCUS |
4006 DSBCAPS_GETCURRENTPOSITION2 |
4007 DSBCAPS_LOCSOFTWARE ); // Force software mixing
4008 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
4009 if ( FAILED( result ) ) {
4010 output->Release();
4011 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
4012 errorText_ = errorStream_.str();
4017 // Get the buffer size ... might be different from what we specified.
4019 dsbcaps.dwSize = sizeof( DSBCAPS );
4020 result = buffer->GetCaps( &dsbcaps );
4021 if ( FAILED( result ) ) {
4022 output->Release();
4023 buffer->Release();
4024 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
4025 errorText_ = errorStream_.str();
4029 dsBufferSize = dsbcaps.dwBufferBytes;
4031 // Lock the DS buffer
4034 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
4035 if ( FAILED( result ) ) {
4036 output->Release();
4037 buffer->Release();
4038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
4039 errorText_ = errorStream_.str();
4043 // Zero the DS buffer
4044 ZeroMemory( audioPtr, dataLen );
4046 // Unlock the DS buffer
4047 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
4048 if ( FAILED( result ) ) {
4049 output->Release();
4050 buffer->Release();
4051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
4052 errorText_ = errorStream_.str();
// Stash the COM object and buffer as opaque handles for DsHandle.
4056 ohandle = (void *) output;
4057 bhandle = (void *) buffer;
4060 if ( mode == INPUT ) {
4062 LPDIRECTSOUNDCAPTURE input;
4063 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
4064 if ( FAILED( result ) ) {
4065 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
4066 errorText_ = errorStream_.str();
4071 inCaps.dwSize = sizeof( inCaps );
4072 result = input->GetCaps( &inCaps );
4073 if ( FAILED( result ) ) {
4075 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
4076 errorText_ = errorStream_.str();
4080 // Check channel information.
4081 if ( inCaps.dwChannels < channels + firstChannel ) {
4082 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
4086 // Check format information. Use 16-bit format unless user
4087 // requests 8-bit.
4088 DWORD deviceFormats;
4089 if ( channels + firstChannel == 2 ) {
4090 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
4091 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
4092 waveFormat.wBitsPerSample = 8;
4093 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
4095 else { // assume 16-bit is supported
4096 waveFormat.wBitsPerSample = 16;
4097 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
4100 else { // channel == 1
4101 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
4102 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
4103 waveFormat.wBitsPerSample = 8;
4104 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
4106 else { // assume 16-bit is supported
4107 waveFormat.wBitsPerSample = 16;
4108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
4111 stream_.userFormat = format;
4113 // Update wave format structure and buffer information.
4114 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
4115 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
4116 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
4118 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
4119 while ( dsPointerLeadTime * 2U > dsBufferSize )
4120 dsBufferSize *= 2;
4122 // Setup the secondary DS buffer description.
4123 DSCBUFFERDESC bufferDescription;
4124 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
4125 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
4126 bufferDescription.dwFlags = 0;
4127 bufferDescription.dwReserved = 0;
4128 bufferDescription.dwBufferBytes = dsBufferSize;
4129 bufferDescription.lpwfxFormat = &waveFormat;
4131 // Create the capture buffer.
4132 LPDIRECTSOUNDCAPTUREBUFFER buffer;
4133 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
4134 if ( FAILED( result ) ) {
4136 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
4137 errorText_ = errorStream_.str();
4141 // Get the buffer size ... might be different from what we specified.
4142 DSCBCAPS dscbcaps;
4143 dscbcaps.dwSize = sizeof( DSCBCAPS );
4144 result = buffer->GetCaps( &dscbcaps );
4145 if ( FAILED( result ) ) {
4147 buffer->Release();
4148 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
4149 errorText_ = errorStream_.str();
4153 dsBufferSize = dscbcaps.dwBufferBytes;
4155 // NOTE: We could have a problem here if this is a duplex stream
4156 // and the play and capture hardware buffer sizes are different
4157 // (I'm actually not sure if that is a problem or not).
4158 // Currently, we are not verifying that.
4160 // Lock the capture buffer
4163 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
4164 if ( FAILED( result ) ) {
4166 buffer->Release();
4167 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
4168 errorText_ = errorStream_.str();
4172 // Zero the buffer
4173 ZeroMemory( audioPtr, dataLen );
4175 // Unlock the buffer
4176 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
4177 if ( FAILED( result ) ) {
4179 buffer->Release();
4180 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
4181 errorText_ = errorStream_.str();
4185 ohandle = (void *) input;
4186 bhandle = (void *) buffer;
4189 // Set various stream parameters
4190 DsHandle *handle = 0;
4191 stream_.nDeviceChannels[mode] = channels + firstChannel;
4192 stream_.nUserChannels[mode] = channels;
4193 stream_.bufferSize = *bufferSize;
4194 stream_.channelOffset[mode] = firstChannel;
4195 stream_.deviceInterleaved[mode] = true;
4196 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
4197 else stream_.userInterleaved = true;
4199 // Set flag for buffer conversion
// Conversion is needed when channel count, sample format, or
// interleaving differ between the user and device sides.
4200 stream_.doConvertBuffer[mode] = false;
4201 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
4202 stream_.doConvertBuffer[mode] = true;
4203 if (stream_.userFormat != stream_.deviceFormat[mode])
4204 stream_.doConvertBuffer[mode] = true;
4205 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4206 stream_.nUserChannels[mode] > 1 )
4207 stream_.doConvertBuffer[mode] = true;
4209 // Allocate necessary internal buffers
4210 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
4211 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
4212 if ( stream_.userBuffer[mode] == NULL ) {
4213 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
4217 if ( stream_.doConvertBuffer[mode] ) {
4219 bool makeBuffer = true;
4220 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
4221 if ( mode == INPUT ) {
// In duplex mode, reuse the output-side device buffer if it is
// already large enough for the input side.
4222 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
4223 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
4224 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
4228 if ( makeBuffer ) {
4229 bufferBytes *= *bufferSize;
4230 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
4231 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
4232 if ( stream_.deviceBuffer == NULL ) {
4233 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
4239 // Allocate our DsHandle structures for the stream.
4240 if ( stream_.apiHandle == 0 ) {
4242 handle = new DsHandle;
4244 catch ( std::bad_alloc& ) {
4245 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
4249 // Create a manual-reset event.
4250 handle->condition = CreateEvent( NULL, // no security
4251 TRUE, // manual-reset
4252 FALSE, // non-signaled initially
4253 NULL ); // unnamed
4254 stream_.apiHandle = (void *) handle;
4257 handle = (DsHandle *) stream_.apiHandle;
4258 handle->id[mode] = ohandle;
4259 handle->buffer[mode] = bhandle;
4260 handle->dsBufferSize[mode] = dsBufferSize;
4261 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
4263 stream_.device[mode] = device;
4264 stream_.state = STREAM_STOPPED;
4265 if ( stream_.mode == OUTPUT && mode == INPUT )
4266 // We had already set up an output stream.
4267 stream_.mode = DUPLEX;
4269 stream_.mode = mode;
4270 stream_.nBuffers = nBuffers;
4271 stream_.sampleRate = sampleRate;
4273 // Setup the buffer conversion information structure.
4274 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
4276 // Setup the callback thread.
// Only one callback thread is created per stream; the duplex (second)
// call reuses it.
4277 if ( stream_.callbackInfo.isRunning == false ) {
4278 unsigned threadId;
4279 stream_.callbackInfo.isRunning = true;
4280 stream_.callbackInfo.object = (void *) this;
4281 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
4282 &stream_.callbackInfo, 0, &threadId );
4283 if ( stream_.callbackInfo.thread == 0 ) {
4284 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
4288 // Boost DS thread priority
4289 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error-unwind path: release any COM objects/buffers created above,
// close the condition event, and free all allocated stream buffers.
4295 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
4296 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
4297 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
4298 if ( buffer ) buffer->Release();
4299 object->Release();
4301 if ( handle->buffer[1] ) {
4302 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
4303 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4304 if ( buffer ) buffer->Release();
4305 object->Release();
4307 CloseHandle( handle->condition );
4309 stream_.apiHandle = 0;
4312 for ( int i=0; i<2; i++ ) {
4313 if ( stream_.userBuffer[i] ) {
4314 free( stream_.userBuffer[i] );
4315 stream_.userBuffer[i] = 0;
4319 if ( stream_.deviceBuffer ) {
4320 free( stream_.deviceBuffer );
4321 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture COM objects and buffers, free the user
// and device conversion buffers, and reset the stream state.
// NOTE(review): some lines are elided from this listing (e.g. the
// conditional buffer->Stop() calls before the Release() calls at 4347
// and 4356); comments describe the visible logic only.
4327 void RtApiDs :: closeStream()
4329 if ( stream_.state == STREAM_CLOSED ) {
4330 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
4331 error( RtError::WARNING );
4335 // Stop the callback thread.
// Clearing isRunning tells the callback loop to exit; then wait for the
// thread to finish before tearing anything down it might still touch.
4336 stream_.callbackInfo.isRunning = false;
4337 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
4338 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
4340 DsHandle *handle = (DsHandle *) stream_.apiHandle;
4342 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
4343 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
4344 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
4347 buffer->Release();
4349 object->Release();
4351 if ( handle->buffer[1] ) {
4352 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
4353 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4356 buffer->Release();
4358 object->Release();
4360 CloseHandle( handle->condition );
4362 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
4365 for ( int i=0; i<2; i++ ) {
4366 if ( stream_.userBuffer[i] ) {
4367 free( stream_.userBuffer[i] );
4368 stream_.userBuffer[i] = 0;
4372 if ( stream_.deviceBuffer ) {
4373 free( stream_.deviceBuffer );
4374 stream_.deviceBuffer = 0;
4377 stream_.mode = UNINITIALIZED;
4378 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: raise timer resolution, set up duplex preroll,
// start the DirectSound playback buffer and/or capture buffer looping,
// reset the drain bookkeeping, and mark the stream RUNNING.  Any HRESULT
// failure falls through to a SYSTEM_ERROR at the end.
4381 void RtApiDs :: startStream()
4384 if ( stream_.state == STREAM_RUNNING ) {
4385 errorText_ = "RtApiDs::startStream(): the stream is already running!";
4386 error( RtError::WARNING );
4390 DsHandle *handle = (DsHandle *) stream_.apiHandle;
4392 // Increase scheduler frequency on lesser windows (a side-effect of
4393 // increasing timer accuracy). On greater windows (Win2K or later),
4394 // this is already in effect.
4395 timeBeginPeriod( 1 );
4397 buffersRolling = false;
4398 duplexPrerollBytes = 0;
4400 if ( stream_.mode == DUPLEX ) {
4401 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
4402 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
4405 HRESULT result = 0;
4406 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Start the playback buffer looping over the device ring buffer.
4408 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
4409 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
4410 if ( FAILED( result ) ) {
4411 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
4412 errorText_ = errorStream_.str();
4417 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
// Start the capture buffer looping as well.
4419 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
4420 result = buffer->Start( DSCBSTART_LOOPING );
4421 if ( FAILED( result ) ) {
4422 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
4423 errorText_ = errorStream_.str();
// Reset drain state and the condition event used by stopStream().
4428 handle->drainCounter = 0;
4429 handle->internalDrain = false;
4430 ResetEvent( handle->condition );
4431 stream_.state = STREAM_RUNNING;
4434 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4437 void RtApiDs :: stopStream()
\r
4440 if ( stream_.state == STREAM_STOPPED ) {
\r
4441 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4442 error( RtError::WARNING );
\r
4446 HRESULT result = 0;
\r
4449 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4451 if ( handle->drainCounter == 0 ) {
\r
4452 handle->drainCounter = 2;
\r
4453 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4456 stream_.state = STREAM_STOPPED;
\r
4458 // Stop the buffer and clear memory
\r
4459 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4460 result = buffer->Stop();
\r
4461 if ( FAILED( result ) ) {
\r
4462 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4463 errorText_ = errorStream_.str();
\r
4467 // Lock the buffer and clear it so that if we start to play again,
\r
4468 // we won't have old data playing.
\r
4469 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4470 if ( FAILED( result ) ) {
\r
4471 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4472 errorText_ = errorStream_.str();
\r
4476 // Zero the DS buffer
\r
4477 ZeroMemory( audioPtr, dataLen );
\r
4479 // Unlock the DS buffer
\r
4480 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4481 if ( FAILED( result ) ) {
\r
4482 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4483 errorText_ = errorStream_.str();
\r
4487 // If we start playing again, we must begin at beginning of buffer.
\r
4488 handle->bufferPointer[0] = 0;
\r
4491 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4492 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4496 stream_.state = STREAM_STOPPED;
\r
4498 result = buffer->Stop();
\r
4499 if ( FAILED( result ) ) {
\r
4500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4501 errorText_ = errorStream_.str();
\r
4505 // Lock the buffer and clear it so that if we start to play again,
\r
4506 // we won't have old data playing.
\r
4507 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4508 if ( FAILED( result ) ) {
\r
4509 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4510 errorText_ = errorStream_.str();
\r
4514 // Zero the DS buffer
\r
4515 ZeroMemory( audioPtr, dataLen );
\r
4517 // Unlock the DS buffer
\r
4518 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4519 if ( FAILED( result ) ) {
\r
4520 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4521 errorText_ = errorStream_.str();
\r
4525 // If we start recording again, we must begin at beginning of buffer.
\r
4526 handle->bufferPointer[1] = 0;
\r
4530 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4531 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4534 void RtApiDs :: abortStream()
\r
4537 if ( stream_.state == STREAM_STOPPED ) {
\r
4538 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4539 error( RtError::WARNING );
\r
4543 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4544 handle->drainCounter = 2;
\r
4549 void RtApiDs :: callbackEvent()
\r
4551 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4552 Sleep( 50 ); // sleep 50 milliseconds
\r
4556 if ( stream_.state == STREAM_CLOSED ) {
\r
4557 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4558 error( RtError::WARNING );
\r
4562 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4563 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4565 // Check if we were draining the stream and signal is finished.
\r
4566 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4568 stream_.state = STREAM_STOPPING;
\r
4569 if ( handle->internalDrain == false )
\r
4570 SetEvent( handle->condition );
\r
4576 // Invoke user callback to get fresh output data UNLESS we are
\r
4577 // draining stream.
\r
4578 if ( handle->drainCounter == 0 ) {
\r
4579 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4580 double streamTime = getStreamTime();
\r
4581 RtAudioStreamStatus status = 0;
\r
4582 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4583 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4584 handle->xrun[0] = false;
\r
4586 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4587 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4588 handle->xrun[1] = false;
\r
4590 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4591 stream_.bufferSize, streamTime, status, info->userData );
\r
4592 if ( cbReturnValue == 2 ) {
\r
4593 stream_.state = STREAM_STOPPING;
\r
4594 handle->drainCounter = 2;
\r
4598 else if ( cbReturnValue == 1 ) {
\r
4599 handle->drainCounter = 1;
\r
4600 handle->internalDrain = true;
\r
4605 DWORD currentWritePointer, safeWritePointer;
\r
4606 DWORD currentReadPointer, safeReadPointer;
\r
4607 UINT nextWritePointer;
\r
4609 LPVOID buffer1 = NULL;
\r
4610 LPVOID buffer2 = NULL;
\r
4611 DWORD bufferSize1 = 0;
\r
4612 DWORD bufferSize2 = 0;
\r
4617 if ( buffersRolling == false ) {
\r
4618 if ( stream_.mode == DUPLEX ) {
\r
4619 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4621 // It takes a while for the devices to get rolling. As a result,
\r
4622 // there's no guarantee that the capture and write device pointers
\r
4623 // will move in lockstep. Wait here for both devices to start
\r
4624 // rolling, and then set our buffer pointers accordingly.
\r
4625 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4626 // bytes later than the write buffer.
\r
4628 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4629 // take place between the two GetCurrentPosition calls... but I'm
\r
4630 // really not sure how to solve the problem. Temporarily boost to
\r
4631 // Realtime priority, maybe; but I'm not sure what priority the
\r
4632 // DirectSound service threads run at. We *should* be roughly
\r
4633 // within a ms or so of correct.
\r
4635 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4636 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4638 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4640 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4641 if ( FAILED( result ) ) {
\r
4642 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4643 errorText_ = errorStream_.str();
\r
4644 error( RtError::SYSTEM_ERROR );
\r
4646 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4647 if ( FAILED( result ) ) {
\r
4648 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4649 errorText_ = errorStream_.str();
\r
4650 error( RtError::SYSTEM_ERROR );
\r
4653 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4654 if ( FAILED( result ) ) {
\r
4655 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4656 errorText_ = errorStream_.str();
\r
4657 error( RtError::SYSTEM_ERROR );
\r
4659 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4660 if ( FAILED( result ) ) {
\r
4661 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4662 errorText_ = errorStream_.str();
\r
4663 error( RtError::SYSTEM_ERROR );
\r
4665 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4669 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4671 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4672 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4673 handle->bufferPointer[1] = safeReadPointer;
\r
4675 else if ( stream_.mode == OUTPUT ) {
\r
4677 // Set the proper nextWritePosition after initial startup.
\r
4678 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4679 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4680 if ( FAILED( result ) ) {
\r
4681 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4682 errorText_ = errorStream_.str();
\r
4683 error( RtError::SYSTEM_ERROR );
\r
4685 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4686 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4689 buffersRolling = true;
\r
4692 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4694 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4696 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4697 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4698 bufferBytes *= formatBytes( stream_.userFormat );
\r
4699 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4702 // Setup parameters and do buffer conversion if necessary.
\r
4703 if ( stream_.doConvertBuffer[0] ) {
\r
4704 buffer = stream_.deviceBuffer;
\r
4705 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4706 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4707 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4710 buffer = stream_.userBuffer[0];
\r
4711 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4712 bufferBytes *= formatBytes( stream_.userFormat );
\r
4715 // No byte swapping necessary in DirectSound implementation.
\r
4717 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4718 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4720 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4721 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4723 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4724 nextWritePointer = handle->bufferPointer[0];
\r
4726 DWORD endWrite, leadPointer;
\r
4728 // Find out where the read and "safe write" pointers are.
\r
4729 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4730 if ( FAILED( result ) ) {
\r
4731 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4732 errorText_ = errorStream_.str();
\r
4733 error( RtError::SYSTEM_ERROR );
\r
4736 // We will copy our output buffer into the region between
\r
4737 // safeWritePointer and leadPointer. If leadPointer is not
\r
4738 // beyond the next endWrite position, wait until it is.
\r
4739 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4740 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4741 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4742 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4743 endWrite = nextWritePointer + bufferBytes;
\r
4745 // Check whether the entire write region is behind the play pointer.
\r
4746 if ( leadPointer >= endWrite ) break;
\r
4748 // If we are here, then we must wait until the leadPointer advances
\r
4749 // beyond the end of our next write region. We use the
\r
4750 // Sleep() function to suspend operation until that happens.
\r
4751 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4752 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4753 if ( millis < 1.0 ) millis = 1.0;
\r
4754 Sleep( (DWORD) millis );
\r
4757 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4758 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4759 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4760 handle->xrun[0] = true;
\r
4761 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4762 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4763 handle->bufferPointer[0] = nextWritePointer;
\r
4764 endWrite = nextWritePointer + bufferBytes;
\r
4767 // Lock free space in the buffer
\r
4768 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4769 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4770 if ( FAILED( result ) ) {
\r
4771 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4772 errorText_ = errorStream_.str();
\r
4773 error( RtError::SYSTEM_ERROR );
\r
4776 // Copy our buffer into the DS buffer
\r
4777 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4778 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4780 // Update our buffer offset and unlock sound buffer
\r
4781 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4782 if ( FAILED( result ) ) {
\r
4783 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4784 errorText_ = errorStream_.str();
\r
4785 error( RtError::SYSTEM_ERROR );
\r
4787 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4788 handle->bufferPointer[0] = nextWritePointer;
\r
4790 if ( handle->drainCounter ) {
\r
4791 handle->drainCounter++;
\r
4796 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4798 // Setup parameters.
\r
4799 if ( stream_.doConvertBuffer[1] ) {
\r
4800 buffer = stream_.deviceBuffer;
\r
4801 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4802 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4805 buffer = stream_.userBuffer[1];
\r
4806 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4807 bufferBytes *= formatBytes( stream_.userFormat );
\r
4810 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4811 long nextReadPointer = handle->bufferPointer[1];
\r
4812 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4814 // Find out where the write and "safe read" pointers are.
\r
4815 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4816 if ( FAILED( result ) ) {
\r
4817 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4818 errorText_ = errorStream_.str();
\r
4819 error( RtError::SYSTEM_ERROR );
\r
4822 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4823 DWORD endRead = nextReadPointer + bufferBytes;
\r
4825 // Handling depends on whether we are INPUT or DUPLEX.
\r
4826 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4827 // then a wait here will drag the write pointers into the forbidden zone.
\r
4829 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4830 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4831 // practical way to sync up the read and write pointers reliably, given the
\r
4832 // the very complex relationship between phase and increment of the read and write
\r
4835 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4836 // provide a pre-roll period of 0.5 seconds in which we return
\r
4837 // zeros from the read buffer while the pointers sync up.
\r
4839 if ( stream_.mode == DUPLEX ) {
\r
4840 if ( safeReadPointer < endRead ) {
\r
4841 if ( duplexPrerollBytes <= 0 ) {
\r
4842 // Pre-roll time over. Be more agressive.
\r
4843 int adjustment = endRead-safeReadPointer;
\r
4845 handle->xrun[1] = true;
\r
4847 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4848 // and perform fine adjustments later.
\r
4849 // - small adjustments: back off by twice as much.
\r
4850 if ( adjustment >= 2*bufferBytes )
\r
4851 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4853 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4855 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4859 // In pre=roll time. Just do it.
\r
4860 nextReadPointer = safeReadPointer - bufferBytes;
\r
4861 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4863 endRead = nextReadPointer + bufferBytes;
\r
4866 else { // mode == INPUT
\r
4867 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4868 // See comments for playback.
\r
4869 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4870 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4871 if ( millis < 1.0 ) millis = 1.0;
\r
4872 Sleep( (DWORD) millis );
\r
4874 // Wake up and find out where we are now.
\r
4875 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4876 if ( FAILED( result ) ) {
\r
4877 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4878 errorText_ = errorStream_.str();
\r
4879 error( RtError::SYSTEM_ERROR );
\r
4882 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4886 // Lock free space in the buffer
\r
4887 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4888 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4889 if ( FAILED( result ) ) {
\r
4890 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4891 errorText_ = errorStream_.str();
\r
4892 error( RtError::SYSTEM_ERROR );
\r
4895 if ( duplexPrerollBytes <= 0 ) {
\r
4896 // Copy our buffer into the DS buffer
\r
4897 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4898 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4901 memset( buffer, 0, bufferSize1 );
\r
4902 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4903 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4906 // Update our buffer offset and unlock sound buffer
\r
4907 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4908 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4909 if ( FAILED( result ) ) {
\r
4910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4911 errorText_ = errorStream_.str();
\r
4912 error( RtError::SYSTEM_ERROR );
\r
4914 handle->bufferPointer[1] = nextReadPointer;
\r
4916 // No byte swapping necessary in DirectSound implementation.
\r
4918 // If necessary, convert 8-bit data from unsigned to signed.
\r
4919 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4920 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4922 // Do buffer conversion if necessary.
\r
4923 if ( stream_.doConvertBuffer[1] )
\r
4924 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4928 RtApi::tickStreamTime();
\r
4931 // Definitions for utility functions and callbacks
\r
4932 // specific to the DirectSound implementation.
\r
4934 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4936 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4937 RtApiDs *object = (RtApiDs *) info->object;
\r
4938 bool* isRunning = &info->isRunning;
\r
4940 while ( *isRunning == true ) {
\r
4941 object->callbackEvent();
\r
4944 _endthreadex( 0 );
\r
4948 #include "tchar.h"
\r
4950 std::string convertTChar( LPCTSTR name )
\r
4952 #if defined( UNICODE ) || defined( _UNICODE )
\r
4953 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4954 std::string s( length, 0 );
\r
4955 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
4957 std::string s( name );
\r
4963 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
4964 LPCTSTR description,
\r
4966 LPVOID lpContext )
\r
4968 bool *isInput = (bool *) lpContext;
\r
4971 bool validDevice = false;
\r
4972 if ( *isInput == true ) {
\r
4974 LPDIRECTSOUNDCAPTURE object;
\r
4976 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
4977 if ( hr != DS_OK ) return TRUE;
\r
4979 caps.dwSize = sizeof(caps);
\r
4980 hr = object->GetCaps( &caps );
\r
4981 if ( hr == DS_OK ) {
\r
4982 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
4983 validDevice = true;
\r
4985 object->Release();
\r
4989 LPDIRECTSOUND object;
\r
4990 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
4991 if ( hr != DS_OK ) return TRUE;
\r
4993 caps.dwSize = sizeof(caps);
\r
4994 hr = object->GetCaps( &caps );
\r
4995 if ( hr == DS_OK ) {
\r
4996 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
4997 validDevice = true;
\r
4999 object->Release();
\r
5002 // If good device, then save its name and guid.
\r
5003 std::string name = convertTChar( description );
\r
5004 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5005 name = "Default Device";
\r
5006 if ( validDevice ) {
\r
5007 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5008 if ( dsDevices[i].name == name ) {
\r
5009 dsDevices[i].found = true;
\r
5011 dsDevices[i].id[1] = lpguid;
\r
5012 dsDevices[i].validId[1] = true;
\r
5015 dsDevices[i].id[0] = lpguid;
\r
5016 dsDevices[i].validId[0] = true;
\r
5023 device.name = name;
\r
5024 device.found = true;
\r
5026 device.id[1] = lpguid;
\r
5027 device.validId[1] = true;
\r
5030 device.id[0] = lpguid;
\r
5031 device.validId[0] = true;
\r
5033 dsDevices.push_back( device );
\r
5039 static const char* getErrorString( int code )
\r
5043 case DSERR_ALLOCATED:
\r
5044 return "Already allocated";
\r
5046 case DSERR_CONTROLUNAVAIL:
\r
5047 return "Control unavailable";
\r
5049 case DSERR_INVALIDPARAM:
\r
5050 return "Invalid parameter";
\r
5052 case DSERR_INVALIDCALL:
\r
5053 return "Invalid call";
\r
5055 case DSERR_GENERIC:
\r
5056 return "Generic error";
\r
5058 case DSERR_PRIOLEVELNEEDED:
\r
5059 return "Priority level needed";
\r
5061 case DSERR_OUTOFMEMORY:
\r
5062 return "Out of memory";
\r
5064 case DSERR_BADFORMAT:
\r
5065 return "The sample rate or the channel format is not supported";
\r
5067 case DSERR_UNSUPPORTED:
\r
5068 return "Not supported";
\r
5070 case DSERR_NODRIVER:
\r
5071 return "No driver";
\r
5073 case DSERR_ALREADYINITIALIZED:
\r
5074 return "Already initialized";
\r
5076 case DSERR_NOAGGREGATION:
\r
5077 return "No aggregation";
\r
5079 case DSERR_BUFFERLOST:
\r
5080 return "Buffer lost";
\r
5082 case DSERR_OTHERAPPHASPRIO:
\r
5083 return "Another application already has priority";
\r
5085 case DSERR_UNINITIALIZED:
\r
5086 return "Uninitialized";
\r
5089 return "DirectSound unknown error";
\r
5092 //******************** End of __WINDOWS_DS__ *********************//
\r
5096 #if defined(__LINUX_ALSA__)
\r
5098 #include <alsa/asoundlib.h>
\r
5099 #include <unistd.h>
\r
5101 // A structure to hold various information related to the ALSA API
\r
5102 // implementation.
\r
5103 struct AlsaHandle {
\r
5104 snd_pcm_t *handles[2];
\r
5105 bool synchronized;
\r
5107 pthread_cond_t runnable_cv;
\r
5111 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5114 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5116 RtApiAlsa :: RtApiAlsa()
\r
5118 // Nothing to do here.
\r
5121 RtApiAlsa :: ~RtApiAlsa()
\r
5123 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5126 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5128 unsigned nDevices = 0;
\r
5129 int result, subdevice, card;
\r
5131 snd_ctl_t *handle;
\r
5133 // Count cards and devices
\r
5135 snd_card_next( &card );
\r
5136 while ( card >= 0 ) {
\r
5137 sprintf( name, "hw:%d", card );
\r
5138 result = snd_ctl_open( &handle, name, 0 );
\r
5139 if ( result < 0 ) {
\r
5140 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5141 errorText_ = errorStream_.str();
\r
5142 error( RtError::WARNING );
\r
5147 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5148 if ( result < 0 ) {
\r
5149 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5150 errorText_ = errorStream_.str();
\r
5151 error( RtError::WARNING );
\r
5154 if ( subdevice < 0 )
\r
5159 snd_ctl_close( handle );
\r
5160 snd_card_next( &card );
\r
5166 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5168 RtAudio::DeviceInfo info;
\r
5169 info.probed = false;
\r
5171 unsigned nDevices = 0;
\r
5172 int result, subdevice, card;
\r
5174 snd_ctl_t *chandle;
\r
5176 // Count cards and devices
\r
5178 snd_card_next( &card );
\r
5179 while ( card >= 0 ) {
\r
5180 sprintf( name, "hw:%d", card );
\r
5181 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5182 if ( result < 0 ) {
\r
5183 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5184 errorText_ = errorStream_.str();
\r
5185 error( RtError::WARNING );
\r
5190 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5191 if ( result < 0 ) {
\r
5192 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5193 errorText_ = errorStream_.str();
\r
5194 error( RtError::WARNING );
\r
5197 if ( subdevice < 0 ) break;
\r
5198 if ( nDevices == device ) {
\r
5199 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5205 snd_ctl_close( chandle );
\r
5206 snd_card_next( &card );
\r
5209 if ( nDevices == 0 ) {
\r
5210 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5211 error( RtError::INVALID_USE );
\r
5214 if ( device >= nDevices ) {
\r
5215 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5216 error( RtError::INVALID_USE );
\r
5221 // If a stream is already open, we cannot probe the stream devices.
\r
5222 // Thus, use the saved results.
\r
5223 if ( stream_.state != STREAM_CLOSED &&
\r
5224 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5225 snd_ctl_close( chandle );
\r
5226 if ( device >= devices_.size() ) {
\r
5227 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5228 error( RtError::WARNING );
\r
5231 return devices_[ device ];
\r
5234 int openMode = SND_PCM_ASYNC;
\r
5235 snd_pcm_stream_t stream;
\r
5236 snd_pcm_info_t *pcminfo;
\r
5237 snd_pcm_info_alloca( &pcminfo );
\r
5238 snd_pcm_t *phandle;
\r
5239 snd_pcm_hw_params_t *params;
\r
5240 snd_pcm_hw_params_alloca( ¶ms );
\r
5242 // First try for playback
\r
5243 stream = SND_PCM_STREAM_PLAYBACK;
\r
5244 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5245 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5246 snd_pcm_info_set_stream( pcminfo, stream );
\r
5248 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5249 if ( result < 0 ) {
\r
5250 // Device probably doesn't support playback.
\r
5251 goto captureProbe;
\r
5254 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5255 if ( result < 0 ) {
\r
5256 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5257 errorText_ = errorStream_.str();
\r
5258 error( RtError::WARNING );
\r
5259 goto captureProbe;
\r
5262 // The device is open ... fill the parameter structure.
\r
5263 result = snd_pcm_hw_params_any( phandle, params );
\r
5264 if ( result < 0 ) {
\r
5265 snd_pcm_close( phandle );
\r
5266 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5267 errorText_ = errorStream_.str();
\r
5268 error( RtError::WARNING );
\r
5269 goto captureProbe;
\r
5272 // Get output channel information.
\r
5273 unsigned int value;
\r
5274 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5275 if ( result < 0 ) {
\r
5276 snd_pcm_close( phandle );
\r
5277 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5278 errorText_ = errorStream_.str();
\r
5279 error( RtError::WARNING );
\r
5280 goto captureProbe;
\r
5282 info.outputChannels = value;
\r
5283 snd_pcm_close( phandle );
\r
5286 // Now try for capture
\r
5287 stream = SND_PCM_STREAM_CAPTURE;
\r
5288 snd_pcm_info_set_stream( pcminfo, stream );
\r
5290 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5291 snd_ctl_close( chandle );
\r
5292 if ( result < 0 ) {
\r
5293 // Device probably doesn't support capture.
\r
5294 if ( info.outputChannels == 0 ) return info;
\r
5295 goto probeParameters;
\r
5298 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5299 if ( result < 0 ) {
\r
5300 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5301 errorText_ = errorStream_.str();
\r
5302 error( RtError::WARNING );
\r
5303 if ( info.outputChannels == 0 ) return info;
\r
5304 goto probeParameters;
\r
5307 // The device is open ... fill the parameter structure.
\r
5308 result = snd_pcm_hw_params_any( phandle, params );
\r
5309 if ( result < 0 ) {
\r
5310 snd_pcm_close( phandle );
\r
5311 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5312 errorText_ = errorStream_.str();
\r
5313 error( RtError::WARNING );
\r
5314 if ( info.outputChannels == 0 ) return info;
\r
5315 goto probeParameters;
\r
5318 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5319 if ( result < 0 ) {
\r
5320 snd_pcm_close( phandle );
\r
5321 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5322 errorText_ = errorStream_.str();
\r
5323 error( RtError::WARNING );
\r
5324 if ( info.outputChannels == 0 ) return info;
\r
5325 goto probeParameters;
\r
5327 info.inputChannels = value;
\r
5328 snd_pcm_close( phandle );
\r
5330 // If device opens for both playback and capture, we determine the channels.
\r
5331 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5332 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5334 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5335 if ( device == 0 && info.outputChannels > 0 )
\r
5336 info.isDefaultOutput = true;
\r
5337 if ( device == 0 && info.inputChannels > 0 )
\r
5338 info.isDefaultInput = true;
\r
5341 // At this point, we just need to figure out the supported data
\r
5342 // formats and sample rates. We'll proceed by opening the device in
\r
5343 // the direction with the maximum number of channels, or playback if
\r
5344 // they are equal. This might limit our sample rate options, but so
\r
5347 if ( info.outputChannels >= info.inputChannels )
\r
5348 stream = SND_PCM_STREAM_PLAYBACK;
\r
5350 stream = SND_PCM_STREAM_CAPTURE;
\r
5351 snd_pcm_info_set_stream( pcminfo, stream );
\r
5353 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5354 if ( result < 0 ) {
\r
5355 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5356 errorText_ = errorStream_.str();
\r
5357 error( RtError::WARNING );
\r
5361 // The device is open ... fill the parameter structure.
\r
5362 result = snd_pcm_hw_params_any( phandle, params );
\r
5363 if ( result < 0 ) {
\r
5364 snd_pcm_close( phandle );
\r
5365 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5366 errorText_ = errorStream_.str();
\r
5367 error( RtError::WARNING );
\r
5371 // Test our discrete set of sample rate values.
\r
5372 info.sampleRates.clear();
\r
5373 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5374 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5375 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5377 if ( info.sampleRates.size() == 0 ) {
\r
5378 snd_pcm_close( phandle );
\r
5379 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5380 errorText_ = errorStream_.str();
\r
5381 error( RtError::WARNING );
\r
5385 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5386 snd_pcm_format_t format;
\r
5387 info.nativeFormats = 0;
\r
5388 format = SND_PCM_FORMAT_S8;
\r
5389 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5390 info.nativeFormats |= RTAUDIO_SINT8;
\r
5391 format = SND_PCM_FORMAT_S16;
\r
5392 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5393 info.nativeFormats |= RTAUDIO_SINT16;
\r
5394 format = SND_PCM_FORMAT_S24;
\r
5395 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5396 info.nativeFormats |= RTAUDIO_SINT24;
\r
5397 format = SND_PCM_FORMAT_S32;
\r
5398 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5399 info.nativeFormats |= RTAUDIO_SINT32;
\r
5400 format = SND_PCM_FORMAT_FLOAT;
\r
5401 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5402 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5403 format = SND_PCM_FORMAT_FLOAT64;
\r
5404 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5405 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5407 // Check that we have at least one supported format
\r
5408 if ( info.nativeFormats == 0 ) {
\r
5409 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5410 errorText_ = errorStream_.str();
\r
5411 error( RtError::WARNING );
\r
5415 // Get the device name
\r
5417 result = snd_card_get_name( card, &cardname );
\r
5418 if ( result >= 0 )
\r
5419 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5422 // That's all ... close the device and return
\r
5423 snd_pcm_close( phandle );
\r
5424 info.probed = true;
\r
5428 void RtApiAlsa :: saveDeviceInfo( void )
\r
5432 unsigned int nDevices = getDeviceCount();
\r
5433 devices_.resize( nDevices );
\r
5434 for ( unsigned int i=0; i<nDevices; i++ )
\r
5435 devices_[i] = getDeviceInfo( i );
\r
5438 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5439 unsigned int firstChannel, unsigned int sampleRate,
\r
5440 RtAudioFormat format, unsigned int *bufferSize,
\r
5441 RtAudio::StreamOptions *options )
\r
5444 #if defined(__RTAUDIO_DEBUG__)
\r
5445 snd_output_t *out;
\r
5446 snd_output_stdio_attach(&out, stderr, 0);
\r
5449 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5451 unsigned nDevices = 0;
\r
5452 int result, subdevice, card;
\r
5454 snd_ctl_t *chandle;
\r
5456 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5457 snprintf(name, sizeof(name), "%s", "default");
\r
5459 // Count cards and devices
\r
5461 snd_card_next( &card );
\r
5462 while ( card >= 0 ) {
\r
5463 sprintf( name, "hw:%d", card );
\r
5464 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5465 if ( result < 0 ) {
\r
5466 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5467 errorText_ = errorStream_.str();
\r
5472 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5473 if ( result < 0 ) break;
\r
5474 if ( subdevice < 0 ) break;
\r
5475 if ( nDevices == device ) {
\r
5476 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5477 snd_ctl_close( chandle );
\r
5482 snd_ctl_close( chandle );
\r
5483 snd_card_next( &card );
\r
5486 if ( nDevices == 0 ) {
\r
5487 // This should not happen because a check is made before this function is called.
\r
5488 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5492 if ( device >= nDevices ) {
\r
5493 // This should not happen because a check is made before this function is called.
\r
5494 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5501 // The getDeviceInfo() function will not work for a device that is
\r
5502 // already open. Thus, we'll probe the system before opening a
\r
5503 // stream and save the results for use by getDeviceInfo().
\r
5504 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5505 this->saveDeviceInfo();
\r
5507 snd_pcm_stream_t stream;
\r
5508 if ( mode == OUTPUT )
\r
5509 stream = SND_PCM_STREAM_PLAYBACK;
\r
5511 stream = SND_PCM_STREAM_CAPTURE;
\r
5513 snd_pcm_t *phandle;
\r
5514 int openMode = SND_PCM_ASYNC;
\r
5515 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5516 if ( result < 0 ) {
\r
5517 if ( mode == OUTPUT )
\r
5518 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5520 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5521 errorText_ = errorStream_.str();
\r
5525 // Fill the parameter structure.
\r
5526 snd_pcm_hw_params_t *hw_params;
\r
5527 snd_pcm_hw_params_alloca( &hw_params );
\r
5528 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5529 if ( result < 0 ) {
\r
5530 snd_pcm_close( phandle );
\r
5531 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5532 errorText_ = errorStream_.str();
\r
5536 #if defined(__RTAUDIO_DEBUG__)
\r
5537 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5538 snd_pcm_hw_params_dump( hw_params, out );
\r
5541 // Set access ... check user preference.
\r
5542 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5543 stream_.userInterleaved = false;
\r
5544 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5545 if ( result < 0 ) {
\r
5546 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5547 stream_.deviceInterleaved[mode] = true;
\r
5550 stream_.deviceInterleaved[mode] = false;
\r
5553 stream_.userInterleaved = true;
\r
5554 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5555 if ( result < 0 ) {
\r
5556 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5557 stream_.deviceInterleaved[mode] = false;
\r
5560 stream_.deviceInterleaved[mode] = true;
\r
5563 if ( result < 0 ) {
\r
5564 snd_pcm_close( phandle );
\r
5565 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5566 errorText_ = errorStream_.str();
\r
5570 // Determine how to set the device format.
\r
5571 stream_.userFormat = format;
\r
5572 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5574 if ( format == RTAUDIO_SINT8 )
\r
5575 deviceFormat = SND_PCM_FORMAT_S8;
\r
5576 else if ( format == RTAUDIO_SINT16 )
\r
5577 deviceFormat = SND_PCM_FORMAT_S16;
\r
5578 else if ( format == RTAUDIO_SINT24 )
\r
5579 deviceFormat = SND_PCM_FORMAT_S24;
\r
5580 else if ( format == RTAUDIO_SINT32 )
\r
5581 deviceFormat = SND_PCM_FORMAT_S32;
\r
5582 else if ( format == RTAUDIO_FLOAT32 )
\r
5583 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5584 else if ( format == RTAUDIO_FLOAT64 )
\r
5585 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5587 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5588 stream_.deviceFormat[mode] = format;
\r
5592 // The user requested format is not natively supported by the device.
\r
5593 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5594 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5595 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5599 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5600 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5601 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5605 deviceFormat = SND_PCM_FORMAT_S32;
\r
5606 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5607 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5611 deviceFormat = SND_PCM_FORMAT_S24;
\r
5612 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5613 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5617 deviceFormat = SND_PCM_FORMAT_S16;
\r
5618 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5619 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5623 deviceFormat = SND_PCM_FORMAT_S8;
\r
5624 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5625 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5629 // If we get here, no supported format was found.
\r
5630 snd_pcm_close( phandle );
\r
5631 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5632 errorText_ = errorStream_.str();
\r
5636 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5637 if ( result < 0 ) {
\r
5638 snd_pcm_close( phandle );
\r
5639 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5640 errorText_ = errorStream_.str();
\r
5644 // Determine whether byte-swaping is necessary.
\r
5645 stream_.doByteSwap[mode] = false;
\r
5646 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5647 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5648 if ( result == 0 )
\r
5649 stream_.doByteSwap[mode] = true;
\r
5650 else if (result < 0) {
\r
5651 snd_pcm_close( phandle );
\r
5652 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5653 errorText_ = errorStream_.str();
\r
5658 // Set the sample rate.
\r
5659 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5660 if ( result < 0 ) {
\r
5661 snd_pcm_close( phandle );
\r
5662 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5663 errorText_ = errorStream_.str();
\r
5667 // Determine the number of channels for this device. We support a possible
\r
5668 // minimum device channel number > than the value requested by the user.
\r
5669 stream_.nUserChannels[mode] = channels;
\r
5670 unsigned int value;
\r
5671 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5672 unsigned int deviceChannels = value;
\r
5673 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5674 snd_pcm_close( phandle );
\r
5675 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5676 errorText_ = errorStream_.str();
\r
5680 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5681 if ( result < 0 ) {
\r
5682 snd_pcm_close( phandle );
\r
5683 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5684 errorText_ = errorStream_.str();
\r
5687 deviceChannels = value;
\r
5688 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5689 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5691 // Set the device channels.
\r
5692 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5693 if ( result < 0 ) {
\r
5694 snd_pcm_close( phandle );
\r
5695 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5696 errorText_ = errorStream_.str();
\r
5700 // Set the buffer (or period) size.
\r
5702 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5703 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5704 if ( result < 0 ) {
\r
5705 snd_pcm_close( phandle );
\r
5706 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5707 errorText_ = errorStream_.str();
\r
5710 *bufferSize = periodSize;
\r
5712 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5713 unsigned int periods = 0;
\r
5714 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5715 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5716 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5717 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5718 if ( result < 0 ) {
\r
5719 snd_pcm_close( phandle );
\r
5720 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5721 errorText_ = errorStream_.str();
\r
5725 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5726 // MUST be the same in both directions!
\r
5727 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5728 snd_pcm_close( phandle );
\r
5729 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5730 errorText_ = errorStream_.str();
\r
5734 stream_.bufferSize = *bufferSize;
\r
5736 // Install the hardware configuration
\r
5737 result = snd_pcm_hw_params( phandle, hw_params );
\r
5738 if ( result < 0 ) {
\r
5739 snd_pcm_close( phandle );
\r
5740 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5741 errorText_ = errorStream_.str();
\r
5745 #if defined(__RTAUDIO_DEBUG__)
\r
5746 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5747 snd_pcm_hw_params_dump( hw_params, out );
\r
5750 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5751 snd_pcm_sw_params_t *sw_params = NULL;
\r
5752 snd_pcm_sw_params_alloca( &sw_params );
\r
5753 snd_pcm_sw_params_current( phandle, sw_params );
\r
5754 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5755 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5756 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5758 // The following two settings were suggested by Theo Veenker
\r
5759 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5760 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5762 // here are two options for a fix
\r
5763 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5764 snd_pcm_uframes_t val;
\r
5765 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5766 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5768 result = snd_pcm_sw_params( phandle, sw_params );
\r
5769 if ( result < 0 ) {
\r
5770 snd_pcm_close( phandle );
\r
5771 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5772 errorText_ = errorStream_.str();
\r
5776 #if defined(__RTAUDIO_DEBUG__)
\r
5777 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5778 snd_pcm_sw_params_dump( sw_params, out );
\r
5781 // Set flags for buffer conversion
\r
5782 stream_.doConvertBuffer[mode] = false;
\r
5783 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5784 stream_.doConvertBuffer[mode] = true;
\r
5785 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5786 stream_.doConvertBuffer[mode] = true;
\r
5787 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5788 stream_.nUserChannels[mode] > 1 )
\r
5789 stream_.doConvertBuffer[mode] = true;
\r
5791 // Allocate the ApiHandle if necessary and then save.
\r
5792 AlsaHandle *apiInfo = 0;
\r
5793 if ( stream_.apiHandle == 0 ) {
\r
5795 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5797 catch ( std::bad_alloc& ) {
\r
5798 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5802 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5803 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5807 stream_.apiHandle = (void *) apiInfo;
\r
5808 apiInfo->handles[0] = 0;
\r
5809 apiInfo->handles[1] = 0;
\r
5812 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5814 apiInfo->handles[mode] = phandle;
\r
5817 // Allocate necessary internal buffers.
\r
5818 unsigned long bufferBytes;
\r
5819 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5820 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5821 if ( stream_.userBuffer[mode] == NULL ) {
\r
5822 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5826 if ( stream_.doConvertBuffer[mode] ) {
\r
5828 bool makeBuffer = true;
\r
5829 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5830 if ( mode == INPUT ) {
\r
5831 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5832 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5833 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5837 if ( makeBuffer ) {
\r
5838 bufferBytes *= *bufferSize;
\r
5839 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5840 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5841 if ( stream_.deviceBuffer == NULL ) {
\r
5842 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5848 stream_.sampleRate = sampleRate;
\r
5849 stream_.nBuffers = periods;
\r
5850 stream_.device[mode] = device;
\r
5851 stream_.state = STREAM_STOPPED;
\r
5853 // Setup the buffer conversion information structure.
\r
5854 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5856 // Setup thread if necessary.
\r
5857 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5858 // We had already set up an output stream.
\r
5859 stream_.mode = DUPLEX;
\r
5860 // Link the streams if possible.
\r
5861 apiInfo->synchronized = false;
\r
5862 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5863 apiInfo->synchronized = true;
\r
5865 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5866 error( RtError::WARNING );
\r
5870 stream_.mode = mode;
\r
5872 // Setup callback thread.
\r
5873 stream_.callbackInfo.object = (void *) this;
\r
5875 // Set the thread attributes for joinable and realtime scheduling
\r
5876 // priority (optional). The higher priority will only take affect
\r
5877 // if the program is run as root or suid. Note, under Linux
\r
5878 // processes with CAP_SYS_NICE privilege, a user can change
\r
5879 // scheduling policy and priority (thus need not be root). See
\r
5880 // POSIX "capabilities".
\r
5881 pthread_attr_t attr;
\r
5882 pthread_attr_init( &attr );
\r
5883 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5884 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5885 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5886 struct sched_param param;
\r
5887 int priority = options->priority;
\r
5888 int min = sched_get_priority_min( SCHED_RR );
\r
5889 int max = sched_get_priority_max( SCHED_RR );
\r
5890 if ( priority < min ) priority = min;
\r
5891 else if ( priority > max ) priority = max;
\r
5892 param.sched_priority = priority;
\r
5893 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5894 pthread_attr_setschedparam( &attr, ¶m );
\r
5897 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5899 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5902 stream_.callbackInfo.isRunning = true;
\r
5903 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5904 pthread_attr_destroy( &attr );
\r
5906 stream_.callbackInfo.isRunning = false;
\r
5907 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5916 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5917 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5918 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5920 stream_.apiHandle = 0;
\r
5923 if ( phandle) snd_pcm_close( phandle );
\r
5925 for ( int i=0; i<2; i++ ) {
\r
5926 if ( stream_.userBuffer[i] ) {
\r
5927 free( stream_.userBuffer[i] );
\r
5928 stream_.userBuffer[i] = 0;
\r
5932 if ( stream_.deviceBuffer ) {
\r
5933 free( stream_.deviceBuffer );
\r
5934 stream_.deviceBuffer = 0;
\r
5940 void RtApiAlsa :: closeStream()
\r
5942 if ( stream_.state == STREAM_CLOSED ) {
\r
5943 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5944 error( RtError::WARNING );
\r
5948 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5949 stream_.callbackInfo.isRunning = false;
\r
5950 MUTEX_LOCK( &stream_.mutex );
\r
5951 if ( stream_.state == STREAM_STOPPED ) {
\r
5952 apiInfo->runnable = true;
\r
5953 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5955 MUTEX_UNLOCK( &stream_.mutex );
\r
5956 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5958 if ( stream_.state == STREAM_RUNNING ) {
\r
5959 stream_.state = STREAM_STOPPED;
\r
5960 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5961 snd_pcm_drop( apiInfo->handles[0] );
\r
5962 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5963 snd_pcm_drop( apiInfo->handles[1] );
\r
5967 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5968 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5969 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5971 stream_.apiHandle = 0;
\r
5974 for ( int i=0; i<2; i++ ) {
\r
5975 if ( stream_.userBuffer[i] ) {
\r
5976 free( stream_.userBuffer[i] );
\r
5977 stream_.userBuffer[i] = 0;
\r
5981 if ( stream_.deviceBuffer ) {
\r
5982 free( stream_.deviceBuffer );
\r
5983 stream_.deviceBuffer = 0;
\r
5986 stream_.mode = UNINITIALIZED;
\r
5987 stream_.state = STREAM_CLOSED;
\r
5990 void RtApiAlsa :: startStream()
\r
5992 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
5995 if ( stream_.state == STREAM_RUNNING ) {
\r
5996 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
5997 error( RtError::WARNING );
\r
6001 MUTEX_LOCK( &stream_.mutex );
\r
6004 snd_pcm_state_t state;
\r
6005 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6006 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6007 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6008 state = snd_pcm_state( handle[0] );
\r
6009 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6010 result = snd_pcm_prepare( handle[0] );
\r
6011 if ( result < 0 ) {
\r
6012 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6013 errorText_ = errorStream_.str();
\r
6019 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6020 state = snd_pcm_state( handle[1] );
\r
6021 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6022 result = snd_pcm_prepare( handle[1] );
\r
6023 if ( result < 0 ) {
\r
6024 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6025 errorText_ = errorStream_.str();
\r
6031 stream_.state = STREAM_RUNNING;
\r
6034 apiInfo->runnable = true;
\r
6035 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6036 MUTEX_UNLOCK( &stream_.mutex );
\r
6038 if ( result >= 0 ) return;
\r
6039 error( RtError::SYSTEM_ERROR );
\r
6042 void RtApiAlsa :: stopStream()
\r
6045 if ( stream_.state == STREAM_STOPPED ) {
\r
6046 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6047 error( RtError::WARNING );
\r
6051 stream_.state = STREAM_STOPPED;
\r
6052 MUTEX_LOCK( &stream_.mutex );
\r
6055 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6056 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6057 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6058 if ( apiInfo->synchronized )
\r
6059 result = snd_pcm_drop( handle[0] );
\r
6061 result = snd_pcm_drain( handle[0] );
\r
6062 if ( result < 0 ) {
\r
6063 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6064 errorText_ = errorStream_.str();
\r
6069 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6070 result = snd_pcm_drop( handle[1] );
\r
6071 if ( result < 0 ) {
\r
6072 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6073 errorText_ = errorStream_.str();
\r
6079 MUTEX_UNLOCK( &stream_.mutex );
\r
6081 if ( result >= 0 ) return;
\r
6082 error( RtError::SYSTEM_ERROR );
\r
6085 void RtApiAlsa :: abortStream()
\r
6088 if ( stream_.state == STREAM_STOPPED ) {
\r
6089 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6090 error( RtError::WARNING );
\r
6094 stream_.state = STREAM_STOPPED;
\r
6095 MUTEX_LOCK( &stream_.mutex );
\r
6098 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6099 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6100 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6101 result = snd_pcm_drop( handle[0] );
\r
6102 if ( result < 0 ) {
\r
6103 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6104 errorText_ = errorStream_.str();
\r
6109 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6110 result = snd_pcm_drop( handle[1] );
\r
6111 if ( result < 0 ) {
\r
6112 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6113 errorText_ = errorStream_.str();
\r
6119 MUTEX_UNLOCK( &stream_.mutex );
\r
6121 if ( result >= 0 ) return;
\r
6122 error( RtError::SYSTEM_ERROR );
\r
6125 void RtApiAlsa :: callbackEvent()
\r
6127 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6128 if ( stream_.state == STREAM_STOPPED ) {
\r
6129 MUTEX_LOCK( &stream_.mutex );
\r
6130 while ( !apiInfo->runnable )
\r
6131 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6133 if ( stream_.state != STREAM_RUNNING ) {
\r
6134 MUTEX_UNLOCK( &stream_.mutex );
\r
6137 MUTEX_UNLOCK( &stream_.mutex );
\r
6140 if ( stream_.state == STREAM_CLOSED ) {
\r
6141 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6142 error( RtError::WARNING );
\r
6146 int doStopStream = 0;
\r
6147 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6148 double streamTime = getStreamTime();
\r
6149 RtAudioStreamStatus status = 0;
\r
6150 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6151 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6152 apiInfo->xrun[0] = false;
\r
6154 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6155 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6156 apiInfo->xrun[1] = false;
\r
6158 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6159 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6161 if ( doStopStream == 2 ) {
\r
6166 MUTEX_LOCK( &stream_.mutex );
\r
6168 // The state might change while waiting on a mutex.
\r
6169 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6174 snd_pcm_t **handle;
\r
6175 snd_pcm_sframes_t frames;
\r
6176 RtAudioFormat format;
\r
6177 handle = (snd_pcm_t **) apiInfo->handles;
\r
6179 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6181 // Setup parameters.
\r
6182 if ( stream_.doConvertBuffer[1] ) {
\r
6183 buffer = stream_.deviceBuffer;
\r
6184 channels = stream_.nDeviceChannels[1];
\r
6185 format = stream_.deviceFormat[1];
\r
6188 buffer = stream_.userBuffer[1];
\r
6189 channels = stream_.nUserChannels[1];
\r
6190 format = stream_.userFormat;
\r
6193 // Read samples from device in interleaved/non-interleaved format.
\r
6194 if ( stream_.deviceInterleaved[1] )
\r
6195 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6197 void *bufs[channels];
\r
6198 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6199 for ( int i=0; i<channels; i++ )
\r
6200 bufs[i] = (void *) (buffer + (i * offset));
\r
6201 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6204 if ( result < (int) stream_.bufferSize ) {
\r
6205 // Either an error or overrun occured.
\r
6206 if ( result == -EPIPE ) {
\r
6207 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6208 if ( state == SND_PCM_STATE_XRUN ) {
\r
6209 apiInfo->xrun[1] = true;
\r
6210 result = snd_pcm_prepare( handle[1] );
\r
6211 if ( result < 0 ) {
\r
6212 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6213 errorText_ = errorStream_.str();
\r
6217 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6218 errorText_ = errorStream_.str();
\r
6222 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6223 errorText_ = errorStream_.str();
\r
6225 error( RtError::WARNING );
\r
6229 // Do byte swapping if necessary.
\r
6230 if ( stream_.doByteSwap[1] )
\r
6231 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6233 // Do buffer conversion if necessary.
\r
6234 if ( stream_.doConvertBuffer[1] )
\r
6235 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6237 // Check stream latency
\r
6238 result = snd_pcm_delay( handle[1], &frames );
\r
6239 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6244 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6246 // Setup parameters and do buffer conversion if necessary.
\r
6247 if ( stream_.doConvertBuffer[0] ) {
\r
6248 buffer = stream_.deviceBuffer;
\r
6249 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6250 channels = stream_.nDeviceChannels[0];
\r
6251 format = stream_.deviceFormat[0];
\r
6254 buffer = stream_.userBuffer[0];
\r
6255 channels = stream_.nUserChannels[0];
\r
6256 format = stream_.userFormat;
\r
6259 // Do byte swapping if necessary.
\r
6260 if ( stream_.doByteSwap[0] )
\r
6261 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6263 // Write samples to device in interleaved/non-interleaved format.
\r
6264 if ( stream_.deviceInterleaved[0] )
\r
6265 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6267 void *bufs[channels];
\r
6268 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6269 for ( int i=0; i<channels; i++ )
\r
6270 bufs[i] = (void *) (buffer + (i * offset));
\r
6271 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6274 if ( result < (int) stream_.bufferSize ) {
\r
6275 // Either an error or underrun occured.
\r
6276 if ( result == -EPIPE ) {
\r
6277 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6278 if ( state == SND_PCM_STATE_XRUN ) {
\r
6279 apiInfo->xrun[0] = true;
\r
6280 result = snd_pcm_prepare( handle[0] );
\r
6281 if ( result < 0 ) {
\r
6282 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6283 errorText_ = errorStream_.str();
\r
6287 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6288 errorText_ = errorStream_.str();
\r
6292 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6293 errorText_ = errorStream_.str();
\r
6295 error( RtError::WARNING );
\r
6299 // Check stream latency
\r
6300 result = snd_pcm_delay( handle[0], &frames );
\r
6301 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6305 MUTEX_UNLOCK( &stream_.mutex );
\r
6307 RtApi::tickStreamTime();
\r
6308 if ( doStopStream == 1 ) this->stopStream();
\r
6311 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6313 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6314 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6315 bool *isRunning = &info->isRunning;
\r
6317 while ( *isRunning == true ) {
\r
6318 pthread_testcancel();
\r
6319 object->callbackEvent();
\r
6322 pthread_exit( NULL );
\r
6325 //******************** End of __LINUX_ALSA__ *********************//
\r
6328 #if defined(__LINUX_PULSE__)
\r
6330 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6331 // and Tristan Matthews.
\r
6333 #include <pulse/error.h>
\r
6334 #include <pulse/simple.h>
\r
6338 const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
6339 44100, 48000, 96000, 0}; }
\r
6341 struct rtaudio_pa_format_mapping_t {
\r
6342 RtAudioFormat rtaudio_format;
\r
6343 pa_sample_format_t pa_format;
\r
6346 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6347 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6348 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6349 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6350 {0, PA_SAMPLE_INVALID}};
\r
6352 struct PulseAudioHandle {
\r
6353 pa_simple *s_play;
\r
6356 pthread_cond_t runnable_cv;
\r
6358 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6361 RtApiPulse::~RtApiPulse()
\r
6363 if ( stream_.state != STREAM_CLOSED )
\r
6367 unsigned int RtApiPulse::getDeviceCount( void )
\r
6372 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6374 RtAudio::DeviceInfo info;
\r
6375 info.probed = true;
\r
6376 info.name = "PulseAudio";
\r
6377 info.outputChannels = 2;
\r
6378 info.inputChannels = 2;
\r
6379 info.duplexChannels = 2;
\r
6380 info.isDefaultOutput = true;
\r
6381 info.isDefaultInput = true;
\r
6383 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6384 info.sampleRates.push_back( *sr );
\r
6386 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6391 extern "C" void *pulseaudio_callback( void * user )
\r
6393 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6394 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6395 volatile bool *isRunning = &cbi->isRunning;
\r
6397 while ( *isRunning ) {
\r
6398 pthread_testcancel();
\r
6399 context->callbackEvent();
\r
6402 pthread_exit( NULL );
\r
6405 void RtApiPulse::closeStream( void )
\r
6407 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6409 stream_.callbackInfo.isRunning = false;
\r
6411 MUTEX_LOCK( &stream_.mutex );
\r
6412 if ( stream_.state == STREAM_STOPPED ) {
\r
6413 pah->runnable = true;
\r
6414 pthread_cond_signal( &pah->runnable_cv );
\r
6416 MUTEX_UNLOCK( &stream_.mutex );
\r
6418 pthread_join( pah->thread, 0 );
\r
6419 if ( pah->s_play ) {
\r
6420 pa_simple_flush( pah->s_play, NULL );
\r
6421 pa_simple_free( pah->s_play );
\r
6424 pa_simple_free( pah->s_rec );
\r
6426 pthread_cond_destroy( &pah->runnable_cv );
\r
6428 stream_.apiHandle = 0;
\r
6431 if ( stream_.userBuffer[0] ) {
\r
6432 free( stream_.userBuffer[0] );
\r
6433 stream_.userBuffer[0] = 0;
\r
6435 if ( stream_.userBuffer[1] ) {
\r
6436 free( stream_.userBuffer[1] );
\r
6437 stream_.userBuffer[1] = 0;
\r
6440 stream_.state = STREAM_CLOSED;
\r
6441 stream_.mode = UNINITIALIZED;
\r
6444 void RtApiPulse::callbackEvent( void )
\r
6446 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6448 if ( stream_.state == STREAM_STOPPED ) {
\r
6449 MUTEX_LOCK( &stream_.mutex );
\r
6450 while ( !pah->runnable )
\r
6451 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6453 if ( stream_.state != STREAM_RUNNING ) {
\r
6454 MUTEX_UNLOCK( &stream_.mutex );
\r
6457 MUTEX_UNLOCK( &stream_.mutex );
\r
6460 if ( stream_.state == STREAM_CLOSED ) {
\r
6461 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6462 "this shouldn't happen!";
\r
6463 error( RtError::WARNING );
\r
6467 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6468 double streamTime = getStreamTime();
\r
6469 RtAudioStreamStatus status = 0;
\r
6470 int doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6471 stream_.bufferSize, streamTime, status,
\r
6472 stream_.callbackInfo.userData );
\r
6474 if ( doStopStream == 2 ) {
\r
6479 MUTEX_LOCK( &stream_.mutex );
\r
6481 if ( stream_.state != STREAM_RUNNING )
\r
6486 switch ( stream_.mode ) {
\r
6488 bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6489 if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {
\r
6490 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6491 pa_strerror( pa_error ) << ".";
\r
6492 errorText_ = errorStream_.str();
\r
6493 error( RtError::WARNING );
\r
6497 bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6498 if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0 ) {
\r
6499 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6500 pa_strerror( pa_error ) << ".";
\r
6501 errorText_ = errorStream_.str();
\r
6502 error( RtError::WARNING );
\r
6506 bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6507 if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {
\r
6508 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6509 pa_strerror( pa_error ) << ".";
\r
6510 errorText_ = errorStream_.str();
\r
6511 error( RtError::WARNING );
\r
6513 bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6514 if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0) {
\r
6515 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6516 pa_strerror( pa_error ) << ".";
\r
6517 errorText_ = errorStream_.str();
\r
6518 error( RtError::WARNING );
\r
6527 MUTEX_UNLOCK( &stream_.mutex );
\r
6528 RtApi::tickStreamTime();
\r
6530 if ( doStopStream == 1 )
\r
6534 void RtApiPulse::startStream( void )
\r
6536 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6538 if ( stream_.state == STREAM_CLOSED ) {
\r
6539 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6540 error( RtError::INVALID_USE );
\r
6543 if ( stream_.state == STREAM_RUNNING ) {
\r
6544 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6545 error( RtError::WARNING );
\r
6549 MUTEX_LOCK( &stream_.mutex );
\r
6551 stream_.state = STREAM_RUNNING;
\r
6553 pah->runnable = true;
\r
6554 pthread_cond_signal( &pah->runnable_cv );
\r
6555 MUTEX_UNLOCK( &stream_.mutex );
\r
6558 void RtApiPulse::stopStream( void )
\r
6560 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6562 if ( stream_.state == STREAM_CLOSED ) {
\r
6563 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6564 error( RtError::INVALID_USE );
\r
6567 if ( stream_.state == STREAM_STOPPED ) {
\r
6568 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6569 error( RtError::WARNING );
\r
6573 stream_.state = STREAM_STOPPED;
\r
6574 MUTEX_LOCK( &stream_.mutex );
\r
6576 if ( pah && pah->s_play ) {
\r
6578 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6579 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6580 pa_strerror( pa_error ) << ".";
\r
6581 errorText_ = errorStream_.str();
\r
6582 MUTEX_UNLOCK( &stream_.mutex );
\r
6583 error( RtError::SYSTEM_ERROR );
\r
6587 stream_.state = STREAM_STOPPED;
\r
6588 MUTEX_UNLOCK( &stream_.mutex );
\r
6591 void RtApiPulse::abortStream( void )
\r
6593 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6595 if ( stream_.state == STREAM_CLOSED ) {
\r
6596 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6597 error( RtError::INVALID_USE );
\r
6600 if ( stream_.state == STREAM_STOPPED ) {
\r
6601 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6602 error( RtError::WARNING );
\r
6606 stream_.state = STREAM_STOPPED;
\r
6607 MUTEX_LOCK( &stream_.mutex );
\r
6609 if ( pah && pah->s_play ) {
\r
6611 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6612 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6613 pa_strerror( pa_error ) << ".";
\r
6614 errorText_ = errorStream_.str();
\r
6615 MUTEX_UNLOCK( &stream_.mutex );
\r
6616 error( RtError::SYSTEM_ERROR );
\r
6620 stream_.state = STREAM_STOPPED;
\r
6621 MUTEX_UNLOCK( &stream_.mutex );
\r
6624 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6625 unsigned int channels, unsigned int firstChannel,
\r
6626 unsigned int sampleRate, RtAudioFormat format,
\r
6627 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6629 PulseAudioHandle *pah = 0;
\r
6630 unsigned long bufferBytes = 0;
\r
6631 pa_sample_spec ss;
\r
6633 if ( device != 0 ) return false;
\r
6634 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6635 if ( channels != 1 && channels != 2 ) {
\r
6636 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6639 ss.channels = channels;
\r
6641 if ( firstChannel != 0 ) return false;
\r
6643 bool sr_found = false;
\r
6644 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6645 if ( sampleRate == *sr ) {
\r
6647 stream_.sampleRate = sampleRate;
\r
6648 ss.rate = sampleRate;
\r
6652 if ( !sr_found ) {
\r
6653 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6657 bool sf_found = 0;
\r
6658 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6659 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6660 if ( format == sf->rtaudio_format ) {
\r
6662 stream_.userFormat = sf->rtaudio_format;
\r
6663 ss.format = sf->pa_format;
\r
6667 if ( !sf_found ) {
\r
6668 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6672 if ( options && ( options->flags & RTAUDIO_NONINTERLEAVED ) ) {
\r
6673 errorText_ = "RtApiPulse::probeDeviceOpen: only interleaved audio data supported.";
\r
6677 stream_.userInterleaved = true;
\r
6678 stream_.nBuffers = 1;
\r
6680 stream_.deviceInterleaved[mode] = true;
\r
6681 stream_.doByteSwap[mode] = false;
\r
6682 stream_.doConvertBuffer[mode] = false;
\r
6683 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6684 stream_.nUserChannels[mode] = channels;
\r
6685 stream_.nDeviceChannels[mode] = channels;
\r
6686 stream_.channelOffset[mode] = 0;
\r
6688 // Allocate necessary internal buffers.
\r
6689 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6690 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6691 if ( stream_.userBuffer[mode] == NULL ) {
\r
6692 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6695 stream_.bufferSize = *bufferSize;
\r
6697 if ( !stream_.apiHandle ) {
\r
6698 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6700 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6704 stream_.apiHandle = pah;
\r
6705 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6706 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6710 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6715 pah->s_rec = pa_simple_new( NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error );
\r
6716 if ( !pah->s_rec ) {
\r
6717 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6722 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6723 if ( !pah->s_play ) {
\r
6724 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6732 if ( stream_.mode == UNINITIALIZED )
\r
6733 stream_.mode = mode;
\r
6734 else if ( stream_.mode == mode )
\r
6737 stream_.mode = DUPLEX;
\r
6739 stream_.state = STREAM_STOPPED;
\r
6741 if ( !stream_.callbackInfo.isRunning ) {
\r
6742 stream_.callbackInfo.object = this;
\r
6743 stream_.callbackInfo.isRunning = true;
\r
6744 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6745 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6756 //******************** End of __LINUX_PULSE__ *********************//
\r
6759 #if defined(__LINUX_OSS__)
\r
6761 #include <unistd.h>
\r
6762 #include <sys/ioctl.h>
\r
6763 #include <unistd.h>
\r
6764 #include <fcntl.h>
\r
6765 #include "soundcard.h"
\r
6766 #include <errno.h>
\r
6769 extern "C" void *ossCallbackHandler(void * ptr);
\r
6771 // A structure to hold various information related to the OSS API
\r
6772 // implementation.
\r
6773 struct OssHandle {
\r
6774 int id[2]; // device ids
\r
6777 pthread_cond_t runnable;
\r
6780 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
6783 RtApiOss :: RtApiOss()
\r
6785 // Nothing to do here.
\r
6788 RtApiOss :: ~RtApiOss()
\r
6790 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6793 unsigned int RtApiOss :: getDeviceCount( void )
\r
6795 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6796 if ( mixerfd == -1 ) {
\r
6797 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6798 error( RtError::WARNING );
\r
6802 oss_sysinfo sysinfo;
\r
6803 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6805 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6806 error( RtError::WARNING );
\r
6811 return sysinfo.numaudios;
\r
6814 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6816 RtAudio::DeviceInfo info;
\r
6817 info.probed = false;
\r
6819 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6820 if ( mixerfd == -1 ) {
\r
6821 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6822 error( RtError::WARNING );
\r
6826 oss_sysinfo sysinfo;
\r
6827 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6828 if ( result == -1 ) {
\r
6830 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6831 error( RtError::WARNING );
\r
6835 unsigned nDevices = sysinfo.numaudios;
\r
6836 if ( nDevices == 0 ) {
\r
6838 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6839 error( RtError::INVALID_USE );
\r
6842 if ( device >= nDevices ) {
\r
6844 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6845 error( RtError::INVALID_USE );
\r
6848 oss_audioinfo ainfo;
\r
6849 ainfo.dev = device;
\r
6850 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6852 if ( result == -1 ) {
\r
6853 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6854 errorText_ = errorStream_.str();
\r
6855 error( RtError::WARNING );
\r
6860 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6861 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6862 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6863 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6864 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6867 // Probe data formats ... do for input
\r
6868 unsigned long mask = ainfo.iformats;
\r
6869 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6870 info.nativeFormats |= RTAUDIO_SINT16;
\r
6871 if ( mask & AFMT_S8 )
\r
6872 info.nativeFormats |= RTAUDIO_SINT8;
\r
6873 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6874 info.nativeFormats |= RTAUDIO_SINT32;
\r
6875 if ( mask & AFMT_FLOAT )
\r
6876 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6877 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6878 info.nativeFormats |= RTAUDIO_SINT24;
\r
6880 // Check that we have at least one supported format
\r
6881 if ( info.nativeFormats == 0 ) {
\r
6882 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6883 errorText_ = errorStream_.str();
\r
6884 error( RtError::WARNING );
\r
6888 // Probe the supported sample rates.
\r
6889 info.sampleRates.clear();
\r
6890 if ( ainfo.nrates ) {
\r
6891 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6892 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6893 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6894 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6901 // Check min and max rate values;
\r
6902 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6903 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6904 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6908 if ( info.sampleRates.size() == 0 ) {
\r
6909 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6910 errorText_ = errorStream_.str();
\r
6911 error( RtError::WARNING );
\r
6914 info.probed = true;
\r
6915 info.name = ainfo.name;
\r
6922 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6923 unsigned int firstChannel, unsigned int sampleRate,
\r
6924 RtAudioFormat format, unsigned int *bufferSize,
\r
6925 RtAudio::StreamOptions *options )
\r
6927 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6928 if ( mixerfd == -1 ) {
\r
6929 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6933 oss_sysinfo sysinfo;
\r
6934 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6935 if ( result == -1 ) {
\r
6937 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6941 unsigned nDevices = sysinfo.numaudios;
\r
6942 if ( nDevices == 0 ) {
\r
6943 // This should not happen because a check is made before this function is called.
\r
6945 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6949 if ( device >= nDevices ) {
\r
6950 // This should not happen because a check is made before this function is called.
\r
6952 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6956 oss_audioinfo ainfo;
\r
6957 ainfo.dev = device;
\r
6958 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6960 if ( result == -1 ) {
\r
6961 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6962 errorText_ = errorStream_.str();
\r
6966 // Check if device supports input or output
\r
6967 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6968 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6969 if ( mode == OUTPUT )
\r
6970 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6972 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6973 errorText_ = errorStream_.str();
\r
6978 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6979 if ( mode == OUTPUT )
\r
6980 flags |= O_WRONLY;
\r
6981 else { // mode == INPUT
\r
6982 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6983 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6984 close( handle->id[0] );
\r
6985 handle->id[0] = 0;
\r
6986 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6987 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6988 errorText_ = errorStream_.str();
\r
6991 // Check that the number previously set channels is the same.
\r
6992 if ( stream_.nUserChannels[0] != channels ) {
\r
6993 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6994 errorText_ = errorStream_.str();
\r
7000 flags |= O_RDONLY;
\r
7003 // Set exclusive access if specified.
\r
7004 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7006 // Try to open the device.
\r
7008 fd = open( ainfo.devnode, flags, 0 );
\r
7010 if ( errno == EBUSY )
\r
7011 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7013 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7014 errorText_ = errorStream_.str();
\r
7018 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7020 if ( flags | O_RDWR ) {
\r
7021 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7022 if ( result == -1) {
\r
7023 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7024 errorText_ = errorStream_.str();
\r
7030 // Check the device channel support.
\r
7031 stream_.nUserChannels[mode] = channels;
\r
7032 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7034 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7035 errorText_ = errorStream_.str();
\r
7039 // Set the number of channels.
\r
7040 int deviceChannels = channels + firstChannel;
\r
7041 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7042 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7044 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7045 errorText_ = errorStream_.str();
\r
7048 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7050 // Get the data format mask
\r
7052 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7053 if ( result == -1 ) {
\r
7055 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7056 errorText_ = errorStream_.str();
\r
7060 // Determine how to set the device format.
\r
7061 stream_.userFormat = format;
\r
7062 int deviceFormat = -1;
\r
7063 stream_.doByteSwap[mode] = false;
\r
7064 if ( format == RTAUDIO_SINT8 ) {
\r
7065 if ( mask & AFMT_S8 ) {
\r
7066 deviceFormat = AFMT_S8;
\r
7067 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7070 else if ( format == RTAUDIO_SINT16 ) {
\r
7071 if ( mask & AFMT_S16_NE ) {
\r
7072 deviceFormat = AFMT_S16_NE;
\r
7073 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7075 else if ( mask & AFMT_S16_OE ) {
\r
7076 deviceFormat = AFMT_S16_OE;
\r
7077 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7078 stream_.doByteSwap[mode] = true;
\r
7081 else if ( format == RTAUDIO_SINT24 ) {
\r
7082 if ( mask & AFMT_S24_NE ) {
\r
7083 deviceFormat = AFMT_S24_NE;
\r
7084 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7086 else if ( mask & AFMT_S24_OE ) {
\r
7087 deviceFormat = AFMT_S24_OE;
\r
7088 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7089 stream_.doByteSwap[mode] = true;
\r
7092 else if ( format == RTAUDIO_SINT32 ) {
\r
7093 if ( mask & AFMT_S32_NE ) {
\r
7094 deviceFormat = AFMT_S32_NE;
\r
7095 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7097 else if ( mask & AFMT_S32_OE ) {
\r
7098 deviceFormat = AFMT_S32_OE;
\r
7099 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7100 stream_.doByteSwap[mode] = true;
\r
7104 if ( deviceFormat == -1 ) {
\r
7105 // The user requested format is not natively supported by the device.
\r
7106 if ( mask & AFMT_S16_NE ) {
\r
7107 deviceFormat = AFMT_S16_NE;
\r
7108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7110 else if ( mask & AFMT_S32_NE ) {
\r
7111 deviceFormat = AFMT_S32_NE;
\r
7112 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7114 else if ( mask & AFMT_S24_NE ) {
\r
7115 deviceFormat = AFMT_S24_NE;
\r
7116 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7118 else if ( mask & AFMT_S16_OE ) {
\r
7119 deviceFormat = AFMT_S16_OE;
\r
7120 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7121 stream_.doByteSwap[mode] = true;
\r
7123 else if ( mask & AFMT_S32_OE ) {
\r
7124 deviceFormat = AFMT_S32_OE;
\r
7125 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7126 stream_.doByteSwap[mode] = true;
\r
7128 else if ( mask & AFMT_S24_OE ) {
\r
7129 deviceFormat = AFMT_S24_OE;
\r
7130 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7131 stream_.doByteSwap[mode] = true;
\r
7133 else if ( mask & AFMT_S8) {
\r
7134 deviceFormat = AFMT_S8;
\r
7135 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7139 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7140 // This really shouldn't happen ...
\r
7142 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7143 errorText_ = errorStream_.str();
\r
7147 // Set the data format.
\r
7148 int temp = deviceFormat;
\r
7149 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7150 if ( result == -1 || deviceFormat != temp ) {
\r
7152 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7153 errorText_ = errorStream_.str();
\r
7157 // Attempt to set the buffer size. According to OSS, the minimum
\r
7158 // number of buffers is two. The supposed minimum buffer size is 16
\r
7159 // bytes, so that will be our lower bound. The argument to this
\r
7160 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7161 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7162 // We'll check the actual value used near the end of the setup
\r
7164 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7165 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7167 if ( options ) buffers = options->numberOfBuffers;
\r
7168 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7169 if ( buffers < 2 ) buffers = 3;
\r
7170 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7171 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7172 if ( result == -1 ) {
\r
7174 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7175 errorText_ = errorStream_.str();
\r
7178 stream_.nBuffers = buffers;
\r
7180 // Save buffer size (in sample frames).
\r
7181 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7182 stream_.bufferSize = *bufferSize;
\r
7184 // Set the sample rate.
\r
7185 int srate = sampleRate;
\r
7186 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7187 if ( result == -1 ) {
\r
7189 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7190 errorText_ = errorStream_.str();
\r
7194 // Verify the sample rate setup worked.
\r
7195 if ( abs( srate - sampleRate ) > 100 ) {
\r
7197 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7198 errorText_ = errorStream_.str();
\r
7201 stream_.sampleRate = sampleRate;
\r
7203 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7204 // We're doing duplex setup here.
\r
7205 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7206 stream_.nDeviceChannels[0] = deviceChannels;
\r
7209 // Set interleaving parameters.
\r
7210 stream_.userInterleaved = true;
\r
7211 stream_.deviceInterleaved[mode] = true;
\r
7212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7213 stream_.userInterleaved = false;
\r
7215 // Set flags for buffer conversion
\r
7216 stream_.doConvertBuffer[mode] = false;
\r
7217 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7218 stream_.doConvertBuffer[mode] = true;
\r
7219 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7220 stream_.doConvertBuffer[mode] = true;
\r
7221 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7222 stream_.nUserChannels[mode] > 1 )
\r
7223 stream_.doConvertBuffer[mode] = true;
\r
7225 // Allocate the stream handles if necessary and then save.
\r
7226 if ( stream_.apiHandle == 0 ) {
\r
7228 handle = new OssHandle;
\r
7230 catch ( std::bad_alloc& ) {
\r
7231 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7235 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7236 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7240 stream_.apiHandle = (void *) handle;
\r
7243 handle = (OssHandle *) stream_.apiHandle;
\r
7245 handle->id[mode] = fd;
\r
7247 // Allocate necessary internal buffers.
\r
7248 unsigned long bufferBytes;
\r
7249 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7250 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7251 if ( stream_.userBuffer[mode] == NULL ) {
\r
7252 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7256 if ( stream_.doConvertBuffer[mode] ) {
\r
7258 bool makeBuffer = true;
\r
7259 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7260 if ( mode == INPUT ) {
\r
7261 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7262 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7263 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7267 if ( makeBuffer ) {
\r
7268 bufferBytes *= *bufferSize;
\r
7269 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7270 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7271 if ( stream_.deviceBuffer == NULL ) {
\r
7272 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7278 stream_.device[mode] = device;
\r
7279 stream_.state = STREAM_STOPPED;
\r
7281 // Setup the buffer conversion information structure.
\r
7282 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7284 // Setup thread if necessary.
\r
7285 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7286 // We had already set up an output stream.
\r
7287 stream_.mode = DUPLEX;
\r
7288 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7291 stream_.mode = mode;
\r
7293 // Setup callback thread.
\r
7294 stream_.callbackInfo.object = (void *) this;
\r
7296 // Set the thread attributes for joinable and realtime scheduling
\r
7297 // priority. The higher priority will only take affect if the
\r
7298 // program is run as root or suid.
\r
7299 pthread_attr_t attr;
\r
7300 pthread_attr_init( &attr );
\r
7301 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7302 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7303 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7304 struct sched_param param;
\r
7305 int priority = options->priority;
\r
7306 int min = sched_get_priority_min( SCHED_RR );
\r
7307 int max = sched_get_priority_max( SCHED_RR );
\r
7308 if ( priority < min ) priority = min;
\r
7309 else if ( priority > max ) priority = max;
\r
7310 param.sched_priority = priority;
\r
7311 pthread_attr_setschedparam( &attr, ¶m );
\r
7312 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7315 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7317 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7320 stream_.callbackInfo.isRunning = true;
\r
7321 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7322 pthread_attr_destroy( &attr );
\r
7324 stream_.callbackInfo.isRunning = false;
\r
7325 errorText_ = "RtApiOss::error creating callback thread!";
\r
7334 pthread_cond_destroy( &handle->runnable );
\r
7335 if ( handle->id[0] ) close( handle->id[0] );
\r
7336 if ( handle->id[1] ) close( handle->id[1] );
\r
7338 stream_.apiHandle = 0;
\r
7341 for ( int i=0; i<2; i++ ) {
\r
7342 if ( stream_.userBuffer[i] ) {
\r
7343 free( stream_.userBuffer[i] );
\r
7344 stream_.userBuffer[i] = 0;
\r
7348 if ( stream_.deviceBuffer ) {
\r
7349 free( stream_.deviceBuffer );
\r
7350 stream_.deviceBuffer = 0;
\r
7356 void RtApiOss :: closeStream()
\r
7358 if ( stream_.state == STREAM_CLOSED ) {
\r
7359 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7360 error( RtError::WARNING );
\r
7364 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7365 stream_.callbackInfo.isRunning = false;
\r
7366 MUTEX_LOCK( &stream_.mutex );
\r
7367 if ( stream_.state == STREAM_STOPPED )
\r
7368 pthread_cond_signal( &handle->runnable );
\r
7369 MUTEX_UNLOCK( &stream_.mutex );
\r
7370 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7372 if ( stream_.state == STREAM_RUNNING ) {
\r
7373 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7374 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7376 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7377 stream_.state = STREAM_STOPPED;
\r
7381 pthread_cond_destroy( &handle->runnable );
\r
7382 if ( handle->id[0] ) close( handle->id[0] );
\r
7383 if ( handle->id[1] ) close( handle->id[1] );
\r
7385 stream_.apiHandle = 0;
\r
7388 for ( int i=0; i<2; i++ ) {
\r
7389 if ( stream_.userBuffer[i] ) {
\r
7390 free( stream_.userBuffer[i] );
\r
7391 stream_.userBuffer[i] = 0;
\r
7395 if ( stream_.deviceBuffer ) {
\r
7396 free( stream_.deviceBuffer );
\r
7397 stream_.deviceBuffer = 0;
\r
7400 stream_.mode = UNINITIALIZED;
\r
7401 stream_.state = STREAM_CLOSED;
\r
7404 void RtApiOss :: startStream()
\r
7407 if ( stream_.state == STREAM_RUNNING ) {
\r
7408 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7409 error( RtError::WARNING );
\r
7413 MUTEX_LOCK( &stream_.mutex );
\r
7415 stream_.state = STREAM_RUNNING;
\r
7417 // No need to do anything else here ... OSS automatically starts
\r
7418 // when fed samples.
\r
7420 MUTEX_UNLOCK( &stream_.mutex );
\r
7422 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7423 pthread_cond_signal( &handle->runnable );
\r
7426 void RtApiOss :: stopStream()
\r
7429 if ( stream_.state == STREAM_STOPPED ) {
\r
7430 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7431 error( RtError::WARNING );
\r
7435 MUTEX_LOCK( &stream_.mutex );
\r
7437 // The state might change while waiting on a mutex.
\r
7438 if ( stream_.state == STREAM_STOPPED ) {
\r
7439 MUTEX_UNLOCK( &stream_.mutex );
\r
7444 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7445 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7447 // Flush the output with zeros a few times.
\r
7450 RtAudioFormat format;
\r
7452 if ( stream_.doConvertBuffer[0] ) {
\r
7453 buffer = stream_.deviceBuffer;
\r
7454 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7455 format = stream_.deviceFormat[0];
\r
7458 buffer = stream_.userBuffer[0];
\r
7459 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7460 format = stream_.userFormat;
\r
7463 memset( buffer, 0, samples * formatBytes(format) );
\r
7464 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7465 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7466 if ( result == -1 ) {
\r
7467 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7468 error( RtError::WARNING );
\r
7472 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7473 if ( result == -1 ) {
\r
7474 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7475 errorText_ = errorStream_.str();
\r
7478 handle->triggered = false;
\r
7481 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7482 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7483 if ( result == -1 ) {
\r
7484 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7485 errorText_ = errorStream_.str();
\r
7491 stream_.state = STREAM_STOPPED;
\r
7492 MUTEX_UNLOCK( &stream_.mutex );
\r
7494 if ( result != -1 ) return;
\r
7495 error( RtError::SYSTEM_ERROR );
\r
7498 void RtApiOss :: abortStream()
\r
7501 if ( stream_.state == STREAM_STOPPED ) {
\r
7502 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7503 error( RtError::WARNING );
\r
7507 MUTEX_LOCK( &stream_.mutex );
\r
7509 // The state might change while waiting on a mutex.
\r
7510 if ( stream_.state == STREAM_STOPPED ) {
\r
7511 MUTEX_UNLOCK( &stream_.mutex );
\r
7516 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7517 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7518 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7519 if ( result == -1 ) {
\r
7520 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7521 errorText_ = errorStream_.str();
\r
7524 handle->triggered = false;
\r
7527 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7528 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7529 if ( result == -1 ) {
\r
7530 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7531 errorText_ = errorStream_.str();
\r
7537 stream_.state = STREAM_STOPPED;
\r
7538 MUTEX_UNLOCK( &stream_.mutex );
\r
7540 if ( result != -1 ) return;
\r
7541 error( RtError::SYSTEM_ERROR );
\r
7544 void RtApiOss :: callbackEvent()
\r
7546 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7547 if ( stream_.state == STREAM_STOPPED ) {
\r
7548 MUTEX_LOCK( &stream_.mutex );
\r
7549 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7550 if ( stream_.state != STREAM_RUNNING ) {
\r
7551 MUTEX_UNLOCK( &stream_.mutex );
\r
7554 MUTEX_UNLOCK( &stream_.mutex );
\r
7557 if ( stream_.state == STREAM_CLOSED ) {
\r
7558 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7559 error( RtError::WARNING );
\r
7563 // Invoke user callback to get fresh output data.
\r
7564 int doStopStream = 0;
\r
7565 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7566 double streamTime = getStreamTime();
\r
7567 RtAudioStreamStatus status = 0;
\r
7568 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7569 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7570 handle->xrun[0] = false;
\r
7572 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7573 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7574 handle->xrun[1] = false;
\r
7576 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7577 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7578 if ( doStopStream == 2 ) {
\r
7579 this->abortStream();
\r
7583 MUTEX_LOCK( &stream_.mutex );
\r
7585 // The state might change while waiting on a mutex.
\r
7586 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7591 RtAudioFormat format;
\r
7593 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7595 // Setup parameters and do buffer conversion if necessary.
\r
7596 if ( stream_.doConvertBuffer[0] ) {
\r
7597 buffer = stream_.deviceBuffer;
\r
7598 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7599 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7600 format = stream_.deviceFormat[0];
\r
7603 buffer = stream_.userBuffer[0];
\r
7604 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7605 format = stream_.userFormat;
\r
7608 // Do byte swapping if necessary.
\r
7609 if ( stream_.doByteSwap[0] )
\r
7610 byteSwapBuffer( buffer, samples, format );
\r
7612 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7614 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7615 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7616 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7617 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7618 handle->triggered = true;
\r
7621 // Write samples to device.
\r
7622 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7624 if ( result == -1 ) {
\r
7625 // We'll assume this is an underrun, though there isn't a
\r
7626 // specific means for determining that.
\r
7627 handle->xrun[0] = true;
\r
7628 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7629 error( RtError::WARNING );
\r
7630 // Continue on to input section.
\r
7634 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7636 // Setup parameters.
\r
7637 if ( stream_.doConvertBuffer[1] ) {
\r
7638 buffer = stream_.deviceBuffer;
\r
7639 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7640 format = stream_.deviceFormat[1];
\r
7643 buffer = stream_.userBuffer[1];
\r
7644 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7645 format = stream_.userFormat;
\r
7648 // Read samples from device.
\r
7649 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7651 if ( result == -1 ) {
\r
7652 // We'll assume this is an overrun, though there isn't a
\r
7653 // specific means for determining that.
\r
7654 handle->xrun[1] = true;
\r
7655 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7656 error( RtError::WARNING );
\r
7660 // Do byte swapping if necessary.
\r
7661 if ( stream_.doByteSwap[1] )
\r
7662 byteSwapBuffer( buffer, samples, format );
\r
7664 // Do buffer conversion if necessary.
\r
7665 if ( stream_.doConvertBuffer[1] )
\r
7666 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7670 MUTEX_UNLOCK( &stream_.mutex );
\r
7672 RtApi::tickStreamTime();
\r
7673 if ( doStopStream == 1 ) this->stopStream();
\r
7676 extern "C" void *ossCallbackHandler( void *ptr )
\r
7678 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7679 RtApiOss *object = (RtApiOss *) info->object;
\r
7680 bool *isRunning = &info->isRunning;
\r
7682 while ( *isRunning == true ) {
\r
7683 pthread_testcancel();
\r
7684 object->callbackEvent();
\r
7687 pthread_exit( NULL );
\r
7690 //******************** End of __LINUX_OSS__ *********************//
\r
7694 // *************************************************** //
\r
7696 // Protected common (OS-independent) RtAudio methods.
\r
7698 // *************************************************** //
\r
7700 // This method can be modified to control the behavior of error
\r
7701 // message printing.
\r
7702 void RtApi :: error( RtError::Type type )
\r
7704 errorStream_.str(""); // clear the ostringstream
\r
7705 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7706 std::cerr << '\n' << errorText_ << "\n\n";
\r
7707 else if ( type != RtError::WARNING )
\r
7708 throw( RtError( errorText_, type ) );
\r
7711 void RtApi :: verifyStream()
\r
7713 if ( stream_.state == STREAM_CLOSED ) {
\r
7714 errorText_ = "RtApi:: a stream is not open!";
\r
7715 error( RtError::INVALID_USE );
\r
7719 void RtApi :: clearStreamInfo()
\r
7721 stream_.mode = UNINITIALIZED;
\r
7722 stream_.state = STREAM_CLOSED;
\r
7723 stream_.sampleRate = 0;
\r
7724 stream_.bufferSize = 0;
\r
7725 stream_.nBuffers = 0;
\r
7726 stream_.userFormat = 0;
\r
7727 stream_.userInterleaved = true;
\r
7728 stream_.streamTime = 0.0;
\r
7729 stream_.apiHandle = 0;
\r
7730 stream_.deviceBuffer = 0;
\r
7731 stream_.callbackInfo.callback = 0;
\r
7732 stream_.callbackInfo.userData = 0;
\r
7733 stream_.callbackInfo.isRunning = false;
\r
7734 for ( int i=0; i<2; i++ ) {
\r
7735 stream_.device[i] = 11111;
\r
7736 stream_.doConvertBuffer[i] = false;
\r
7737 stream_.deviceInterleaved[i] = true;
\r
7738 stream_.doByteSwap[i] = false;
\r
7739 stream_.nUserChannels[i] = 0;
\r
7740 stream_.nDeviceChannels[i] = 0;
\r
7741 stream_.channelOffset[i] = 0;
\r
7742 stream_.deviceFormat[i] = 0;
\r
7743 stream_.latency[i] = 0;
\r
7744 stream_.userBuffer[i] = 0;
\r
7745 stream_.convertInfo[i].channels = 0;
\r
7746 stream_.convertInfo[i].inJump = 0;
\r
7747 stream_.convertInfo[i].outJump = 0;
\r
7748 stream_.convertInfo[i].inFormat = 0;
\r
7749 stream_.convertInfo[i].outFormat = 0;
\r
7750 stream_.convertInfo[i].inOffset.clear();
\r
7751 stream_.convertInfo[i].outOffset.clear();
\r
7755 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7757 if ( format == RTAUDIO_SINT16 )
\r
7759 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7761 else if ( format == RTAUDIO_FLOAT64 )
\r
7763 else if ( format == RTAUDIO_SINT24 )
\r
7765 else if ( format == RTAUDIO_SINT8 )
\r
7768 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7769 error( RtError::WARNING );
\r
7774 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7776 if ( mode == INPUT ) { // convert device to user buffer
\r
7777 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7778 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7779 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7780 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7782 else { // convert user to device buffer
\r
7783 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7784 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7785 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7786 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7789 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7790 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7792 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7794 // Set up the interleave/deinterleave offsets.
\r
7795 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7796 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7797 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7798 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7799 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7800 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7801 stream_.convertInfo[mode].inJump = 1;
\r
7805 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7806 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7807 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7808 stream_.convertInfo[mode].outJump = 1;
\r
7812 else { // no (de)interleaving
\r
7813 if ( stream_.userInterleaved ) {
\r
7814 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7815 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7816 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7820 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7821 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7822 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7823 stream_.convertInfo[mode].inJump = 1;
\r
7824 stream_.convertInfo[mode].outJump = 1;
\r
7829 // Add channel offset.
\r
7830 if ( firstChannel > 0 ) {
\r
7831 if ( stream_.deviceInterleaved[mode] ) {
\r
7832 if ( mode == OUTPUT ) {
\r
7833 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7834 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7837 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7838 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7842 if ( mode == OUTPUT ) {
\r
7843 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7844 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7847 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7848 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7854 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7856 // This function does format conversion, input/output channel compensation, and
\r
7857 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7858 // the lower three bytes of a 32-bit integer.
\r
7860 // Clear our device buffer when in/out duplex device channels are different
\r
7861 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7862 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7863 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7866 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7868 Float64 *out = (Float64 *)outBuffer;
\r
7870 if (info.inFormat == RTAUDIO_SINT8) {
\r
7871 signed char *in = (signed char *)inBuffer;
\r
7872 scale = 1.0 / 127.5;
\r
7873 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7874 for (j=0; j<info.channels; j++) {
\r
7875 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7876 out[info.outOffset[j]] += 0.5;
\r
7877 out[info.outOffset[j]] *= scale;
\r
7879 in += info.inJump;
\r
7880 out += info.outJump;
\r
7883 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7884 Int16 *in = (Int16 *)inBuffer;
\r
7885 scale = 1.0 / 32767.5;
\r
7886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7887 for (j=0; j<info.channels; j++) {
\r
7888 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7889 out[info.outOffset[j]] += 0.5;
\r
7890 out[info.outOffset[j]] *= scale;
\r
7892 in += info.inJump;
\r
7893 out += info.outJump;
\r
7896 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7897 Int24 *in = (Int24 *)inBuffer;
\r
7898 scale = 1.0 / 8388607.5;
\r
7899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7900 for (j=0; j<info.channels; j++) {
\r
7901 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
7902 out[info.outOffset[j]] += 0.5;
\r
7903 out[info.outOffset[j]] *= scale;
\r
7905 in += info.inJump;
\r
7906 out += info.outJump;
\r
7909 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7910 Int32 *in = (Int32 *)inBuffer;
\r
7911 scale = 1.0 / 2147483647.5;
\r
7912 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7913 for (j=0; j<info.channels; j++) {
\r
7914 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7915 out[info.outOffset[j]] += 0.5;
\r
7916 out[info.outOffset[j]] *= scale;
\r
7918 in += info.inJump;
\r
7919 out += info.outJump;
\r
7922 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7923 Float32 *in = (Float32 *)inBuffer;
\r
7924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7925 for (j=0; j<info.channels; j++) {
\r
7926 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7928 in += info.inJump;
\r
7929 out += info.outJump;
\r
7932 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7933 // Channel compensation and/or (de)interleaving only.
\r
7934 Float64 *in = (Float64 *)inBuffer;
\r
7935 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7936 for (j=0; j<info.channels; j++) {
\r
7937 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7939 in += info.inJump;
\r
7940 out += info.outJump;
\r
7944 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7946 Float32 *out = (Float32 *)outBuffer;
\r
7948 if (info.inFormat == RTAUDIO_SINT8) {
\r
7949 signed char *in = (signed char *)inBuffer;
\r
7950 scale = (Float32) ( 1.0 / 127.5 );
\r
7951 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7952 for (j=0; j<info.channels; j++) {
\r
7953 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7954 out[info.outOffset[j]] += 0.5;
\r
7955 out[info.outOffset[j]] *= scale;
\r
7957 in += info.inJump;
\r
7958 out += info.outJump;
\r
7961 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7962 Int16 *in = (Int16 *)inBuffer;
\r
7963 scale = (Float32) ( 1.0 / 32767.5 );
\r
7964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7965 for (j=0; j<info.channels; j++) {
\r
7966 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7967 out[info.outOffset[j]] += 0.5;
\r
7968 out[info.outOffset[j]] *= scale;
\r
7970 in += info.inJump;
\r
7971 out += info.outJump;
\r
7974 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7975 Int24 *in = (Int24 *)inBuffer;
\r
7976 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7977 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7978 for (j=0; j<info.channels; j++) {
\r
7979 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
7980 out[info.outOffset[j]] += 0.5;
\r
7981 out[info.outOffset[j]] *= scale;
\r
7983 in += info.inJump;
\r
7984 out += info.outJump;
\r
7987 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7988 Int32 *in = (Int32 *)inBuffer;
\r
7989 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7990 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7991 for (j=0; j<info.channels; j++) {
\r
7992 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7993 out[info.outOffset[j]] += 0.5;
\r
7994 out[info.outOffset[j]] *= scale;
\r
7996 in += info.inJump;
\r
7997 out += info.outJump;
\r
8000 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8001 // Channel compensation and/or (de)interleaving only.
\r
8002 Float32 *in = (Float32 *)inBuffer;
\r
8003 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8004 for (j=0; j<info.channels; j++) {
\r
8005 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8007 in += info.inJump;
\r
8008 out += info.outJump;
\r
8011 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8012 Float64 *in = (Float64 *)inBuffer;
\r
8013 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8014 for (j=0; j<info.channels; j++) {
\r
8015 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8017 in += info.inJump;
\r
8018 out += info.outJump;
\r
8022 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8023 Int32 *out = (Int32 *)outBuffer;
\r
8024 if (info.inFormat == RTAUDIO_SINT8) {
\r
8025 signed char *in = (signed char *)inBuffer;
\r
8026 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8027 for (j=0; j<info.channels; j++) {
\r
8028 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8029 out[info.outOffset[j]] <<= 24;
\r
8031 in += info.inJump;
\r
8032 out += info.outJump;
\r
8035 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8036 Int16 *in = (Int16 *)inBuffer;
\r
8037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8038 for (j=0; j<info.channels; j++) {
\r
8039 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8040 out[info.outOffset[j]] <<= 16;
\r
8042 in += info.inJump;
\r
8043 out += info.outJump;
\r
8046 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8047 Int24 *in = (Int24 *)inBuffer;
\r
8048 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8049 for (j=0; j<info.channels; j++) {
\r
8050 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8051 out[info.outOffset[j]] <<= 8;
\r
8053 in += info.inJump;
\r
8054 out += info.outJump;
\r
8057 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8058 // Channel compensation and/or (de)interleaving only.
\r
8059 Int32 *in = (Int32 *)inBuffer;
\r
8060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8061 for (j=0; j<info.channels; j++) {
\r
8062 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8064 in += info.inJump;
\r
8065 out += info.outJump;
\r
8068 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8069 Float32 *in = (Float32 *)inBuffer;
\r
8070 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8071 for (j=0; j<info.channels; j++) {
\r
8072 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8074 in += info.inJump;
\r
8075 out += info.outJump;
\r
8078 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8079 Float64 *in = (Float64 *)inBuffer;
\r
8080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8081 for (j=0; j<info.channels; j++) {
\r
8082 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8084 in += info.inJump;
\r
8085 out += info.outJump;
\r
8089 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8090 Int24 *out = (Int24 *)outBuffer;
\r
8091 if (info.inFormat == RTAUDIO_SINT8) {
\r
8092 signed char *in = (signed char *)inBuffer;
\r
8093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8094 for (j=0; j<info.channels; j++) {
\r
8095 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8096 //out[info.outOffset[j]] <<= 16;
\r
8098 in += info.inJump;
\r
8099 out += info.outJump;
\r
8102 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8103 Int16 *in = (Int16 *)inBuffer;
\r
8104 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8105 for (j=0; j<info.channels; j++) {
\r
8106 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8107 //out[info.outOffset[j]] <<= 8;
\r
8109 in += info.inJump;
\r
8110 out += info.outJump;
\r
8113 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8114 // Channel compensation and/or (de)interleaving only.
\r
8115 Int24 *in = (Int24 *)inBuffer;
\r
8116 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8117 for (j=0; j<info.channels; j++) {
\r
8118 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8120 in += info.inJump;
\r
8121 out += info.outJump;
\r
8124 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8125 Int32 *in = (Int32 *)inBuffer;
\r
8126 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8127 for (j=0; j<info.channels; j++) {
\r
8128 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8129 //out[info.outOffset[j]] >>= 8;
\r
8131 in += info.inJump;
\r
8132 out += info.outJump;
\r
8135 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8136 Float32 *in = (Float32 *)inBuffer;
\r
8137 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8138 for (j=0; j<info.channels; j++) {
\r
8139 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8141 in += info.inJump;
\r
8142 out += info.outJump;
\r
8145 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8146 Float64 *in = (Float64 *)inBuffer;
\r
8147 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8148 for (j=0; j<info.channels; j++) {
\r
8149 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8151 in += info.inJump;
\r
8152 out += info.outJump;
\r
8156 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8157 Int16 *out = (Int16 *)outBuffer;
\r
8158 if (info.inFormat == RTAUDIO_SINT8) {
\r
8159 signed char *in = (signed char *)inBuffer;
\r
8160 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8161 for (j=0; j<info.channels; j++) {
\r
8162 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8163 out[info.outOffset[j]] <<= 8;
\r
8165 in += info.inJump;
\r
8166 out += info.outJump;
\r
8169 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8170 // Channel compensation and/or (de)interleaving only.
\r
8171 Int16 *in = (Int16 *)inBuffer;
\r
8172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8173 for (j=0; j<info.channels; j++) {
\r
8174 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8176 in += info.inJump;
\r
8177 out += info.outJump;
\r
8180 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8181 Int24 *in = (Int24 *)inBuffer;
\r
8182 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8183 for (j=0; j<info.channels; j++) {
\r
8184 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8186 in += info.inJump;
\r
8187 out += info.outJump;
\r
8190 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8191 Int32 *in = (Int32 *)inBuffer;
\r
8192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8193 for (j=0; j<info.channels; j++) {
\r
8194 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8196 in += info.inJump;
\r
8197 out += info.outJump;
\r
8200 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8201 Float32 *in = (Float32 *)inBuffer;
\r
8202 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8203 for (j=0; j<info.channels; j++) {
\r
8204 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8206 in += info.inJump;
\r
8207 out += info.outJump;
\r
8210 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8211 Float64 *in = (Float64 *)inBuffer;
\r
8212 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8213 for (j=0; j<info.channels; j++) {
\r
8214 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8216 in += info.inJump;
\r
8217 out += info.outJump;
\r
8221 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8222 signed char *out = (signed char *)outBuffer;
\r
8223 if (info.inFormat == RTAUDIO_SINT8) {
\r
8224 // Channel compensation and/or (de)interleaving only.
\r
8225 signed char *in = (signed char *)inBuffer;
\r
8226 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8227 for (j=0; j<info.channels; j++) {
\r
8228 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8230 in += info.inJump;
\r
8231 out += info.outJump;
\r
8234 if (info.inFormat == RTAUDIO_SINT16) {
\r
8235 Int16 *in = (Int16 *)inBuffer;
\r
8236 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8237 for (j=0; j<info.channels; j++) {
\r
8238 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8240 in += info.inJump;
\r
8241 out += info.outJump;
\r
8244 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8245 Int24 *in = (Int24 *)inBuffer;
\r
8246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8247 for (j=0; j<info.channels; j++) {
\r
8248 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8250 in += info.inJump;
\r
8251 out += info.outJump;
\r
8254 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8255 Int32 *in = (Int32 *)inBuffer;
\r
8256 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8257 for (j=0; j<info.channels; j++) {
\r
8258 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8260 in += info.inJump;
\r
8261 out += info.outJump;
\r
8264 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8265 Float32 *in = (Float32 *)inBuffer;
\r
8266 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8267 for (j=0; j<info.channels; j++) {
\r
8268 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8270 in += info.inJump;
\r
8271 out += info.outJump;
\r
8274 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8275 Float64 *in = (Float64 *)inBuffer;
\r
8276 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8277 for (j=0; j<info.channels; j++) {
\r
8278 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8280 in += info.inJump;
\r
8281 out += info.outJump;
\r
8287 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8288 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8289 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8291 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8293 register char val;
\r
8294 register char *ptr;
\r
8297 if ( format == RTAUDIO_SINT16 ) {
\r
8298 for ( unsigned int i=0; i<samples; i++ ) {
\r
8299 // Swap 1st and 2nd bytes.
\r
8301 *(ptr) = *(ptr+1);
\r
8304 // Increment 2 bytes.
\r
8308 else if ( format == RTAUDIO_SINT32 ||
\r
8309 format == RTAUDIO_FLOAT32 ) {
\r
8310 for ( unsigned int i=0; i<samples; i++ ) {
\r
8311 // Swap 1st and 4th bytes.
\r
8313 *(ptr) = *(ptr+3);
\r
8316 // Swap 2nd and 3rd bytes.
\r
8319 *(ptr) = *(ptr+1);
\r
8322 // Increment 3 more bytes.
\r
8326 else if ( format == RTAUDIO_SINT24 ) {
\r
8327 for ( unsigned int i=0; i<samples; i++ ) {
\r
8328 // Swap 1st and 3rd bytes.
\r
8330 *(ptr) = *(ptr+2);
\r
8333 // Increment 2 more bytes.
\r
8337 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8338 for ( unsigned int i=0; i<samples; i++ ) {
\r
8339 // Swap 1st and 8th bytes
\r
8341 *(ptr) = *(ptr+7);
\r
8344 // Swap 2nd and 7th bytes
\r
8347 *(ptr) = *(ptr+5);
\r
8350 // Swap 3rd and 6th bytes
\r
8353 *(ptr) = *(ptr+3);
\r
8356 // Swap 4th and 5th bytes
\r
8359 *(ptr) = *(ptr+1);
\r
8362 // Increment 5 more bytes.
\r
8368 // Indentation settings for Vim and Emacs
\r
8370 // Local Variables:
\r
8371 // c-basic-offset: 2
\r
8372 // indent-tabs-mode: nil
\r
8375 // vim: et sts=2 sw=2
\r