1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2011 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.9
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
// Table of candidate sample rates probed against each device.
// NOTE(review): MAX_SAMPLE_RATES must equal the number of entries in
// SAMPLE_RATES below (currently 14) — keep the two in sync when editing.
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
// NOTE(review): the closing "};" (original line 54) is missing from this listing.
\r
// Platform mutex abstraction: Windows critical sections vs. POSIX
// pthread mutexes; the final pair are no-op stand-ins for builds with
// no threading backend.
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
// NOTE(review): the "#else" (original line 67) introducing the dummy branch,
// and the closing "#endif", are missing from this listing — confirm against
// the canonical file before compiling.
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
// Append one enum entry to 'apis' for every audio API compiled into
// this build; the push order defines the constructor's search order.
// NOTE(review): the opening brace and the "#endif" closing each "#if"
// block (plus the closing brace) are missing from this listing.
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
// Instantiate the concrete RtApi subclass matching 'api' and store it
// in rtapi_. Each branch exists only when its backend was compiled in.
// NOTE(review): braces and the "#endif" closing each "#if" block are
// missing from this listing.
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
109 #if defined(__UNIX_JACK__)
\r
110 if ( api == UNIX_JACK )
\r
111 rtapi_ = new RtApiJack();
\r
113 #if defined(__LINUX_ALSA__)
\r
114 if ( api == LINUX_ALSA )
\r
115 rtapi_ = new RtApiAlsa();
\r
117 #if defined(__LINUX_OSS__)
\r
118 if ( api == LINUX_OSS )
\r
119 rtapi_ = new RtApiOss();
\r
121 #if defined(__WINDOWS_ASIO__)
\r
122 if ( api == WINDOWS_ASIO )
\r
123 rtapi_ = new RtApiAsio();
\r
125 #if defined(__WINDOWS_DS__)
\r
126 if ( api == WINDOWS_DS )
\r
127 rtapi_ = new RtApiDs();
\r
129 #if defined(__MACOSX_CORE__)
\r
130 if ( api == MACOSX_CORE )
\r
131 rtapi_ = new RtApiCore();
\r
133 #if defined(__RTAUDIO_DUMMY__)
\r
134 if ( api == RTAUDIO_DUMMY )
\r
135 rtapi_ = new RtApiDummy();
\r
// Constructor: honor an explicitly requested API if compiled in;
// otherwise walk the compiled-API list and keep the first backend that
// reports at least one device.
// NOTE(review): several original lines (e.g. 140-142, 145, 151-152,
// 160-161) are missing from this listing, including the call that
// actually opens the requested API; do not compile as-is.
139 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
143 if ( api != UNSPECIFIED ) {
\r
144 // Attempt to open the specified API.
\r
146 if ( rtapi_ ) return;
\r
148 // No compiled support for specified API value. Issue a debug
\r
149 // warning and continue as if no API was specified.
\r
150 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
153 // Iterate through the compiled APIs and return as soon as we find
\r
154 // one with at least one device or we reach the end of the list.
\r
155 std::vector< RtAudio::Api > apis;
\r
156 getCompiledApi( apis );
\r
157 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
158 openRtApi( apis[i] );
\r
159 if ( rtapi_->getDeviceCount() ) break;
\r
162 if ( rtapi_ ) return;
\r
164 // It should not be possible to get here because the preprocessor
\r
165 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
166 // API-specific definitions are passed to the compiler. But just in
\r
167 // case something weird happens, we'll print out an error message.
\r
168 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
// Destructor — body lines (original 172-174, presumably deleting
// rtapi_) are missing from this listing.
171 RtAudio :: ~RtAudio() throw()
\r
// Thin forwarding wrapper: delegate stream creation to the selected
// API backend. All validation happens in RtApi::openStream.
176 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
177 RtAudio::StreamParameters *inputParameters,
\r
178 RtAudioFormat format, unsigned int sampleRate,
\r
179 unsigned int *bufferFrames,
\r
180 RtAudioCallback callback, void *userData,
\r
181 RtAudio::StreamOptions *options )
\r
183 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
184 sampleRate, bufferFrames, callback,
\r
185 userData, options );
\r
188 // *************************************************** //
\r
190 // Public RtApi definitions (see end of file for
\r
191 // private or protected utility functions).
\r
193 // *************************************************** //
\r
// RtApi constructor body (original lines 197-203): put the stream
// record into a known-closed state and create its mutex.
// NOTE(review): the "RtApi :: RtApi()" signature line and braces are
// missing from this listing.
197 stream_.state = STREAM_CLOSED;
\r
198 stream_.mode = UNINITIALIZED;
\r
199 stream_.apiHandle = 0;
\r
200 stream_.userBuffer[0] = 0;
\r
201 stream_.userBuffer[1] = 0;
\r
202 MUTEX_INITIALIZE( &stream_.mutex );
\r
203 showWarnings_ = true;
\r
// RtApi destructor body (original line 208): release the stream mutex
// created above. Signature line missing from this listing.
208 MUTEX_DESTROY( &stream_.mutex );
\r
// Validate user parameters, probe/open the requested output and/or
// input device via the subclass probeDeviceOpen(), then record the
// callback and mark the stream STOPPED. error() with INVALID_USE /
// SYSTEM_ERROR is the failure path throughout.
// NOTE(review): braces, "return;" lines and the "bool result;"
// declaration (original lines omitted by this listing) are missing;
// the control flow below cannot be read literally.
211 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
212 RtAudio::StreamParameters *iParams,
\r
213 RtAudioFormat format, unsigned int sampleRate,
\r
214 unsigned int *bufferFrames,
\r
215 RtAudioCallback callback, void *userData,
\r
216 RtAudio::StreamOptions *options )
\r
218 if ( stream_.state != STREAM_CLOSED ) {
\r
219 errorText_ = "RtApi::openStream: a stream is already open!";
\r
220 error( RtError::INVALID_USE );
\r
223 if ( oParams && oParams->nChannels < 1 ) {
\r
224 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
225 error( RtError::INVALID_USE );
\r
228 if ( iParams && iParams->nChannels < 1 ) {
\r
229 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
230 error( RtError::INVALID_USE );
\r
233 if ( oParams == NULL && iParams == NULL ) {
\r
234 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
235 error( RtError::INVALID_USE );
\r
238 if ( formatBytes(format) == 0 ) {
\r
239 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
240 error( RtError::INVALID_USE );
\r
243 unsigned int nDevices = getDeviceCount();
\r
244 unsigned int oChannels = 0;
\r
246 oChannels = oParams->nChannels;
\r
247 if ( oParams->deviceId >= nDevices ) {
\r
248 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
249 error( RtError::INVALID_USE );
\r
253 unsigned int iChannels = 0;
\r
255 iChannels = iParams->nChannels;
\r
256 if ( iParams->deviceId >= nDevices ) {
\r
257 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
258 error( RtError::INVALID_USE );
\r
// Open output first; if the subsequent input open fails, the output
// half is closed again before raising SYSTEM_ERROR (line 277 below).
265 if ( oChannels > 0 ) {
\r
267 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
268 sampleRate, format, bufferFrames, options );
\r
269 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
272 if ( iChannels > 0 ) {
\r
274 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
275 sampleRate, format, bufferFrames, options );
\r
276 if ( result == false ) {
\r
277 if ( oChannels > 0 ) closeStream();
\r
278 error( RtError::SYSTEM_ERROR );
\r
282 stream_.callbackInfo.callback = (void *) callback;
\r
283 stream_.callbackInfo.userData = userData;
\r
// Report the actual buffer count back to the caller via 'options'.
285 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
286 stream_.state = STREAM_STOPPED;
\r
// Base-class stubs: the default-device getters return a fallback value
// (bodies omitted by this listing); closeStream and probeDeviceOpen
// are effectively abstract and must be overridden by every backend.
289 unsigned int RtApi :: getDefaultInputDevice( void )
\r
291 // Should be implemented in subclasses if possible.
\r
295 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
297 // Should be implemented in subclasses if possible.
\r
301 void RtApi :: closeStream( void )
\r
303 // MUST be implemented in subclasses!
\r
307 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
308 unsigned int firstChannel, unsigned int sampleRate,
\r
309 RtAudioFormat format, unsigned int *bufferSize,
\r
310 RtAudio::StreamOptions *options )
\r
312 // MUST be implemented in subclasses!
\r
// Advance the stream clock by one buffer's duration (bufferSize /
// sampleRate seconds) and, when gettimeofday() is available, record a
// wall-clock timestamp so getStreamTime() can interpolate between ticks.
316 void RtApi :: tickStreamTime( void )
\r
318 // Subclasses that do not provide their own implementation of
\r
319 // getStreamTime should call this function once per buffer I/O to
\r
320 // provide basic stream time support.
\r
322 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
324 #if defined( HAVE_GETTIMEOFDAY )
\r
325 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
// Total stream latency in frames: output latency (latency[0]) plus
// input latency (latency[1]), each included only for the active mode.
329 long RtApi :: getStreamLatency( void )
\r
333 long totalLatency = 0;
\r
334 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
335 totalLatency = stream_.latency[0];
\r
336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
337 totalLatency += stream_.latency[1];
\r
339 return totalLatency;
\r
// Stream time in seconds. With HAVE_GETTIMEOFDAY the value is refined
// by the wall-clock time elapsed since the last tickStreamTime() call;
// otherwise (the fallthrough at original line 361) the raw tick-based
// streamTime is returned.
342 double RtApi :: getStreamTime( void )
\r
346 #if defined( HAVE_GETTIMEOFDAY )
\r
347 // Return a very accurate estimate of the stream time by
\r
348 // adding in the elapsed time since the last tick.
\r
349 struct timeval then;
\r
350 struct timeval now;
\r
352 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
353 return stream_.streamTime;
\r
355 gettimeofday( &now, NULL );
\r
356 then = stream_.lastTickTimestamp;
\r
357 return stream_.streamTime +
\r
358 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
359 (then.tv_sec + 0.000001 * then.tv_usec));
\r
361 return stream_.streamTime;
\r
// Simple accessor for the stream's opened sample rate.
365 unsigned int RtApi :: getStreamSampleRate( void )
\r
369 return stream_.sampleRate;
\r
373 // *************************************************** //
\r
375 // OS/API-specific methods.
\r
377 // *************************************************** //
\r
379 #if defined(__MACOSX_CORE__)
\r
381 // The OS X CoreAudio API is designed to use a separate callback
\r
382 // procedure for each of its audio devices. A single RtAudio duplex
\r
383 // stream using two different devices is supported here, though it
\r
384 // cannot be guaranteed to always behave correctly because we cannot
\r
385 // synchronize these two callbacks.
\r
387 // A property listener is installed for over/underrun information.
\r
388 // However, no functionality is currently provided to allow property
\r
389 // listeners to trigger user handlers because it is unclear what could
\r
390 // be done if a critical stream parameter (buffer size, sample rate,
\r
391 // device disconnect) notification arrived. The listeners entail
\r
392 // quite a bit of extra code and most likely, a user program wouldn't
\r
393 // be prepared for the result anyway. However, we do provide a flag
\r
394 // to the client callback function to inform of an over/underrun.
\r
396 // A structure to hold various information related to the CoreAudio API
\r
// Per-stream CoreAudio bookkeeping: one slot per direction
// ([0]=output, [1]=input) for device ids, IOProc ids and stream
// counts, plus drain/condition state used while stopping.
398 struct CoreHandle {
\r
399 AudioDeviceID id[2]; // device ids
\r
400 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
401 AudioDeviceIOProcID procId[2];
\r
403 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
404 UInt32 nStreams[2]; // number of streams to use
\r
406 char *deviceBuffer;
\r
407 pthread_cond_t condition;
\r
408 int drainCounter; // Tracks callback counts when draining
\r
409 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
// NOTE(review): the "bool xrun[2];" member (original line 405, used in
// the initializer below and in xrunListener) and the "CoreHandle()"
// constructor name line (original line 411) are missing from this listing.
412 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Constructor: on OS X 10.6+ point the HAL at a null run loop so
// device property queries/notifications work without a main-thread
// CFRunLoop; a failure is reported as a non-fatal WARNING.
415 RtApiCore:: RtApiCore()
\r
417 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
418 // This is a largely undocumented but absolutely necessary
\r
419 // requirement starting with OS-X 10.6. If not called, queries and
\r
420 // updates to various audio device properties are not handled
\r
422 CFRunLoopRef theRunLoop = NULL;
\r
423 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
424 kAudioObjectPropertyScopeGlobal,
\r
425 kAudioObjectPropertyElementMaster };
\r
426 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
427 if ( result != noErr ) {
\r
428 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
429 error( RtError::WARNING );
\r
// Destructor: close any still-open stream before base-class teardown.
434 RtApiCore :: ~RtApiCore()
\r
436 // The subclass destructor gets called before the base class
\r
437 // destructor, so close an existing stream before deallocating
\r
438 // apiDeviceId memory.
\r
439 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Device count = size of the HAL's kAudioHardwarePropertyDevices
// payload divided by sizeof(AudioDeviceID).
// NOTE(review): the "UInt32 dataSize;" declaration (original line 445)
// is missing from this listing.
442 unsigned int RtApiCore :: getDeviceCount( void )
\r
444 // Find out how many audio devices there are, if any.
\r
446 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
447 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
448 if ( result != noErr ) {
\r
449 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
450 error( RtError::WARNING );
\r
454 return dataSize / sizeof( AudioDeviceID );
\r
// Map the system default input AudioDeviceID to RtAudio's 0-based
// device index by scanning the full device list. Returns 0 when there
// is at most one device; all failures are non-fatal WARNINGs.
// NOTE(review): the "AudioDeviceID id;" declaration (original line 462)
// and the error-path "return 0;" lines are missing from this listing.
457 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
459 unsigned int nDevices = getDeviceCount();
\r
460 if ( nDevices <= 1 ) return 0;
\r
463 UInt32 dataSize = sizeof( AudioDeviceID );
\r
464 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
465 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
466 if ( result != noErr ) {
\r
467 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
468 error( RtError::WARNING );
\r
472 dataSize *= nDevices;
\r
473 AudioDeviceID deviceList[ nDevices ];
\r
474 property.mSelector = kAudioHardwarePropertyDevices;
\r
475 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
476 if ( result != noErr ) {
\r
477 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
478 error( RtError::WARNING );
\r
482 for ( unsigned int i=0; i<nDevices; i++ )
\r
483 if ( id == deviceList[i] ) return i;
\r
485 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
486 error( RtError::WARNING );
\r
// Output-side twin of getDefaultInputDevice: translate the system
// default output AudioDeviceID into RtAudio's 0-based device index.
// NOTE(review): the "AudioDeviceID id;" declaration (original line 495)
// and the error-path "return 0;" lines are missing from this listing.
490 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
492 unsigned int nDevices = getDeviceCount();
\r
493 if ( nDevices <= 1 ) return 0;
\r
496 UInt32 dataSize = sizeof( AudioDeviceID );
\r
497 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
498 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
499 if ( result != noErr ) {
\r
500 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
501 error( RtError::WARNING );
\r
505 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
506 AudioDeviceID deviceList[ nDevices ];
\r
507 property.mSelector = kAudioHardwarePropertyDevices;
\r
508 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
509 if ( result != noErr ) {
\r
510 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
511 error( RtError::WARNING );
\r
515 for ( unsigned int i=0; i<nDevices; i++ )
\r
516 if ( id == deviceList[i] ) return i;
\r
518 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
519 error( RtError::WARNING );
\r
// Probe one CoreAudio device: resolve its AudioDeviceID, build
// "Manufacturer: Name", count output/input/duplex channels from the
// stream configurations, intersect the device's nominal-rate range
// with SAMPLE_RATES, and fill the remaining DeviceInfo fields.
// info.probed stays false on every early-out (WARNING) path.
// NOTE(review): braces, "return info;" lines and several statements
// are missing from this listing; control flow cannot be read literally.
523 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
525 RtAudio::DeviceInfo info;
\r
526 info.probed = false;
\r
529 unsigned int nDevices = getDeviceCount();
\r
530 if ( nDevices == 0 ) {
\r
531 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
532 error( RtError::INVALID_USE );
\r
535 if ( device >= nDevices ) {
\r
536 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
537 error( RtError::INVALID_USE );
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
542 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
543 kAudioObjectPropertyScopeGlobal,
\r
544 kAudioObjectPropertyElementMaster };
\r
545 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
546 0, NULL, &dataSize, (void *) &deviceList );
\r
547 if ( result != noErr ) {
\r
548 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
549 error( RtError::WARNING );
\r
553 AudioDeviceID id = deviceList[ device ];
\r
555 // Get the device name.
\r
557 CFStringRef cfname;
\r
558 dataSize = sizeof( CFStringRef );
\r
559 property.mSelector = kAudioObjectPropertyManufacturer;
\r
560 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
561 if ( result != noErr ) {
\r
562 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
563 errorText_ = errorStream_.str();
\r
564 error( RtError::WARNING );
\r
// NOTE(review): 'mname' is malloc'd here and never freed in this
// listing — apparent memory leak (same for 'name' below); confirm
// against the canonical file.
568 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
569 int length = CFStringGetLength(cfname);
\r
570 char *mname = (char *)malloc(length * 3 + 1);
\r
571 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
572 info.name.append( (const char *)mname, strlen(mname) );
\r
573 info.name.append( ": " );
\r
574 CFRelease( cfname );
\r
577 property.mSelector = kAudioObjectPropertyName;
\r
578 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
579 if ( result != noErr ) {
\r
580 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
581 errorText_ = errorStream_.str();
\r
582 error( RtError::WARNING );
\r
586 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
587 length = CFStringGetLength(cfname);
\r
588 char *name = (char *)malloc(length * 3 + 1);
\r
589 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
590 info.name.append( (const char *)name, strlen(name) );
\r
591 CFRelease( cfname );
\r
594 // Get the output stream "configuration".
\r
595 AudioBufferList *bufferList = nil;
\r
596 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
597 property.mScope = kAudioDevicePropertyScopeOutput;
\r
598 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
600 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
601 if ( result != noErr || dataSize == 0 ) {
\r
602 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
603 errorText_ = errorStream_.str();
\r
604 error( RtError::WARNING );
\r
608 // Allocate the AudioBufferList.
\r
609 bufferList = (AudioBufferList *) malloc( dataSize );
\r
610 if ( bufferList == NULL ) {
\r
611 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
612 error( RtError::WARNING );
\r
616 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
617 if ( result != noErr || dataSize == 0 ) {
\r
618 free( bufferList );
\r
619 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
620 errorText_ = errorStream_.str();
\r
621 error( RtError::WARNING );
\r
625 // Get output channel information.
\r
626 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
627 for ( i=0; i<nStreams; i++ )
\r
628 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
629 free( bufferList );
\r
631 // Get the input stream "configuration".
\r
632 property.mScope = kAudioDevicePropertyScopeInput;
\r
633 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
634 if ( result != noErr || dataSize == 0 ) {
\r
635 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
636 errorText_ = errorStream_.str();
\r
637 error( RtError::WARNING );
\r
641 // Allocate the AudioBufferList.
\r
642 bufferList = (AudioBufferList *) malloc( dataSize );
\r
643 if ( bufferList == NULL ) {
\r
644 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
645 error( RtError::WARNING );
\r
649 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
650 if (result != noErr || dataSize == 0) {
\r
651 free( bufferList );
\r
652 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtError::WARNING );
\r
658 // Get input channel information.
\r
659 nStreams = bufferList->mNumberBuffers;
\r
660 for ( i=0; i<nStreams; i++ )
\r
661 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
662 free( bufferList );
\r
664 // If device opens for both playback and capture, we determine the channels.
\r
665 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
666 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
668 // Probe the device sample rates.
\r
669 bool isInput = false;
\r
670 if ( info.outputChannels == 0 ) isInput = true;
\r
672 // Determine the supported sample rates.
\r
673 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
674 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
676 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
677 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
678 errorText_ = errorStream_.str();
\r
679 error( RtError::WARNING );
\r
683 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
684 AudioValueRange rangeList[ nRanges ];
\r
685 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
686 if ( result != kAudioHardwareNoError ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtError::WARNING );
\r
// Collapse all reported ranges into one [min, max] envelope, then keep
// every entry of the static SAMPLE_RATES table falling inside it.
693 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
694 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
695 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
696 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
699 info.sampleRates.clear();
\r
700 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
701 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
702 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
705 if ( info.sampleRates.size() == 0 ) {
\r
706 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
707 errorText_ = errorStream_.str();
\r
708 error( RtError::WARNING );
\r
712 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
713 // Thus, any other "physical" formats supported by the device are of
\r
714 // no interest to the client.
\r
715 info.nativeFormats = RTAUDIO_FLOAT32;
\r
717 if ( info.outputChannels > 0 )
\r
718 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
719 if ( info.inputChannels > 0 )
\r
720 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
722 info.probed = true;
\r
// AudioDeviceIOProc trampoline: recover the RtApiCore instance from the
// CallbackInfo cookie and forward to callbackEvent(); a false return is
// mapped to kAudioHardwareUnspecifiedError.
726 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
727 const AudioTimeStamp* inNow,
\r
728 const AudioBufferList* inInputData,
\r
729 const AudioTimeStamp* inInputTime,
\r
730 AudioBufferList* outOutputData,
\r
731 const AudioTimeStamp* inOutputTime,
\r
732 void* infoPointer )
\r
734 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
736 RtApiCore *object = (RtApiCore *) info->object;
\r
737 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
738 return kAudioHardwareUnspecifiedError;
\r
740 return kAudioHardwareNoError;
\r
// Property listener: on a kAudioDeviceProcessorOverload notification,
// set the CoreHandle xrun flag for the affected direction
// (xrun[1]=input scope, xrun[0]=output, per the else branch at
// original line 754).
// NOTE(review): the "UInt32 nAddresses," parameter (original line 744),
// the "else" line and closing braces are missing from this listing.
743 OSStatus xrunListener( AudioObjectID inDevice,
\r
745 const AudioObjectPropertyAddress properties[],
\r
746 void* handlePointer )
\r
748 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
749 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
750 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
751 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
752 handle->xrun[1] = true;
\r
754 handle->xrun[0] = true;
\r
758 return kAudioHardwareNoError;
\r
// Property listener: re-read the device's nominal sample rate into the
// Float64 the caller registered as the listener's client data, so the
// registering thread can poll for the rate change to take effect.
761 OSStatus rateListener( AudioObjectID inDevice,
\r
763 const AudioObjectPropertyAddress properties[],
\r
764 void* ratePointer )
\r
767 Float64 *rate = (Float64 *) ratePointer;
\r
768 UInt32 dataSize = sizeof( Float64 );
\r
769 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
770 kAudioObjectPropertyScopeGlobal,
\r
771 kAudioObjectPropertyElementMaster };
\r
772 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
773 return kAudioHardwareNoError;
\r
776 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
777 unsigned int firstChannel, unsigned int sampleRate,
\r
778 RtAudioFormat format, unsigned int *bufferSize,
\r
779 RtAudio::StreamOptions *options )
\r
782 unsigned int nDevices = getDeviceCount();
\r
783 if ( nDevices == 0 ) {
\r
784 // This should not happen because a check is made before this function is called.
\r
785 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
789 if ( device >= nDevices ) {
\r
790 // This should not happen because a check is made before this function is called.
\r
791 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
795 AudioDeviceID deviceList[ nDevices ];
\r
796 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
797 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
798 kAudioObjectPropertyScopeGlobal,
\r
799 kAudioObjectPropertyElementMaster };
\r
800 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
801 0, NULL, &dataSize, (void *) &deviceList );
\r
802 if ( result != noErr ) {
\r
803 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
807 AudioDeviceID id = deviceList[ device ];
\r
809 // Setup for stream mode.
\r
810 bool isInput = false;
\r
811 if ( mode == INPUT ) {
\r
813 property.mScope = kAudioDevicePropertyScopeInput;
\r
816 property.mScope = kAudioDevicePropertyScopeOutput;
\r
818 // Get the stream "configuration".
\r
819 AudioBufferList *bufferList = nil;
\r
821 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
822 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
823 if ( result != noErr || dataSize == 0 ) {
\r
824 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
825 errorText_ = errorStream_.str();
\r
829 // Allocate the AudioBufferList.
\r
830 bufferList = (AudioBufferList *) malloc( dataSize );
\r
831 if ( bufferList == NULL ) {
\r
832 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
836 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
837 if (result != noErr || dataSize == 0) {
\r
838 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
839 errorText_ = errorStream_.str();
\r
843 // Search for one or more streams that contain the desired number of
\r
844 // channels. CoreAudio devices can have an arbitrary number of
\r
845 // streams and each stream can have an arbitrary number of channels.
\r
846 // For each stream, a single buffer of interleaved samples is
\r
847 // provided. RtAudio prefers the use of one stream of interleaved
\r
848 // data or multiple consecutive single-channel streams. However, we
\r
849 // now support multiple consecutive multi-channel streams of
\r
850 // interleaved data as well.
\r
851 UInt32 iStream, offsetCounter = firstChannel;
\r
852 UInt32 nStreams = bufferList->mNumberBuffers;
\r
853 bool monoMode = false;
\r
854 bool foundStream = false;
\r
856 // First check that the device supports the requested number of
\r
858 UInt32 deviceChannels = 0;
\r
859 for ( iStream=0; iStream<nStreams; iStream++ )
\r
860 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
862 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
863 free( bufferList );
\r
864 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
865 errorText_ = errorStream_.str();
\r
869 // Look for a single stream meeting our needs.
\r
870 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
871 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
872 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
873 if ( streamChannels >= channels + offsetCounter ) {
\r
874 firstStream = iStream;
\r
875 channelOffset = offsetCounter;
\r
876 foundStream = true;
\r
879 if ( streamChannels > offsetCounter ) break;
\r
880 offsetCounter -= streamChannels;
\r
883 // If we didn't find a single stream above, then we should be able
\r
884 // to meet the channel specification with multiple streams.
\r
885 if ( foundStream == false ) {
\r
887 offsetCounter = firstChannel;
\r
888 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
889 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
890 if ( streamChannels > offsetCounter ) break;
\r
891 offsetCounter -= streamChannels;
\r
894 firstStream = iStream;
\r
895 channelOffset = offsetCounter;
\r
896 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
898 if ( streamChannels > 1 ) monoMode = false;
\r
899 while ( channelCounter > 0 ) {
\r
900 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
901 if ( streamChannels > 1 ) monoMode = false;
\r
902 channelCounter -= streamChannels;
\r
907 free( bufferList );
\r
909 // Determine the buffer size.
\r
910 AudioValueRange bufferRange;
\r
911 dataSize = sizeof( AudioValueRange );
\r
912 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
913 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
915 if ( result != noErr ) {
\r
916 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
917 errorText_ = errorStream_.str();
\r
921 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
922 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
923 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
925 // Set the buffer size. For multiple streams, I'm assuming we only
\r
926 // need to make this setting for the master channel.
\r
927 UInt32 theSize = (UInt32) *bufferSize;
\r
928 dataSize = sizeof( UInt32 );
\r
929 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
930 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
932 if ( result != noErr ) {
\r
933 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
934 errorText_ = errorStream_.str();
\r
938 // If attempting to setup a duplex stream, the bufferSize parameter
\r
939 // MUST be the same in both directions!
\r
940 *bufferSize = theSize;
\r
941 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
942 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
943 errorText_ = errorStream_.str();
\r
947 stream_.bufferSize = *bufferSize;
\r
948 stream_.nBuffers = 1;
\r
950 // Try to set "hog" mode ... it's not clear to me this is working.
\r
951 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
953 dataSize = sizeof( hog_pid );
\r
954 property.mSelector = kAudioDevicePropertyHogMode;
\r
955 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
956 if ( result != noErr ) {
\r
957 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
958 errorText_ = errorStream_.str();
\r
962 if ( hog_pid != getpid() ) {
\r
963 hog_pid = getpid();
\r
964 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
965 if ( result != noErr ) {
\r
966 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
967 errorText_ = errorStream_.str();
\r
973 // Check and if necessary, change the sample rate for the device.
\r
974 Float64 nominalRate;
\r
975 dataSize = sizeof( Float64 );
\r
976 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
977 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
979 if ( result != noErr ) {
\r
980 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
981 errorText_ = errorStream_.str();
\r
985 // Only change the sample rate if off by more than 1 Hz.
\r
986 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
988 // Set a property listener for the sample rate change
\r
989 Float64 reportedRate = 0.0;
\r
990 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
991 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
994 errorText_ = errorStream_.str();
\r
998 nominalRate = (Float64) sampleRate;
\r
999 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1001 if ( result != noErr ) {
\r
1002 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1003 errorText_ = errorStream_.str();
\r
1007 // Now wait until the reported nominal rate is what we just set.
\r
1008 UInt32 microCounter = 0;
\r
1009 while ( reportedRate != nominalRate ) {
\r
1010 microCounter += 5000;
\r
1011 if ( microCounter > 5000000 ) break;
\r
1015 // Remove the property listener.
\r
1016 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1018 if ( microCounter > 5000000 ) {
\r
1019 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1020 errorText_ = errorStream_.str();
\r
1025 // Now set the stream format for all streams. Also, check the
\r
1026 // physical format of the device and change that if necessary.
\r
1027 AudioStreamBasicDescription description;
\r
1028 dataSize = sizeof( AudioStreamBasicDescription );
\r
1029 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1030 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1031 if ( result != noErr ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1037 // Set the sample rate and data format id. However, only make the
\r
1038 // change if the sample rate is not within 1.0 of the desired
\r
1039 // rate and the format is not linear pcm.
\r
1040 bool updateFormat = false;
\r
1041 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1042 description.mSampleRate = (Float64) sampleRate;
\r
1043 updateFormat = true;
\r
1046 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1047 description.mFormatID = kAudioFormatLinearPCM;
\r
1048 updateFormat = true;
\r
1051 if ( updateFormat ) {
\r
1052 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1053 if ( result != noErr ) {
\r
1054 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1055 errorText_ = errorStream_.str();
\r
1060 // Now check the physical format.
\r
1061 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1062 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1063 if ( result != noErr ) {
\r
1064 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1065 errorText_ = errorStream_.str();
\r
1069 //std::cout << "Current physical stream format:" << std::endl;
\r
1070 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1071 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1072 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1073 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1075 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1076 description.mFormatID = kAudioFormatLinearPCM;
\r
1077 //description.mSampleRate = (Float64) sampleRate;
\r
1078 AudioStreamBasicDescription testDescription = description;
\r
1079 UInt32 formatFlags;
\r
1081 // We'll try higher bit rates first and then work our way down.
\r
1082 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1083 formatFlags = description.mFormatFlags | kLinearPCMFormatFlagIsFloat & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1084 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1085 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1086 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1087 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1088 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1089 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1090 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1091 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1092 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1093 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1094 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1096 bool setPhysicalFormat = false;
\r
1097 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1098 testDescription = description;
\r
1099 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1100 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1101 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1102 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1104 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1105 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1106 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1107 if ( result == noErr ) {
\r
1108 setPhysicalFormat = true;
\r
1109 //std::cout << "Updated physical stream format:" << std::endl;
\r
1110 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1111 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1112 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1113 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1118 if ( !setPhysicalFormat ) {
\r
1119 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1120 errorText_ = errorStream_.str();
\r
1123 } // done setting virtual/physical formats.
\r
1125 // Get the stream / device latency.
\r
1127 dataSize = sizeof( UInt32 );
\r
1128 property.mSelector = kAudioDevicePropertyLatency;
\r
1129 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1130 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1131 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1133 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1134 errorText_ = errorStream_.str();
\r
1135 error( RtError::WARNING );
\r
1139 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1140 // always be presented in native-endian format, so we should never
\r
1141 // need to byte swap.
\r
1142 stream_.doByteSwap[mode] = false;
\r
1144 // From the CoreAudio documentation, PCM data must be supplied as
\r
1146 stream_.userFormat = format;
\r
1147 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1149 if ( streamCount == 1 )
\r
1150 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1151 else // multiple streams
\r
1152 stream_.nDeviceChannels[mode] = channels;
\r
1153 stream_.nUserChannels[mode] = channels;
\r
1154 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1155 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1156 else stream_.userInterleaved = true;
\r
1157 stream_.deviceInterleaved[mode] = true;
\r
1158 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1160 // Set flags for buffer conversion.
\r
1161 stream_.doConvertBuffer[mode] = false;
\r
1162 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1163 stream_.doConvertBuffer[mode] = true;
\r
1164 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1165 stream_.doConvertBuffer[mode] = true;
\r
1166 if ( streamCount == 1 ) {
\r
1167 if ( stream_.nUserChannels[mode] > 1 &&
\r
1168 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1169 stream_.doConvertBuffer[mode] = true;
\r
1171 else if ( monoMode && stream_.userInterleaved )
\r
1172 stream_.doConvertBuffer[mode] = true;
\r
1174 // Allocate our CoreHandle structure for the stream.
\r
1175 CoreHandle *handle = 0;
\r
1176 if ( stream_.apiHandle == 0 ) {
\r
1178 handle = new CoreHandle;
\r
1180 catch ( std::bad_alloc& ) {
\r
1181 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1185 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1186 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1189 stream_.apiHandle = (void *) handle;
\r
1192 handle = (CoreHandle *) stream_.apiHandle;
\r
1193 handle->iStream[mode] = firstStream;
\r
1194 handle->nStreams[mode] = streamCount;
\r
1195 handle->id[mode] = id;
\r
1197 // Allocate necessary internal buffers.
\r
1198 unsigned long bufferBytes;
\r
1199 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1200 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1201 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1202 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1203 if ( stream_.userBuffer[mode] == NULL ) {
\r
1204 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1208 // If possible, we will make use of the CoreAudio stream buffers as
\r
1209 // "device buffers". However, we can't do this if using multiple
\r
1211 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1213 bool makeBuffer = true;
\r
1214 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1215 if ( mode == INPUT ) {
\r
1216 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1217 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1218 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1222 if ( makeBuffer ) {
\r
1223 bufferBytes *= *bufferSize;
\r
1224 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1225 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1226 if ( stream_.deviceBuffer == NULL ) {
\r
1227 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1233 stream_.sampleRate = sampleRate;
\r
1234 stream_.device[mode] = device;
\r
1235 stream_.state = STREAM_STOPPED;
\r
1236 stream_.callbackInfo.object = (void *) this;
\r
1238 // Setup the buffer conversion information structure.
\r
1239 if ( stream_.doConvertBuffer[mode] ) {
\r
1240 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1241 else setConvertInfo( mode, channelOffset );
\r
1244 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1245 // Only one callback procedure per device.
\r
1246 stream_.mode = DUPLEX;
\r
1248 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1249 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1251 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1252 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1254 if ( result != noErr ) {
\r
1255 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1256 errorText_ = errorStream_.str();
\r
1259 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1260 stream_.mode = DUPLEX;
\r
1262 stream_.mode = mode;
\r
1265 // Setup the device property listener for over/underload.
\r
1266 property.mSelector = kAudioDeviceProcessorOverload;
\r
1267 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1273 pthread_cond_destroy( &handle->condition );
\r
1275 stream_.apiHandle = 0;
\r
1278 for ( int i=0; i<2; i++ ) {
\r
1279 if ( stream_.userBuffer[i] ) {
\r
1280 free( stream_.userBuffer[i] );
\r
1281 stream_.userBuffer[i] = 0;
\r
1285 if ( stream_.deviceBuffer ) {
\r
1286 free( stream_.deviceBuffer );
\r
1287 stream_.deviceBuffer = 0;
\r
// Close the currently open stream (CoreAudio implementation): stop any
// running device callback, detach the I/O proc (IOProcID API on OS X 10.5+,
// deprecated AudioDeviceRemoveIOProc otherwise), free the user and device
// buffers, destroy the pthread condition variable, and mark the stream
// UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): this extraction shows dropped lines (the embedded original
// line numbers skip — braces/returns are missing); comments annotate only
// the statements visible here.
1293 void RtApiCore :: closeStream( void )
\r
// Guard: warn (rather than fail) if there is no open stream to close.
1295 if ( stream_.state == STREAM_CLOSED ) {
\r
1296 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1297 error( RtError::WARNING );
\r
1301 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output-side device (handle->id[0]) for OUTPUT/DUPLEX streams.
1302 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1303 if ( stream_.state == STREAM_RUNNING )
\r
1304 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1305 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1306 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1308 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1309 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Tear down the input-side device (handle->id[1]); skipped for DUPLEX on a
// single device, since one callback serves both directions there.
1313 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1314 if ( stream_.state == STREAM_RUNNING )
\r
1315 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1316 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1317 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1319 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1320 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (indices 0 = output, 1 = input).
1324 for ( int i=0; i<2; i++ ) {
\r
1325 if ( stream_.userBuffer[i] ) {
\r
1326 free( stream_.userBuffer[i] );
\r
1327 stream_.userBuffer[i] = 0;
\r
// Release the shared internal "device" conversion buffer, if allocated.
1331 if ( stream_.deviceBuffer ) {
\r
1332 free( stream_.deviceBuffer );
\r
1333 stream_.deviceBuffer = 0;
\r
1336 // Destroy pthread condition variable.
\r
1337 pthread_cond_destroy( &handle->condition );
\r
1339 stream_.apiHandle = 0;
\r
// Reset bookkeeping so a new stream can be opened.
1341 stream_.mode = UNINITIALIZED;
\r
1342 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: under the stream mutex, call AudioDeviceStart() on
// the output device (for OUTPUT/DUPLEX) and on the input device (for INPUT,
// or DUPLEX across two distinct devices), reset the drain bookkeeping, and
// set the state to STREAM_RUNNING. On any CoreAudio failure the mutex is
// released and error( RtError::SYSTEM_ERROR ) is raised with the collected
// message.
// NOTE(review): extraction has dropped some brace/goto lines here; comments
// annotate only the visible statements.
1345 void RtApiCore :: startStream( void )
\r
// Warn-and-return if already running; not treated as a hard error.
1348 if ( stream_.state == STREAM_RUNNING ) {
\r
1349 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1350 error( RtError::WARNING );
\r
1354 MUTEX_LOCK( &stream_.mutex );
\r
1356 OSStatus result = noErr;
\r
1357 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Kick off the output-side callback procedure.
1358 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1360 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1361 if ( result != noErr ) {
\r
1362 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1363 errorText_ = errorStream_.str();
\r
// Kick off the input-side callback procedure when it is a separate device.
1368 if ( stream_.mode == INPUT ||
\r
1369 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1371 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1372 if ( result != noErr ) {
\r
1373 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1374 errorText_ = errorStream_.str();
\r
// Fresh run: no drain in progress.
1379 handle->drainCounter = 0;
\r
1380 handle->internalDrain = false;
\r
1381 stream_.state = STREAM_RUNNING;
\r
1384 MUTEX_UNLOCK( &stream_.mutex );
\r
1386 if ( result == noErr ) return;
\r
1387 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream gracefully. For output, first drain pending data: if no
// drain is already in progress (drainCounter == 0), set drainCounter = 2 and
// block on the condition variable until the callback signals the drain is
// complete; then AudioDeviceStop() each active device. The mutex is released
// around each AudioDeviceStop() call — presumably so the real-time callback
// (which takes the same mutex) can run to completion; TODO confirm against
// the callback's locking.
// NOTE(review): extraction has dropped brace/return lines; comments annotate
// only the visible statements.
1390 void RtApiCore :: stopStream( void )
\r
// Warn-and-return if already stopped.
1393 if ( stream_.state == STREAM_STOPPED ) {
\r
1394 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1395 error( RtError::WARNING );
\r
1399 MUTEX_LOCK( &stream_.mutex );
\r
// Re-check after acquiring the lock: state may have changed while waiting.
1401 if ( stream_.state == STREAM_STOPPED ) {
\r
1402 MUTEX_UNLOCK( &stream_.mutex );
\r
1406 OSStatus result = noErr;
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain requested yet; request one (value 2 =
// externally initiated) and wait for the callback to signal completion.
1410 if ( handle->drainCounter == 0 ) {
\r
1411 handle->drainCounter = 2;
\r
1412 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1415 MUTEX_UNLOCK( &stream_.mutex );
\r
1416 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1417 MUTEX_LOCK( &stream_.mutex );
\r
1418 if ( result != noErr ) {
\r
1419 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1420 errorText_ = errorStream_.str();
\r
// Stop the input device when it is distinct from the output device.
1425 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1427 MUTEX_UNLOCK( &stream_.mutex );
\r
1428 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1429 MUTEX_LOCK( &stream_.mutex );
\r
1430 if ( result != noErr ) {
\r
1431 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1432 errorText_ = errorStream_.str();
\r
1437 stream_.state = STREAM_STOPPED;
\r
1440 MUTEX_UNLOCK( &stream_.mutex );
\r
1442 if ( result == noErr ) return;
\r
1443 error( RtError::SYSTEM_ERROR );
\r
// Abort the stream: set drainCounter = 2 so the callback treats the next
// cycle as an externally requested stop (no graceful drain wait here).
// NOTE(review): the tail of this function is missing from the extraction
// (embedded line numbers end at 1456); it presumably proceeds to stop the
// stream — confirm against the full source.
1446 void RtApiCore :: abortStream( void )
\r
// Warn-and-return if already stopped.
1449 if ( stream_.state == STREAM_STOPPED ) {
\r
1450 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1451 error( RtError::WARNING );
\r
1455 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1456 handle->drainCounter = 2;
\r
// Real-time callback workhorse, invoked per device I/O cycle. It (1) handles
// drain-complete signaling, (2) invokes the user callback to produce/consume
// audio, (3) shuffles output data from the user/device buffer into the
// CoreAudio stream buffer(s) — handling single-stream, mono (non-interleaved)
// multi-stream, and interleaved multi-channel multi-stream layouts — and
// (4) does the mirror-image gather for input. Returns SUCCESS (stream
// continues) in the visible paths.
// NOTE(review): extraction has dropped brace/return lines throughout; the
// comments below annotate only the visible statements.
1461 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1462 const AudioBufferList *inBufferList,
\r
1463 const AudioBufferList *outBufferList )
\r
1465 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1466 if ( stream_.state == STREAM_CLOSED ) {
\r
1467 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1468 error( RtError::WARNING );
\r
1472 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1473 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1475 // Check if we were draining the stream and signal is finished.
\r
// drainCounter > 3: the zero-filled drain cycles have completed. An
// external stopStream() is blocked on the condition variable, so wake it;
// an internal drain (callback returned 1) takes the other branch (the
// statement there is missing from this extraction — TODO confirm).
1476 if ( handle->drainCounter > 3 ) {
\r
1477 if ( handle->internalDrain == true )
\r
1479 else // external call to stopStream()
\r
1480 pthread_cond_signal( &handle->condition );
\r
1484 MUTEX_LOCK( &stream_.mutex );
\r
1486 // The state might change while waiting on a mutex.
\r
1487 if ( stream_.state == STREAM_STOPPED ) {
\r
1488 MUTEX_UNLOCK( &stream_.mutex );
\r
1492 AudioDeviceID outputDevice = handle->id[0];
\r
1494 // Invoke user callback to get fresh output data UNLESS we are
\r
1495 // draining stream or duplex mode AND the input/output devices are
\r
1496 // different AND this function is called for the input device.
\r
1497 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1498 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1499 double streamTime = getStreamTime();
\r
// Build the xrun status flags, clearing the latched flags as we report them.
1500 RtAudioStreamStatus status = 0;
\r
1501 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1502 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1503 handle->xrun[0] = false;
\r
1505 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1506 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1507 handle->xrun[1] = false;
\r
// The user callback's return value drives draining: 0 = continue,
// 1 = drain then stop (internal), 2 = abort immediately.
1510 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1511 stream_.bufferSize, streamTime, status, info->userData );
\r
1512 if ( handle->drainCounter == 2 ) {
\r
1513 MUTEX_UNLOCK( &stream_.mutex );
\r
1517 else if ( handle->drainCounter == 1 )
\r
1518 handle->internalDrain = true;
\r
// ---- Output side: move data into the CoreAudio output buffer list. ----
1521 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1523 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1525 if ( handle->nStreams[0] == 1 ) {
\r
1526 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1528 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1530 else { // fill multiple streams with zeros
\r
1531 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1532 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1534 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
// Single CoreAudio stream: convert (or memcpy) straight into it.
1538 else if ( handle->nStreams[0] == 1 ) {
\r
1539 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1540 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1541 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1543 else { // copy from user buffer
\r
1544 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1545 stream_.userBuffer[0],
\r
1546 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1549 else { // fill multiple streams
\r
// Source is the user buffer, or the device buffer after conversion.
1550 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1551 if ( stream_.doConvertBuffer[0] ) {
\r
1552 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1553 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: one non-interleaved channel per stream — block-copy each.
1556 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1557 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1558 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1559 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1560 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1563 else { // fill multiple multi-channel streams with interleaved data
\r
1564 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1565 Float32 *out, *in;
\r
// If converting, the intermediate device buffer is interleaved regardless
// of the user's interleave preference.
1567 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1568 UInt32 inChannels = stream_.nUserChannels[0];
\r
1569 if ( stream_.doConvertBuffer[0] ) {
\r
1570 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1571 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset = distance between consecutive samples of one channel.
1574 if ( inInterleaved ) inOffset = 1;
\r
1575 else inOffset = stream_.bufferSize;
\r
1577 channelsLeft = inChannels;
\r
1578 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1580 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1581 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1584 // Account for possible channel offset in first stream
\r
1585 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1586 streamChannels -= stream_.channelOffset[0];
\r
1587 outJump = stream_.channelOffset[0];
\r
1591 // Account for possible unfilled channels at end of the last stream
\r
1592 if ( streamChannels > channelsLeft ) {
\r
1593 outJump = streamChannels - channelsLeft;
\r
1594 streamChannels = channelsLeft;
\r
1597 // Determine input buffer offsets and skips
\r
1598 if ( inInterleaved ) {
\r
1599 inJump = inChannels;
\r
1600 in += inChannels - channelsLeft;
\r
1604 in += (inChannels - channelsLeft) * inOffset;
\r
// Per-frame scatter of this stream's channels into the output buffer.
1607 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1608 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1609 *out++ = in[j*inOffset];
\r
1614 channelsLeft -= streamChannels;
\r
// While draining, count zero-filled cycles until drainCounter exceeds 3.
1619 if ( handle->drainCounter ) {
\r
1620 handle->drainCounter++;
\r
// ---- Input side: gather data from the CoreAudio input buffer list. ----
1625 AudioDeviceID inputDevice;
\r
1626 inputDevice = handle->id[1];
\r
1627 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1629 if ( handle->nStreams[1] == 1 ) {
\r
1630 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1631 convertBuffer( stream_.userBuffer[1],
\r
1632 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1633 stream_.convertInfo[1] );
\r
1635 else { // copy to user buffer
\r
1636 memcpy( stream_.userBuffer[1],
\r
1637 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1638 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1641 else { // read from multiple streams
\r
// Destination is the user buffer, or the device buffer if conversion
// back to the user format happens afterwards (see line 1707 below).
1642 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1643 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1645 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1646 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1647 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1648 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1649 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1652 else { // read from multiple multi-channel streams
\r
1653 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1654 Float32 *out, *in;
\r
1656 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1657 UInt32 outChannels = stream_.nUserChannels[1];
\r
1658 if ( stream_.doConvertBuffer[1] ) {
\r
1659 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1660 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset = distance between consecutive samples of one channel.
1663 if ( outInterleaved ) outOffset = 1;
\r
1664 else outOffset = stream_.bufferSize;
\r
1666 channelsLeft = outChannels;
\r
1667 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1669 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1670 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1673 // Account for possible channel offset in first stream
\r
1674 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1675 streamChannels -= stream_.channelOffset[1];
\r
1676 inJump = stream_.channelOffset[1];
\r
1680 // Account for possible unread channels at end of the last stream
\r
1681 if ( streamChannels > channelsLeft ) {
\r
1682 inJump = streamChannels - channelsLeft;
\r
1683 streamChannels = channelsLeft;
\r
1686 // Determine output buffer offsets and skips
\r
1687 if ( outInterleaved ) {
\r
1688 outJump = outChannels;
\r
1689 out += outChannels - channelsLeft;
\r
1693 out += (outChannels - channelsLeft) * outOffset;
\r
// Per-frame gather of this stream's channels from the input buffer.
1696 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1697 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1698 out[j*outOffset] = *in++;
\r
1703 channelsLeft -= streamChannels;
\r
1707 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1708 convertBuffer( stream_.userBuffer[1],
\r
1709 stream_.deviceBuffer,
\r
1710 stream_.convertInfo[1] );
\r
1716 MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream time by one buffer's worth of frames.
1718 RtApi::tickStreamTime();
\r
// Map a CoreAudio OSStatus error code to the name of its constant for use in
// human-readable error messages; unrecognized codes fall through to a generic
// string.
// NOTE(review): the switch opener/closer lines are missing from this
// extraction; only the case labels and returns are visible.
1722 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1726 case kAudioHardwareNotRunningError:
\r
1727 return "kAudioHardwareNotRunningError";
\r
1729 case kAudioHardwareUnspecifiedError:
\r
1730 return "kAudioHardwareUnspecifiedError";
\r
1732 case kAudioHardwareUnknownPropertyError:
\r
1733 return "kAudioHardwareUnknownPropertyError";
\r
1735 case kAudioHardwareBadPropertySizeError:
\r
1736 return "kAudioHardwareBadPropertySizeError";
\r
1738 case kAudioHardwareIllegalOperationError:
\r
1739 return "kAudioHardwareIllegalOperationError";
\r
1741 case kAudioHardwareBadObjectError:
\r
1742 return "kAudioHardwareBadObjectError";
\r
1744 case kAudioHardwareBadDeviceError:
\r
1745 return "kAudioHardwareBadDeviceError";
\r
1747 case kAudioHardwareBadStreamError:
\r
1748 return "kAudioHardwareBadStreamError";
\r
1750 case kAudioHardwareUnsupportedOperationError:
\r
1751 return "kAudioHardwareUnsupportedOperationError";
\r
1753 case kAudioDeviceUnsupportedFormatError:
\r
1754 return "kAudioDeviceUnsupportedFormatError";
\r
1756 case kAudioDevicePermissionsError:
\r
1757 return "kAudioDevicePermissionsError";
\r
// Default: unknown/unlisted CoreAudio error code.
1760 return "CoreAudio unknown error";
\r
1764 //******************** End of __MACOSX_CORE__ *********************//
\r
1767 #if defined(__UNIX_JACK__)
\r
1769 // JACK is a low-latency audio server, originally written for the
\r
1770 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1771 // connect a number of different applications to an audio device, as
\r
1772 // well as allowing them to share audio between themselves.
\r
1774 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1775 // have ports connected to the server. The JACK server is typically
\r
1776 // started in a terminal as follows:
\r
1778 // .jackd -d alsa -d hw:0
\r
1780 // or through an interface program such as qjackctl. Many of the
\r
1781 // parameters normally set for a stream are fixed by the JACK server
\r
1782 // and can be specified when the JACK server is started. In
\r
1785 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1787 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1788 // frames, and number of buffers = 4. Once the server is running, it
\r
1789 // is not possible to override these values. If the values are not
\r
1790 // specified in the command-line, the JACK server uses default values.
\r
1792 // The JACK server does not have to be running when an instance of
\r
1793 // RtApiJack is created, though the function getDeviceCount() will
\r
1794 // report 0 devices found until JACK has been started. When no
\r
1795 // devices are available (i.e., the JACK server is not running), a
\r
1796 // stream cannot be opened.
\r
1798 #include <jack/jack.h>
\r
1799 #include <unistd.h>
\r
1802 // A structure to hold various information related to the Jack API
\r
1803 // implementation.
\r
1804 struct JackHandle {
\r
1805 jack_client_t *client;
\r
1806 jack_port_t **ports[2];
\r
1807 std::string deviceName[2];
\r
1809 pthread_cond_t condition;
\r
1810 int drainCounter; // Tracks callback counts when draining
\r
1811 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1814 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1817 ThreadHandle threadId;
\r
1818 void jackSilentError( const char * ) {};
\r
1820 RtApiJack :: RtApiJack()
\r
1822 // Nothing to do here.
\r
1823 #if !defined(__RTAUDIO_DEBUG__)
\r
1824 // Turn off Jack's internal error reporting.
\r
1825 jack_set_error_function( &jackSilentError );
\r
1829 RtApiJack :: ~RtApiJack()
\r
1831 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1834 unsigned int RtApiJack :: getDeviceCount( void )
\r
1836 // See if we can become a jack client.
\r
1837 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1838 jack_status_t *status = NULL;
\r
1839 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1840 if ( client == 0 ) return 0;
\r
1842 const char **ports;
\r
1843 std::string port, previousPort;
\r
1844 unsigned int nChannels = 0, nDevices = 0;
\r
1845 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1847 // Parse the port names up to the first colon (:).
\r
1848 size_t iColon = 0;
\r
1850 port = (char *) ports[ nChannels ];
\r
1851 iColon = port.find(":");
\r
1852 if ( iColon != std::string::npos ) {
\r
1853 port = port.substr( 0, iColon + 1 );
\r
1854 if ( port != previousPort ) {
\r
1856 previousPort = port;
\r
1859 } while ( ports[++nChannels] );
\r
1863 jack_client_close( client );
\r
1867 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1869 RtAudio::DeviceInfo info;
\r
1870 info.probed = false;
\r
1872 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1873 jack_status_t *status = NULL;
\r
1874 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1875 if ( client == 0 ) {
\r
1876 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1877 error( RtError::WARNING );
\r
1881 const char **ports;
\r
1882 std::string port, previousPort;
\r
1883 unsigned int nPorts = 0, nDevices = 0;
\r
1884 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1886 // Parse the port names up to the first colon (:).
\r
1887 size_t iColon = 0;
\r
1889 port = (char *) ports[ nPorts ];
\r
1890 iColon = port.find(":");
\r
1891 if ( iColon != std::string::npos ) {
\r
1892 port = port.substr( 0, iColon );
\r
1893 if ( port != previousPort ) {
\r
1894 if ( nDevices == device ) info.name = port;
\r
1896 previousPort = port;
\r
1899 } while ( ports[++nPorts] );
\r
1903 if ( device >= nDevices ) {
\r
1904 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1905 error( RtError::INVALID_USE );
\r
1908 // Get the current jack server sample rate.
\r
1909 info.sampleRates.clear();
\r
1910 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1912 // Count the available ports containing the client name as device
\r
1913 // channels. Jack "input ports" equal RtAudio output channels.
\r
1914 unsigned int nChannels = 0;
\r
1915 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1917 while ( ports[ nChannels ] ) nChannels++;
\r
1919 info.outputChannels = nChannels;
\r
1922 // Jack "output ports" equal RtAudio input channels.
\r
1924 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1926 while ( ports[ nChannels ] ) nChannels++;
\r
1928 info.inputChannels = nChannels;
\r
1931 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1932 jack_client_close(client);
\r
1933 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1934 error( RtError::WARNING );
\r
1938 // If device opens for both playback and capture, we determine the channels.
\r
1939 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1940 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1942 // Jack always uses 32-bit floats.
\r
1943 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1945 // Jack doesn't provide default devices so we'll use the first available one.
\r
1946 if ( device == 0 && info.outputChannels > 0 )
\r
1947 info.isDefaultOutput = true;
\r
1948 if ( device == 0 && info.inputChannels > 0 )
\r
1949 info.isDefaultInput = true;
\r
1951 jack_client_close(client);
\r
1952 info.probed = true;
\r
1956 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1958 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1960 RtApiJack *object = (RtApiJack *) info->object;
\r
1961 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1966 // This function will be called by a spawned thread when the Jack
\r
1967 // server signals that it is shutting down. It is necessary to handle
\r
1968 // it this way because the jackShutdown() function must return before
\r
1969 // the jack_deactivate() function (in closeStream()) will return.
\r
1970 extern "C" void *jackCloseStream( void *ptr )
\r
1972 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1973 RtApiJack *object = (RtApiJack *) info->object;
\r
1975 object->closeStream();
\r
1977 pthread_exit( NULL );
\r
1979 void jackShutdown( void *infoPointer )
\r
1981 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1982 RtApiJack *object = (RtApiJack *) info->object;
\r
1984 // Check current stream state. If stopped, then we'll assume this
\r
1985 // was called as a result of a call to RtApiJack::stopStream (the
\r
1986 // deactivation of a client handle causes this function to be called).
\r
1987 // If not, we'll assume the Jack server is shutting down or some
\r
1988 // other problem occurred and we should close the stream.
\r
1989 if ( object->isStreamRunning() == false ) return;
\r
1991 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
1992 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
1995 int jackXrun( void *infoPointer )
\r
1997 JackHandle *handle = (JackHandle *) infoPointer;
\r
1999 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2000 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2005 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2006 unsigned int firstChannel, unsigned int sampleRate,
\r
2007 RtAudioFormat format, unsigned int *bufferSize,
\r
2008 RtAudio::StreamOptions *options )
\r
2010 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2012 // Look for jack server and try to become a client (only do once per stream).
\r
2013 jack_client_t *client = 0;
\r
2014 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2015 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2016 jack_status_t *status = NULL;
\r
2017 if ( options && !options->streamName.empty() )
\r
2018 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2020 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2021 if ( client == 0 ) {
\r
2022 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2023 error( RtError::WARNING );
\r
2028 // The handle must have been created on an earlier pass.
\r
2029 client = handle->client;
\r
2032 const char **ports;
\r
2033 std::string port, previousPort, deviceName;
\r
2034 unsigned int nPorts = 0, nDevices = 0;
\r
2035 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2037 // Parse the port names up to the first colon (:).
\r
2038 size_t iColon = 0;
\r
2040 port = (char *) ports[ nPorts ];
\r
2041 iColon = port.find(":");
\r
2042 if ( iColon != std::string::npos ) {
\r
2043 port = port.substr( 0, iColon );
\r
2044 if ( port != previousPort ) {
\r
2045 if ( nDevices == device ) deviceName = port;
\r
2047 previousPort = port;
\r
2050 } while ( ports[++nPorts] );
\r
2054 if ( device >= nDevices ) {
\r
2055 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2059 // Count the available ports containing the client name as device
\r
2060 // channels. Jack "input ports" equal RtAudio output channels.
\r
2061 unsigned int nChannels = 0;
\r
2062 unsigned long flag = JackPortIsInput;
\r
2063 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2064 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2066 while ( ports[ nChannels ] ) nChannels++;
\r
2070 // Compare the jack ports for specified client to the requested number of channels.
\r
2071 if ( nChannels < (channels + firstChannel) ) {
\r
2072 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2073 errorText_ = errorStream_.str();
\r
2077 // Check the jack server sample rate.
\r
2078 unsigned int jackRate = jack_get_sample_rate( client );
\r
2079 if ( sampleRate != jackRate ) {
\r
2080 jack_client_close( client );
\r
2081 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2082 errorText_ = errorStream_.str();
\r
2085 stream_.sampleRate = jackRate;
\r
2087 // Get the latency of the JACK port.
\r
2088 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2089 if ( ports[ firstChannel ] )
\r
2090 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2093 // The jack server always uses 32-bit floating-point data.
\r
2094 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2095 stream_.userFormat = format;
\r
2097 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2098 else stream_.userInterleaved = true;
\r
2100 // Jack always uses non-interleaved buffers.
\r
2101 stream_.deviceInterleaved[mode] = false;
\r
2103 // Jack always provides host byte-ordered data.
\r
2104 stream_.doByteSwap[mode] = false;
\r
2106 // Get the buffer size. The buffer size and number of buffers
\r
2107 // (periods) is set when the jack server is started.
\r
2108 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2109 *bufferSize = stream_.bufferSize;
\r
2111 stream_.nDeviceChannels[mode] = channels;
\r
2112 stream_.nUserChannels[mode] = channels;
\r
2114 // Set flags for buffer conversion.
\r
2115 stream_.doConvertBuffer[mode] = false;
\r
2116 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2117 stream_.doConvertBuffer[mode] = true;
\r
2118 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2119 stream_.nUserChannels[mode] > 1 )
\r
2120 stream_.doConvertBuffer[mode] = true;
\r
2122 // Allocate our JackHandle structure for the stream.
\r
2123 if ( handle == 0 ) {
\r
2125 handle = new JackHandle;
\r
2127 catch ( std::bad_alloc& ) {
\r
2128 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2132 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2133 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2136 stream_.apiHandle = (void *) handle;
\r
2137 handle->client = client;
\r
2139 handle->deviceName[mode] = deviceName;
\r
2141 // Allocate necessary internal buffers.
\r
2142 unsigned long bufferBytes;
\r
2143 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2144 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2145 if ( stream_.userBuffer[mode] == NULL ) {
\r
2146 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2150 if ( stream_.doConvertBuffer[mode] ) {
\r
2152 bool makeBuffer = true;
\r
2153 if ( mode == OUTPUT )
\r
2154 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2155 else { // mode == INPUT
\r
2156 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2157 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2158 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2159 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2163 if ( makeBuffer ) {
\r
2164 bufferBytes *= *bufferSize;
\r
2165 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2166 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2167 if ( stream_.deviceBuffer == NULL ) {
\r
2168 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2174 // Allocate memory for the Jack ports (channels) identifiers.
\r
2175 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2176 if ( handle->ports[mode] == NULL ) {
\r
2177 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2181 stream_.device[mode] = device;
\r
2182 stream_.channelOffset[mode] = firstChannel;
\r
2183 stream_.state = STREAM_STOPPED;
\r
2184 stream_.callbackInfo.object = (void *) this;
\r
2186 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2187 // We had already set up the stream for output.
\r
2188 stream_.mode = DUPLEX;
\r
2190 stream_.mode = mode;
\r
2191 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2192 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2193 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2196 // Register our ports.
\r
2198 if ( mode == OUTPUT ) {
\r
2199 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2200 snprintf( label, 64, "outport %d", i );
\r
2201 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2202 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2206 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2207 snprintf( label, 64, "inport %d", i );
\r
2208 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2209 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2213 // Setup the buffer conversion information structure. We don't use
\r
2214 // buffers to do channel offsets, so we override that parameter
\r
2216 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2222 pthread_cond_destroy( &handle->condition );
\r
2223 jack_client_close( handle->client );
\r
2225 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2226 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2229 stream_.apiHandle = 0;
\r
2232 for ( int i=0; i<2; i++ ) {
\r
2233 if ( stream_.userBuffer[i] ) {
\r
2234 free( stream_.userBuffer[i] );
\r
2235 stream_.userBuffer[i] = 0;
\r
2239 if ( stream_.deviceBuffer ) {
\r
2240 free( stream_.deviceBuffer );
\r
2241 stream_.deviceBuffer = 0;
\r
2247 void RtApiJack :: closeStream( void )
\r
2249 if ( stream_.state == STREAM_CLOSED ) {
\r
2250 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2251 error( RtError::WARNING );
\r
2255 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2258 if ( stream_.state == STREAM_RUNNING )
\r
2259 jack_deactivate( handle->client );
\r
2261 jack_client_close( handle->client );
\r
2265 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2266 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2267 pthread_cond_destroy( &handle->condition );
\r
2269 stream_.apiHandle = 0;
\r
2272 for ( int i=0; i<2; i++ ) {
\r
2273 if ( stream_.userBuffer[i] ) {
\r
2274 free( stream_.userBuffer[i] );
\r
2275 stream_.userBuffer[i] = 0;
\r
2279 if ( stream_.deviceBuffer ) {
\r
2280 free( stream_.deviceBuffer );
\r
2281 stream_.deviceBuffer = 0;
\r
2284 stream_.mode = UNINITIALIZED;
\r
2285 stream_.state = STREAM_CLOSED;
\r
2288 void RtApiJack :: startStream( void )
\r
2291 if ( stream_.state == STREAM_RUNNING ) {
\r
2292 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2293 error( RtError::WARNING );
\r
2297 MUTEX_LOCK(&stream_.mutex);
\r
2299 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2300 int result = jack_activate( handle->client );
\r
2302 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2306 const char **ports;
\r
2308 // Get the list of available ports.
\r
2309 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2311 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2312 if ( ports == NULL) {
\r
2313 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2317 // Now make the port connections. Since RtAudio wasn't designed to
\r
2318 // allow the user to select particular channels of a device, we'll
\r
2319 // just open the first "nChannels" ports with offset.
\r
2320 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2322 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2323 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2326 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2333 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2335 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2336 if ( ports == NULL) {
\r
2337 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2341 // Now make the port connections. See note above.
\r
2342 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2344 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2345 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2348 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2355 handle->drainCounter = 0;
\r
2356 handle->internalDrain = false;
\r
2357 stream_.state = STREAM_RUNNING;
\r
2360 MUTEX_UNLOCK(&stream_.mutex);
\r
2362 if ( result == 0 ) return;
\r
2363 error( RtError::SYSTEM_ERROR );
\r
2366 void RtApiJack :: stopStream( void )
\r
2369 if ( stream_.state == STREAM_STOPPED ) {
\r
2370 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2371 error( RtError::WARNING );
\r
2375 MUTEX_LOCK( &stream_.mutex );
\r
2377 if ( stream_.state == STREAM_STOPPED ) {
\r
2378 MUTEX_UNLOCK( &stream_.mutex );
\r
2382 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2383 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2385 if ( handle->drainCounter == 0 ) {
\r
2386 handle->drainCounter = 2;
\r
2387 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2391 jack_deactivate( handle->client );
\r
2392 stream_.state = STREAM_STOPPED;
\r
2394 MUTEX_UNLOCK( &stream_.mutex );
\r
2397 void RtApiJack :: abortStream( void )
\r
2400 if ( stream_.state == STREAM_STOPPED ) {
\r
2401 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2402 error( RtError::WARNING );
\r
2406 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2407 handle->drainCounter = 2;
\r
2412 // This function will be called by a spawned thread when the user
\r
2413 // callback function signals that the stream should be stopped or
\r
2414 // aborted. It is necessary to handle it this way because the
\r
2415 // callbackEvent() function must return before the jack_deactivate()
\r
2416 // function will return.
\r
2417 extern "C" void *jackStopStream( void *ptr )
\r
2419 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2420 RtApiJack *object = (RtApiJack *) info->object;
\r
2422 object->stopStream();
\r
2424 pthread_exit( NULL );
\r
2427 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2429 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2430 if ( stream_.state == STREAM_CLOSED ) {
\r
2431 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2432 error( RtError::WARNING );
\r
2435 if ( stream_.bufferSize != nframes ) {
\r
2436 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2437 error( RtError::WARNING );
\r
2441 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2442 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2444 // Check if we were draining the stream and signal is finished.
\r
2445 if ( handle->drainCounter > 3 ) {
\r
2446 if ( handle->internalDrain == true )
\r
2447 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2449 pthread_cond_signal( &handle->condition );
\r
2453 MUTEX_LOCK( &stream_.mutex );
\r
2455 // The state might change while waiting on a mutex.
\r
2456 if ( stream_.state == STREAM_STOPPED ) {
\r
2457 MUTEX_UNLOCK( &stream_.mutex );
\r
2461 // Invoke user callback first, to get fresh output data.
\r
2462 if ( handle->drainCounter == 0 ) {
\r
2463 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2464 double streamTime = getStreamTime();
\r
2465 RtAudioStreamStatus status = 0;
\r
2466 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2467 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2468 handle->xrun[0] = false;
\r
2470 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2471 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2472 handle->xrun[1] = false;
\r
2474 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2475 stream_.bufferSize, streamTime, status, info->userData );
\r
2476 if ( handle->drainCounter == 2 ) {
\r
2477 MUTEX_UNLOCK( &stream_.mutex );
\r
2479 pthread_create( &id, NULL, jackStopStream, info );
\r
2482 else if ( handle->drainCounter == 1 )
\r
2483 handle->internalDrain = true;
\r
2486 jack_default_audio_sample_t *jackbuffer;
\r
2487 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2490 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2492 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2493 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2494 memset( jackbuffer, 0, bufferBytes );
\r
2498 else if ( stream_.doConvertBuffer[0] ) {
\r
2500 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2502 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2503 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2504 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2507 else { // no buffer conversion
\r
2508 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2509 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2510 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2514 if ( handle->drainCounter ) {
\r
2515 handle->drainCounter++;
\r
2520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2522 if ( stream_.doConvertBuffer[1] ) {
\r
2523 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2524 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2525 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2529 else { // no buffer conversion
\r
2530 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2531 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2532 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2538 MUTEX_UNLOCK(&stream_.mutex);
\r
2540 RtApi::tickStreamTime();
\r
2543 //******************** End of __UNIX_JACK__ *********************//
\r
2546 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2548 // The ASIO API is designed around a callback scheme, so this
\r
2549 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2550 // Jack. The primary constraint with ASIO is that it only allows
\r
2551 // access to a single driver at a time. Thus, it is not possible to
\r
2552 // have more than one simultaneous RtAudio stream.
\r
2554 // This implementation also requires a number of external ASIO files
\r
2555 // and a few global variables. The ASIO callback scheme does not
\r
2556 // allow for the passing of user data, so we must create a global
\r
2557 // pointer to our callbackInfo structure.
\r
2559 // On unix systems, we make use of a pthread condition variable.
\r
2560 // Since there is no equivalent in Windows, I hacked something based
\r
2561 // on information found in
\r
2562 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2564 #include "asiosys.h"
\r
2566 #include "iasiothiscallresolver.h"
\r
2567 #include "asiodrivers.h"
\r
2570 AsioDrivers drivers;
\r
2571 ASIOCallbacks asioCallbacks;
\r
2572 ASIODriverInfo driverInfo;
\r
2573 CallbackInfo *asioCallbackInfo;
\r
2576 struct AsioHandle {
\r
2577 int drainCounter; // Tracks callback counts when draining
\r
2578 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2579 ASIOBufferInfo *bufferInfos;
\r
2583 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2586 // Function declarations (definitions at end of section)
\r
2587 static const char* getAsioErrorString( ASIOError result );
\r
2588 void sampleRateChanged( ASIOSampleRate sRate );
\r
2589 long asioMessages( long selector, long value, void* message, double* opt );
\r
2591 RtApiAsio :: RtApiAsio()
\r
2593 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2594 // CoInitialize beforehand, but it must be for appartment threading
\r
2595 // (in which case, CoInitilialize will return S_FALSE here).
\r
2596 coInitialized_ = false;
\r
2597 HRESULT hr = CoInitialize( NULL );
\r
2598 if ( FAILED(hr) ) {
\r
2599 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2600 error( RtError::WARNING );
\r
2602 coInitialized_ = true;
\r
2604 drivers.removeCurrentDriver();
\r
2605 driverInfo.asioVersion = 2;
\r
2607 // See note in DirectSound implementation about GetDesktopWindow().
\r
2608 driverInfo.sysRef = GetForegroundWindow();
\r
2611 RtApiAsio :: ~RtApiAsio()
\r
2613 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2614 if ( coInitialized_ ) CoUninitialize();
\r
2617 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2619 return (unsigned int) drivers.asioGetNumDev();
\r
2622 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2624 RtAudio::DeviceInfo info;
\r
2625 info.probed = false;
\r
2628 unsigned int nDevices = getDeviceCount();
\r
2629 if ( nDevices == 0 ) {
\r
2630 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2631 error( RtError::INVALID_USE );
\r
2634 if ( device >= nDevices ) {
\r
2635 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2636 error( RtError::INVALID_USE );
\r
2639 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2640 if ( stream_.state != STREAM_CLOSED ) {
\r
2641 if ( device >= devices_.size() ) {
\r
2642 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2643 error( RtError::WARNING );
\r
2646 return devices_[ device ];
\r
2649 char driverName[32];
\r
2650 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2651 if ( result != ASE_OK ) {
\r
2652 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2653 errorText_ = errorStream_.str();
\r
2654 error( RtError::WARNING );
\r
2658 info.name = driverName;
\r
2660 if ( !drivers.loadDriver( driverName ) ) {
\r
2661 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2662 errorText_ = errorStream_.str();
\r
2663 error( RtError::WARNING );
\r
2667 result = ASIOInit( &driverInfo );
\r
2668 if ( result != ASE_OK ) {
\r
2669 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2670 errorText_ = errorStream_.str();
\r
2671 error( RtError::WARNING );
\r
2675 // Determine the device channel information.
\r
2676 long inputChannels, outputChannels;
\r
2677 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2678 if ( result != ASE_OK ) {
\r
2679 drivers.removeCurrentDriver();
\r
2680 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2681 errorText_ = errorStream_.str();
\r
2682 error( RtError::WARNING );
\r
2686 info.outputChannels = outputChannels;
\r
2687 info.inputChannels = inputChannels;
\r
2688 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2689 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2691 // Determine the supported sample rates.
\r
2692 info.sampleRates.clear();
\r
2693 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2694 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2695 if ( result == ASE_OK )
\r
2696 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2699 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2700 ASIOChannelInfo channelInfo;
\r
2701 channelInfo.channel = 0;
\r
2702 channelInfo.isInput = true;
\r
2703 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2704 result = ASIOGetChannelInfo( &channelInfo );
\r
2705 if ( result != ASE_OK ) {
\r
2706 drivers.removeCurrentDriver();
\r
2707 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2708 errorText_ = errorStream_.str();
\r
2709 error( RtError::WARNING );
\r
2713 info.nativeFormats = 0;
\r
2714 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2715 info.nativeFormats |= RTAUDIO_SINT16;
\r
2716 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2717 info.nativeFormats |= RTAUDIO_SINT32;
\r
2718 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2719 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2720 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2721 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2723 if ( info.outputChannels > 0 )
\r
2724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2725 if ( info.inputChannels > 0 )
\r
2726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2728 info.probed = true;
\r
2729 drivers.removeCurrentDriver();
\r
2733 void bufferSwitch( long index, ASIOBool processNow )
\r
2735 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2736 object->callbackEvent( index );
\r
2739 void RtApiAsio :: saveDeviceInfo( void )
\r
2743 unsigned int nDevices = getDeviceCount();
\r
2744 devices_.resize( nDevices );
\r
2745 for ( unsigned int i=0; i<nDevices; i++ )
\r
2746 devices_[i] = getDeviceInfo( i );
\r
2749 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2750 unsigned int firstChannel, unsigned int sampleRate,
\r
2751 RtAudioFormat format, unsigned int *bufferSize,
\r
2752 RtAudio::StreamOptions *options )
\r
2754 // For ASIO, a duplex stream MUST use the same driver.
\r
2755 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2756 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2760 char driverName[32];
\r
2761 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2762 if ( result != ASE_OK ) {
\r
2763 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2764 errorText_ = errorStream_.str();
\r
2768 // Only load the driver once for duplex stream.
\r
2769 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2770 // The getDeviceInfo() function will not work when a stream is open
\r
2771 // because ASIO does not allow multiple devices to run at the same
\r
2772 // time. Thus, we'll probe the system before opening a stream and
\r
2773 // save the results for use by getDeviceInfo().
\r
2774 this->saveDeviceInfo();
\r
2776 if ( !drivers.loadDriver( driverName ) ) {
\r
2777 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2782 result = ASIOInit( &driverInfo );
\r
2783 if ( result != ASE_OK ) {
\r
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2785 errorText_ = errorStream_.str();
\r
2790 // Check the device channel count.
\r
2791 long inputChannels, outputChannels;
\r
2792 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2793 if ( result != ASE_OK ) {
\r
2794 drivers.removeCurrentDriver();
\r
2795 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2800 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2801 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2802 drivers.removeCurrentDriver();
\r
2803 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2804 errorText_ = errorStream_.str();
\r
2807 stream_.nDeviceChannels[mode] = channels;
\r
2808 stream_.nUserChannels[mode] = channels;
\r
2809 stream_.channelOffset[mode] = firstChannel;
\r
2811 // Verify the sample rate is supported.
\r
2812 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2813 if ( result != ASE_OK ) {
\r
2814 drivers.removeCurrentDriver();
\r
2815 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2816 errorText_ = errorStream_.str();
\r
2820 // Get the current sample rate
\r
2821 ASIOSampleRate currentRate;
\r
2822 result = ASIOGetSampleRate( ¤tRate );
\r
2823 if ( result != ASE_OK ) {
\r
2824 drivers.removeCurrentDriver();
\r
2825 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2826 errorText_ = errorStream_.str();
\r
2830 // Set the sample rate only if necessary
\r
2831 if ( currentRate != sampleRate ) {
\r
2832 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2841 // Determine the driver data type.
\r
2842 ASIOChannelInfo channelInfo;
\r
2843 channelInfo.channel = 0;
\r
2844 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2845 else channelInfo.isInput = true;
\r
2846 result = ASIOGetChannelInfo( &channelInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 drivers.removeCurrentDriver();
\r
2849 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2850 errorText_ = errorStream_.str();
\r
2854 // Assuming WINDOWS host is always little-endian.
\r
2855 stream_.doByteSwap[mode] = false;
\r
2856 stream_.userFormat = format;
\r
2857 stream_.deviceFormat[mode] = 0;
\r
2858 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2860 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2862 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2863 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2864 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2866 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2867 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2868 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2870 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2871 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2872 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2875 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2876 drivers.removeCurrentDriver();
\r
2877 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2878 errorText_ = errorStream_.str();
\r
2882 // Set the buffer size. For a duplex stream, this will end up
\r
2883 // setting the buffer size based on the input constraints, which
\r
2885 long minSize, maxSize, preferSize, granularity;
\r
2886 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2895 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2896 else if ( granularity == -1 ) {
\r
2897 // Make sure bufferSize is a power of two.
\r
2898 int log2_of_min_size = 0;
\r
2899 int log2_of_max_size = 0;
\r
2901 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2902 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2903 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2906 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2907 int min_delta_num = log2_of_min_size;
\r
2909 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2910 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2911 if (current_delta < min_delta) {
\r
2912 min_delta = current_delta;
\r
2913 min_delta_num = i;
\r
2917 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2918 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2919 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2921 else if ( granularity != 0 ) {
\r
2922 // Set to an even multiple of granularity, rounding up.
\r
2923 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2926 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2927 drivers.removeCurrentDriver();
\r
2928 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2932 stream_.bufferSize = *bufferSize;
\r
2933 stream_.nBuffers = 2;
\r
2935 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2936 else stream_.userInterleaved = true;
\r
2938 // ASIO always uses non-interleaved buffers.
\r
2939 stream_.deviceInterleaved[mode] = false;
\r
2941 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2942 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2943 if ( handle == 0 ) {
\r
2945 handle = new AsioHandle;
\r
2947 catch ( std::bad_alloc& ) {
\r
2948 //if ( handle == NULL ) {
\r
2949 drivers.removeCurrentDriver();
\r
2950 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2953 handle->bufferInfos = 0;
\r
2955 // Create a manual-reset event.
\r
2956 handle->condition = CreateEvent( NULL, // no security
\r
2957 TRUE, // manual-reset
\r
2958 FALSE, // non-signaled initially
\r
2959 NULL ); // unnamed
\r
2960 stream_.apiHandle = (void *) handle;
\r
2963 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2964 // and output separately, we'll have to dispose of previously
\r
2965 // created output buffers for a duplex stream.
\r
2966 long inputLatency, outputLatency;
\r
2967 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2968 ASIODisposeBuffers();
\r
2969 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2972 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2973 bool buffersAllocated = false;
\r
2974 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2975 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2976 if ( handle->bufferInfos == NULL ) {
\r
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2978 errorText_ = errorStream_.str();
\r
2982 ASIOBufferInfo *infos;
\r
2983 infos = handle->bufferInfos;
\r
2984 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2985 infos->isInput = ASIOFalse;
\r
2986 infos->channelNum = i + stream_.channelOffset[0];
\r
2987 infos->buffers[0] = infos->buffers[1] = 0;
\r
2989 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2990 infos->isInput = ASIOTrue;
\r
2991 infos->channelNum = i + stream_.channelOffset[1];
\r
2992 infos->buffers[0] = infos->buffers[1] = 0;
\r
2995 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
2996 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
2997 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
2998 asioCallbacks.asioMessage = &asioMessages;
\r
2999 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3000 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3001 if ( result != ASE_OK ) {
\r
3002 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3003 errorText_ = errorStream_.str();
\r
3006 buffersAllocated = true;
\r
3008 // Set flags for buffer conversion.
\r
3009 stream_.doConvertBuffer[mode] = false;
\r
3010 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3011 stream_.doConvertBuffer[mode] = true;
\r
3012 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3013 stream_.nUserChannels[mode] > 1 )
\r
3014 stream_.doConvertBuffer[mode] = true;
\r
3016 // Allocate necessary internal buffers
\r
3017 unsigned long bufferBytes;
\r
3018 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3019 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3020 if ( stream_.userBuffer[mode] == NULL ) {
\r
3021 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3025 if ( stream_.doConvertBuffer[mode] ) {
\r
3027 bool makeBuffer = true;
\r
3028 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3029 if ( mode == INPUT ) {
\r
3030 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3031 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3032 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3036 if ( makeBuffer ) {
\r
3037 bufferBytes *= *bufferSize;
\r
3038 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3039 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3040 if ( stream_.deviceBuffer == NULL ) {
\r
3041 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3047 stream_.sampleRate = sampleRate;
\r
3048 stream_.device[mode] = device;
\r
3049 stream_.state = STREAM_STOPPED;
\r
3050 asioCallbackInfo = &stream_.callbackInfo;
\r
3051 stream_.callbackInfo.object = (void *) this;
\r
3052 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3053 // We had already set up an output stream.
\r
3054 stream_.mode = DUPLEX;
\r
3056 stream_.mode = mode;
\r
3058 // Determine device latencies
\r
3059 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3060 if ( result != ASE_OK ) {
\r
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3062 errorText_ = errorStream_.str();
\r
3063 error( RtError::WARNING); // warn but don't fail
\r
3066 stream_.latency[0] = outputLatency;
\r
3067 stream_.latency[1] = inputLatency;
\r
3070 // Setup the buffer conversion information structure. We don't use
\r
3071 // buffers to do channel offsets, so we override that parameter
\r
3073 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3078 if ( buffersAllocated )
\r
3079 ASIODisposeBuffers();
\r
3080 drivers.removeCurrentDriver();
\r
3083 CloseHandle( handle->condition );
\r
3084 if ( handle->bufferInfos )
\r
3085 free( handle->bufferInfos );
\r
3087 stream_.apiHandle = 0;
\r
3090 for ( int i=0; i<2; i++ ) {
\r
3091 if ( stream_.userBuffer[i] ) {
\r
3092 free( stream_.userBuffer[i] );
\r
3093 stream_.userBuffer[i] = 0;
\r
3097 if ( stream_.deviceBuffer ) {
\r
3098 free( stream_.deviceBuffer );
\r
3099 stream_.deviceBuffer = 0;
\r
3105 void RtApiAsio :: closeStream()
\r
3107 if ( stream_.state == STREAM_CLOSED ) {
\r
3108 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3109 error( RtError::WARNING );
\r
3113 if ( stream_.state == STREAM_RUNNING ) {
\r
3114 stream_.state = STREAM_STOPPED;
\r
3117 ASIODisposeBuffers();
\r
3118 drivers.removeCurrentDriver();
\r
3120 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3122 CloseHandle( handle->condition );
\r
3123 if ( handle->bufferInfos )
\r
3124 free( handle->bufferInfos );
\r
3126 stream_.apiHandle = 0;
\r
3129 for ( int i=0; i<2; i++ ) {
\r
3130 if ( stream_.userBuffer[i] ) {
\r
3131 free( stream_.userBuffer[i] );
\r
3132 stream_.userBuffer[i] = 0;
\r
3136 if ( stream_.deviceBuffer ) {
\r
3137 free( stream_.deviceBuffer );
\r
3138 stream_.deviceBuffer = 0;
\r
3141 stream_.mode = UNINITIALIZED;
\r
3142 stream_.state = STREAM_CLOSED;
\r
3145 bool stopThreadCalled = false;
\r
3147 void RtApiAsio :: startStream()
\r
3150 if ( stream_.state == STREAM_RUNNING ) {
\r
3151 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3152 error( RtError::WARNING );
\r
3156 //MUTEX_LOCK( &stream_.mutex );
\r
3158 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3159 ASIOError result = ASIOStart();
\r
3160 if ( result != ASE_OK ) {
\r
3161 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3162 errorText_ = errorStream_.str();
\r
3166 handle->drainCounter = 0;
\r
3167 handle->internalDrain = false;
\r
3168 ResetEvent( handle->condition );
\r
3169 stream_.state = STREAM_RUNNING;
\r
3173 //MUTEX_UNLOCK( &stream_.mutex );
\r
3175 stopThreadCalled = false;
\r
3177 if ( result == ASE_OK ) return;
\r
3178 error( RtError::SYSTEM_ERROR );
\r
3181 void RtApiAsio :: stopStream()
\r
3184 if ( stream_.state == STREAM_STOPPED ) {
\r
3185 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3186 error( RtError::WARNING );
\r
3191 MUTEX_LOCK( &stream_.mutex );
\r
3193 if ( stream_.state == STREAM_STOPPED ) {
\r
3194 MUTEX_UNLOCK( &stream_.mutex );
\r
3199 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3200 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3201 if ( handle->drainCounter == 0 ) {
\r
3202 handle->drainCounter = 2;
\r
3203 // MUTEX_UNLOCK( &stream_.mutex );
\r
3204 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3205 //ResetEvent( handle->condition );
\r
3206 // MUTEX_LOCK( &stream_.mutex );
\r
3210 stream_.state = STREAM_STOPPED;
\r
3212 ASIOError result = ASIOStop();
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3215 errorText_ = errorStream_.str();
\r
3218 // MUTEX_UNLOCK( &stream_.mutex );
\r
3220 if ( result == ASE_OK ) return;
\r
3221 error( RtError::SYSTEM_ERROR );
\r
3224 void RtApiAsio :: abortStream()
\r
3227 if ( stream_.state == STREAM_STOPPED ) {
\r
3228 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3229 error( RtError::WARNING );
\r
3233 // The following lines were commented-out because some behavior was
\r
3234 // noted where the device buffers need to be zeroed to avoid
\r
3235 // continuing sound, even when the device buffers are completely
\r
3236 // disposed. So now, calling abort is the same as calling stop.
\r
3237 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3238 // handle->drainCounter = 2;
\r
3242 // This function will be called by a spawned thread when the user
\r
3243 // callback function signals that the stream should be stopped or
\r
3244 // aborted. It is necessary to handle it this way because the
\r
3245 // callbackEvent() function must return before the ASIOStop()
\r
3246 // function will return.
\r
3247 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3249 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3250 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3252 object->stopStream();
\r
3254 _endthreadex( 0 );
\r
3258 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3260 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
3261 if ( stopThreadCalled ) return SUCCESS;
\r
3262 if ( stream_.state == STREAM_CLOSED ) {
\r
3263 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3264 error( RtError::WARNING );
\r
3268 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3269 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3271 // Check if we were draining the stream and signal if finished.
\r
3272 if ( handle->drainCounter > 3 ) {
\r
3273 if ( handle->internalDrain == false )
\r
3274 SetEvent( handle->condition );
\r
3275 else { // spawn a thread to stop the stream
\r
3276 unsigned threadId;
\r
3277 stopThreadCalled = true;
\r
3278 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3279 &stream_.callbackInfo, 0, &threadId );
\r
3284 /*MUTEX_LOCK( &stream_.mutex );
\r
3286 // The state might change while waiting on a mutex.
\r
3287 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
\r
3289 // Invoke user callback to get fresh output data UNLESS we are
\r
3290 // draining stream.
\r
3291 if ( handle->drainCounter == 0 ) {
\r
3292 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3293 double streamTime = getStreamTime();
\r
3294 RtAudioStreamStatus status = 0;
\r
3295 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3296 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3299 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3300 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3303 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3304 stream_.bufferSize, streamTime, status, info->userData );
\r
3305 if ( handle->drainCounter == 2 ) {
\r
3306 // MUTEX_UNLOCK( &stream_.mutex );
\r
3308 unsigned threadId;
\r
3309 stopThreadCalled = true;
\r
3310 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3311 &stream_.callbackInfo, 0, &threadId );
\r
3314 else if ( handle->drainCounter == 1 )
\r
3315 handle->internalDrain = true;
\r
3318 unsigned int nChannels, bufferBytes, i, j;
\r
3319 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3320 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3322 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3324 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3326 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3327 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3328 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3332 else if ( stream_.doConvertBuffer[0] ) {
\r
3334 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3335 if ( stream_.doByteSwap[0] )
\r
3336 byteSwapBuffer( stream_.deviceBuffer,
\r
3337 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3338 stream_.deviceFormat[0] );
\r
3340 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3341 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3342 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3343 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3349 if ( stream_.doByteSwap[0] )
\r
3350 byteSwapBuffer( stream_.userBuffer[0],
\r
3351 stream_.bufferSize * stream_.nUserChannels[0],
\r
3352 stream_.userFormat );
\r
3354 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3355 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3356 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3357 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3362 if ( handle->drainCounter ) {
\r
3363 handle->drainCounter++;
\r
3368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3370 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3372 if (stream_.doConvertBuffer[1]) {
\r
3374 // Always interleave ASIO input data.
\r
3375 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3376 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3377 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3378 handle->bufferInfos[i].buffers[bufferIndex],
\r
3382 if ( stream_.doByteSwap[1] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3385 stream_.deviceFormat[1] );
\r
3386 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3390 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3391 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3392 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3393 handle->bufferInfos[i].buffers[bufferIndex],
\r
3398 if ( stream_.doByteSwap[1] )
\r
3399 byteSwapBuffer( stream_.userBuffer[1],
\r
3400 stream_.bufferSize * stream_.nUserChannels[1],
\r
3401 stream_.userFormat );
\r
3406 // The following call was suggested by Malte Clasen. While the API
\r
3407 // documentation indicates it should not be required, some device
\r
3408 // drivers apparently do not function correctly without it.
\r
3409 ASIOOutputReady();
\r
3411 // MUTEX_UNLOCK( &stream_.mutex );
\r
3413 RtApi::tickStreamTime();
\r
3417 void sampleRateChanged( ASIOSampleRate sRate )
\r
3419 // The ASIO documentation says that this usually only happens during
\r
3420 // external sync. Audio processing is not stopped by the driver,
\r
3421 // actual sample rate might not have even changed, maybe only the
\r
3422 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3425 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3427 object->stopStream();
\r
3429 catch ( RtError &exception ) {
\r
3430 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3434 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3437 long asioMessages( long selector, long value, void* message, double* opt )
\r
3441 switch( selector ) {
\r
3442 case kAsioSelectorSupported:
\r
3443 if ( value == kAsioResetRequest
\r
3444 || value == kAsioEngineVersion
\r
3445 || value == kAsioResyncRequest
\r
3446 || value == kAsioLatenciesChanged
\r
3447 // The following three were added for ASIO 2.0, you don't
\r
3448 // necessarily have to support them.
\r
3449 || value == kAsioSupportsTimeInfo
\r
3450 || value == kAsioSupportsTimeCode
\r
3451 || value == kAsioSupportsInputMonitor)
\r
3454 case kAsioResetRequest:
\r
3455 // Defer the task and perform the reset of the driver during the
\r
3456 // next "safe" situation. You cannot reset the driver right now,
\r
3457 // as this code is called from the driver. Reset the driver is
\r
3458 // done by completely destruct is. I.e. ASIOStop(),
\r
3459 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3461 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3464 case kAsioResyncRequest:
\r
3465 // This informs the application that the driver encountered some
\r
3466 // non-fatal data loss. It is used for synchronization purposes
\r
3467 // of different media. Added mainly to work around the Win16Mutex
\r
3468 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3469 // which could lose data because the Mutex was held too long by
\r
3470 // another thread. However a driver can issue it in other
\r
3471 // situations, too.
\r
3472 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3476 case kAsioLatenciesChanged:
\r
3477 // This will inform the host application that the drivers were
\r
3478 // latencies changed. Beware, it this does not mean that the
\r
3479 // buffer sizes have changed! You might need to update internal
\r
3481 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3484 case kAsioEngineVersion:
\r
3485 // Return the supported ASIO version of the host application. If
\r
3486 // a host application does not implement this selector, ASIO 1.0
\r
3487 // is assumed by the driver.
\r
3490 case kAsioSupportsTimeInfo:
\r
3491 // Informs the driver whether the
\r
3492 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3493 // For compatibility with ASIO 1.0 drivers the host application
\r
3494 // should always support the "old" bufferSwitch method, too.
\r
3497 case kAsioSupportsTimeCode:
\r
3498 // Informs the driver whether application is interested in time
\r
3499 // code info. If an application does not need to know about time
\r
3500 // code, the driver has less work to do.
\r
3507 static const char* getAsioErrorString( ASIOError result )
\r
3512 const char*message;
\r
3515 static Messages m[] =
\r
3517 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3518 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3519 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3520 { ASE_InvalidMode, "Invalid mode." },
\r
3521 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3522 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3523 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3526 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3527 if ( m[i].value == result ) return m[i].message;
\r
3529 return "Unknown error.";
\r
3531 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3535 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3537 // Modified by Robin Davies, October 2005
\r
3538 // - Improvements to DirectX pointer chasing.
\r
3539 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3540 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3541 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3542 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3544 #include <dsound.h>
\r
3545 #include <assert.h>
\r
3546 #include <algorithm>
\r
3548 #if defined(__MINGW32__)
\r
3549 // missing from latest mingw winapi
\r
3550 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3551 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3552 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3553 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3556 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3558 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3559 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3562 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3564 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3565 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3566 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3567 return pointer >= earlierPointer && pointer < laterPointer;
\r
3570 // A structure to hold various information related to the DirectSound
\r
3571 // API implementation.
\r
3573 unsigned int drainCounter; // Tracks callback counts when draining
\r
3574 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3578 UINT bufferPointer[2];
\r
3579 DWORD dsBufferSize[2];
\r
3580 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3584 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3587 // Declarations for utility functions, callbacks, and structures
\r
3588 // specific to the DirectSound implementation.
\r
3589 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3590 LPCTSTR description,
\r
3592 LPVOID lpContext );
\r
3594 static const char* getErrorString( int code );
\r
3596 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3605 : found(false) { validId[0] = false; validId[1] = false; }
\r
3608 std::vector< DsDevice > dsDevices;
\r
3610 RtApiDs :: RtApiDs()
\r
3612 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3613 // accept whatever the mainline chose for a threading model.
\r
3614 coInitialized_ = false;
\r
3615 HRESULT hr = CoInitialize( NULL );
\r
3616 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3619 RtApiDs :: ~RtApiDs()
\r
3621 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3622 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3625 // The DirectSound default output is always the first device.
\r
3626 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3631 // The DirectSound default input is always the first input device,
\r
3632 // which is the first capture device enumerated.
\r
3633 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3638 unsigned int RtApiDs :: getDeviceCount( void )
\r
3640 // Set query flag for previously found devices to false, so that we
\r
3641 // can check for any devices that have disappeared.
\r
3642 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3643 dsDevices[i].found = false;
\r
3645 // Query DirectSound devices.
\r
3646 bool isInput = false;
\r
3647 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3648 if ( FAILED( result ) ) {
\r
3649 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3650 errorText_ = errorStream_.str();
\r
3651 error( RtError::WARNING );
\r
3654 // Query DirectSoundCapture devices.
\r
3656 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3657 if ( FAILED( result ) ) {
\r
3658 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3659 errorText_ = errorStream_.str();
\r
3660 error( RtError::WARNING );
\r
3663 // Clean out any devices that may have disappeared.
\r
3664 std::vector< int > indices;
\r
3665 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3666 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3667 unsigned int nErased = 0;
\r
3668 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3669 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3671 return dsDevices.size();
\r
3674 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3676 RtAudio::DeviceInfo info;
\r
3677 info.probed = false;
\r
3679 if ( dsDevices.size() == 0 ) {
\r
3680 // Force a query of all devices
\r
3682 if ( dsDevices.size() == 0 ) {
\r
3683 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3684 error( RtError::INVALID_USE );
\r
3688 if ( device >= dsDevices.size() ) {
\r
3689 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3690 error( RtError::INVALID_USE );
\r
3694 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3696 LPDIRECTSOUND output;
\r
3698 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3699 if ( FAILED( result ) ) {
\r
3700 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3701 errorText_ = errorStream_.str();
\r
3702 error( RtError::WARNING );
\r
3706 outCaps.dwSize = sizeof( outCaps );
\r
3707 result = output->GetCaps( &outCaps );
\r
3708 if ( FAILED( result ) ) {
\r
3709 output->Release();
\r
3710 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3711 errorText_ = errorStream_.str();
\r
3712 error( RtError::WARNING );
\r
3716 // Get output channel information.
\r
3717 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3719 // Get sample rate information.
\r
3720 info.sampleRates.clear();
\r
3721 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3722 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3723 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3724 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3727 // Get format information.
\r
3728 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3729 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3731 output->Release();
\r
3733 if ( getDefaultOutputDevice() == device )
\r
3734 info.isDefaultOutput = true;
\r
3736 if ( dsDevices[ device ].validId[1] == false ) {
\r
3737 info.name = dsDevices[ device ].name;
\r
3738 info.probed = true;
\r
3744 LPDIRECTSOUNDCAPTURE input;
\r
3745 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3746 if ( FAILED( result ) ) {
\r
3747 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3748 errorText_ = errorStream_.str();
\r
3749 error( RtError::WARNING );
\r
3754 inCaps.dwSize = sizeof( inCaps );
\r
3755 result = input->GetCaps( &inCaps );
\r
3756 if ( FAILED( result ) ) {
\r
3758 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3759 errorText_ = errorStream_.str();
\r
3760 error( RtError::WARNING );
\r
3764 // Get input channel information.
\r
3765 info.inputChannels = inCaps.dwChannels;
\r
3767 // Get sample rate and format information.
\r
3768 std::vector<unsigned int> rates;
\r
3769 if ( inCaps.dwChannels >= 2 ) {
\r
3770 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3771 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3772 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3773 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3774 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3775 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3776 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3777 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3779 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3780 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3781 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3782 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3783 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3785 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3786 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3787 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3788 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3789 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3792 else if ( inCaps.dwChannels == 1 ) {
\r
3793 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3794 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3795 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3796 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3797 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3798 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3799 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3800 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3802 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3803 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3804 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3805 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3806 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3808 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3809 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3810 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3811 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3812 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3815 else info.inputChannels = 0; // technically, this would be an error
\r
3819 if ( info.inputChannels == 0 ) return info;
\r
3821 // Copy the supported rates to the info structure but avoid duplication.
\r
3823 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3825 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3826 if ( rates[i] == info.sampleRates[j] ) {
\r
3831 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3833 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3835 // If device opens for both playback and capture, we determine the channels.
\r
3836 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3837 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3839 if ( device == 0 ) info.isDefaultInput = true;
\r
3841 // Copy name and return.
\r
3842 info.name = dsDevices[ device ].name;
\r
3843 info.probed = true;
\r
3847 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3848 unsigned int firstChannel, unsigned int sampleRate,
\r
3849 RtAudioFormat format, unsigned int *bufferSize,
\r
3850 RtAudio::StreamOptions *options )
\r
3852 if ( channels + firstChannel > 2 ) {
\r
3853 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3857 unsigned int nDevices = dsDevices.size();
\r
3858 if ( nDevices == 0 ) {
\r
3859 // This should not happen because a check is made before this function is called.
\r
3860 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3864 if ( device >= nDevices ) {
\r
3865 // This should not happen because a check is made before this function is called.
\r
3866 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3870 if ( mode == OUTPUT ) {
\r
3871 if ( dsDevices[ device ].validId[0] == false ) {
\r
3872 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3873 errorText_ = errorStream_.str();
\r
3877 else { // mode == INPUT
\r
3878 if ( dsDevices[ device ].validId[1] == false ) {
\r
3879 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3880 errorText_ = errorStream_.str();
\r
3885 // According to a note in PortAudio, using GetDesktopWindow()
\r
3886 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3887 // that occur when the application's window is not the foreground
\r
3888 // window. Also, if the application window closes before the
\r
3889 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3890 // problems when using GetDesktopWindow() but it seems fine now
\r
3891 // (January 2010). I'll leave it commented here.
\r
3892 // HWND hWnd = GetForegroundWindow();
\r
3893 HWND hWnd = GetDesktopWindow();
\r
3895 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3896 // two. This is a judgement call and a value of two is probably too
\r
3897 // low for capture, but it should work for playback.
\r
3899 if ( options ) nBuffers = options->numberOfBuffers;
\r
3900 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3901 if ( nBuffers < 2 ) nBuffers = 3;
\r
3903 // Check the lower range of the user-specified buffer size and set
\r
3904 // (arbitrarily) to a lower bound of 32.
\r
3905 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3907 // Create the wave format structure. The data format setting will
\r
3908 // be determined later.
\r
3909 WAVEFORMATEX waveFormat;
\r
3910 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3911 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3912 waveFormat.nChannels = channels + firstChannel;
\r
3913 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3915 // Determine the device buffer size. By default, we'll use the value
\r
3916 // defined above (32K), but we will grow it to make allowances for
\r
3917 // very large software buffer sizes.
\r
3918 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;
\r
3919 DWORD dsPointerLeadTime = 0;
\r
3921 void *ohandle = 0, *bhandle = 0;
\r
3923 if ( mode == OUTPUT ) {
\r
3925 LPDIRECTSOUND output;
\r
3926 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3927 if ( FAILED( result ) ) {
\r
3928 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3929 errorText_ = errorStream_.str();
\r
3934 outCaps.dwSize = sizeof( outCaps );
\r
3935 result = output->GetCaps( &outCaps );
\r
3936 if ( FAILED( result ) ) {
\r
3937 output->Release();
\r
3938 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3939 errorText_ = errorStream_.str();
\r
3943 // Check channel information.
\r
3944 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3945 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3946 errorText_ = errorStream_.str();
\r
3950 // Check format information. Use 16-bit format unless not
\r
3951 // supported or user requests 8-bit.
\r
3952 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3953 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3954 waveFormat.wBitsPerSample = 16;
\r
3955 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3958 waveFormat.wBitsPerSample = 8;
\r
3959 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3961 stream_.userFormat = format;
\r
3963 // Update wave format structure and buffer information.
\r
3964 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3965 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3966 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3968 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
3969 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
3970 dsBufferSize *= 2;
\r
3972 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
3973 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
3974 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
3975 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
3976 if ( FAILED( result ) ) {
\r
3977 output->Release();
\r
3978 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
3979 errorText_ = errorStream_.str();
\r
3983 // Even though we will write to the secondary buffer, we need to
\r
3984 // access the primary buffer to set the correct output format
\r
3985 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
3986 // buffer description.
\r
3987 DSBUFFERDESC bufferDescription;
\r
3988 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3989 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3990 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
3992 // Obtain the primary buffer
\r
3993 LPDIRECTSOUNDBUFFER buffer;
\r
3994 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3995 if ( FAILED( result ) ) {
\r
3996 output->Release();
\r
3997 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
3998 errorText_ = errorStream_.str();
\r
4002 // Set the primary DS buffer sound format.
\r
4003 result = buffer->SetFormat( &waveFormat );
\r
4004 if ( FAILED( result ) ) {
\r
4005 output->Release();
\r
4006 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
4007 errorText_ = errorStream_.str();
\r
4011 // Setup the secondary DS buffer description.
\r
4012 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4013 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4014 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4015 DSBCAPS_GLOBALFOCUS |
\r
4016 DSBCAPS_GETCURRENTPOSITION2 |
\r
4017 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4018 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4019 bufferDescription.lpwfxFormat = &waveFormat;
\r
4021 // Try to create the secondary DS buffer. If that doesn't work,
\r
4022 // try to use software mixing. Otherwise, there's a problem.
\r
4023 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4024 if ( FAILED( result ) ) {
\r
4025 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4026 DSBCAPS_GLOBALFOCUS |
\r
4027 DSBCAPS_GETCURRENTPOSITION2 |
\r
4028 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4029 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4030 if ( FAILED( result ) ) {
\r
4031 output->Release();
\r
4032 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4033 errorText_ = errorStream_.str();
\r
4038 // Get the buffer size ... might be different from what we specified.
\r
4040 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4041 result = buffer->GetCaps( &dsbcaps );
\r
4042 if ( FAILED( result ) ) {
\r
4043 output->Release();
\r
4044 buffer->Release();
\r
4045 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4046 errorText_ = errorStream_.str();
\r
4050 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4052 // Lock the DS buffer
\r
4055 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4056 if ( FAILED( result ) ) {
\r
4057 output->Release();
\r
4058 buffer->Release();
\r
4059 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4060 errorText_ = errorStream_.str();
\r
4064 // Zero the DS buffer
\r
4065 ZeroMemory( audioPtr, dataLen );
\r
4067 // Unlock the DS buffer
\r
4068 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4069 if ( FAILED( result ) ) {
\r
4070 output->Release();
\r
4071 buffer->Release();
\r
4072 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4073 errorText_ = errorStream_.str();
\r
4077 ohandle = (void *) output;
\r
4078 bhandle = (void *) buffer;
\r
4081 if ( mode == INPUT ) {
\r
4083 LPDIRECTSOUNDCAPTURE input;
\r
4084 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4085 if ( FAILED( result ) ) {
\r
4086 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4087 errorText_ = errorStream_.str();
\r
4092 inCaps.dwSize = sizeof( inCaps );
\r
4093 result = input->GetCaps( &inCaps );
\r
4094 if ( FAILED( result ) ) {
\r
4096 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4097 errorText_ = errorStream_.str();
\r
4101 // Check channel information.
\r
4102 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4103 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4107 // Check format information. Use 16-bit format unless user
\r
4108 // requests 8-bit.
\r
4109 DWORD deviceFormats;
\r
4110 if ( channels + firstChannel == 2 ) {
\r
4111 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4112 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4113 waveFormat.wBitsPerSample = 8;
\r
4114 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4116 else { // assume 16-bit is supported
\r
4117 waveFormat.wBitsPerSample = 16;
\r
4118 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4121 else { // channel == 1
\r
4122 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4123 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4124 waveFormat.wBitsPerSample = 8;
\r
4125 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4127 else { // assume 16-bit is supported
\r
4128 waveFormat.wBitsPerSample = 16;
\r
4129 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4132 stream_.userFormat = format;
\r
4134 // Update wave format structure and buffer information.
\r
4135 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4136 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4137 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4139 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4140 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4141 dsBufferSize *= 2;
\r
4143 // Setup the secondary DS buffer description.
\r
4144 DSCBUFFERDESC bufferDescription;
\r
4145 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4146 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4147 bufferDescription.dwFlags = 0;
\r
4148 bufferDescription.dwReserved = 0;
\r
4149 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4150 bufferDescription.lpwfxFormat = &waveFormat;
\r
4152 // Create the capture buffer.
\r
4153 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4154 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4155 if ( FAILED( result ) ) {
\r
4157 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4158 errorText_ = errorStream_.str();
\r
4162 // Get the buffer size ... might be different from what we specified.
\r
4163 DSCBCAPS dscbcaps;
\r
4164 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4165 result = buffer->GetCaps( &dscbcaps );
\r
4166 if ( FAILED( result ) ) {
\r
4168 buffer->Release();
\r
4169 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4170 errorText_ = errorStream_.str();
\r
4174 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4176 // NOTE: We could have a problem here if this is a duplex stream
\r
4177 // and the play and capture hardware buffer sizes are different
\r
4178 // (I'm actually not sure if that is a problem or not).
\r
4179 // Currently, we are not verifying that.
\r
4181 // Lock the capture buffer
\r
4184 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4185 if ( FAILED( result ) ) {
\r
4187 buffer->Release();
\r
4188 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4189 errorText_ = errorStream_.str();
\r
4193 // Zero the buffer
\r
4194 ZeroMemory( audioPtr, dataLen );
\r
4196 // Unlock the buffer
\r
4197 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4198 if ( FAILED( result ) ) {
\r
4200 buffer->Release();
\r
4201 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4202 errorText_ = errorStream_.str();
\r
4206 ohandle = (void *) input;
\r
4207 bhandle = (void *) buffer;
\r
4210 // Set various stream parameters
\r
4211 DsHandle *handle = 0;
\r
4212 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4213 stream_.nUserChannels[mode] = channels;
\r
4214 stream_.bufferSize = *bufferSize;
\r
4215 stream_.channelOffset[mode] = firstChannel;
\r
4216 stream_.deviceInterleaved[mode] = true;
\r
4217 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4218 else stream_.userInterleaved = true;
\r
4220 // Set flag for buffer conversion
\r
4221 stream_.doConvertBuffer[mode] = false;
\r
4222 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4223 stream_.doConvertBuffer[mode] = true;
\r
4224 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4225 stream_.doConvertBuffer[mode] = true;
\r
4226 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4227 stream_.nUserChannels[mode] > 1 )
\r
4228 stream_.doConvertBuffer[mode] = true;
\r
4230 // Allocate necessary internal buffers
\r
4231 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4232 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4233 if ( stream_.userBuffer[mode] == NULL ) {
\r
4234 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4238 if ( stream_.doConvertBuffer[mode] ) {
\r
4240 bool makeBuffer = true;
\r
4241 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4242 if ( mode == INPUT ) {
\r
4243 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4244 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4245 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4249 if ( makeBuffer ) {
\r
4250 bufferBytes *= *bufferSize;
\r
4251 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4252 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4253 if ( stream_.deviceBuffer == NULL ) {
\r
4254 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4260 // Allocate our DsHandle structures for the stream.
\r
4261 if ( stream_.apiHandle == 0 ) {
\r
4263 handle = new DsHandle;
\r
4265 catch ( std::bad_alloc& ) {
\r
4266 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4270 // Create a manual-reset event.
\r
4271 handle->condition = CreateEvent( NULL, // no security
\r
4272 TRUE, // manual-reset
\r
4273 FALSE, // non-signaled initially
\r
4274 NULL ); // unnamed
\r
4275 stream_.apiHandle = (void *) handle;
\r
4278 handle = (DsHandle *) stream_.apiHandle;
\r
4279 handle->id[mode] = ohandle;
\r
4280 handle->buffer[mode] = bhandle;
\r
4281 handle->dsBufferSize[mode] = dsBufferSize;
\r
4282 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4284 stream_.device[mode] = device;
\r
4285 stream_.state = STREAM_STOPPED;
\r
4286 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4287 // We had already set up an output stream.
\r
4288 stream_.mode = DUPLEX;
\r
4290 stream_.mode = mode;
\r
4291 stream_.nBuffers = nBuffers;
\r
4292 stream_.sampleRate = sampleRate;
\r
4294 // Setup the buffer conversion information structure.
\r
4295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4297 // Setup the callback thread.
\r
4298 if ( stream_.callbackInfo.isRunning == false ) {
\r
4299 unsigned threadId;
\r
4300 stream_.callbackInfo.isRunning = true;
\r
4301 stream_.callbackInfo.object = (void *) this;
\r
4302 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4303 &stream_.callbackInfo, 0, &threadId );
\r
4304 if ( stream_.callbackInfo.thread == 0 ) {
\r
4305 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4309 // Boost DS thread priority
\r
4310 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4316 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4317 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4318 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4319 if ( buffer ) buffer->Release();
\r
4320 object->Release();
\r
4322 if ( handle->buffer[1] ) {
\r
4323 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4324 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4325 if ( buffer ) buffer->Release();
\r
4326 object->Release();
\r
4328 CloseHandle( handle->condition );
\r
4330 stream_.apiHandle = 0;
\r
4333 for ( int i=0; i<2; i++ ) {
\r
4334 if ( stream_.userBuffer[i] ) {
\r
4335 free( stream_.userBuffer[i] );
\r
4336 stream_.userBuffer[i] = 0;
\r
4340 if ( stream_.deviceBuffer ) {
\r
4341 free( stream_.deviceBuffer );
\r
4342 stream_.deviceBuffer = 0;
\r
4348 void RtApiDs :: closeStream()
\r
4350 if ( stream_.state == STREAM_CLOSED ) {
\r
4351 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4352 error( RtError::WARNING );
\r
4356 // Stop the callback thread.
\r
4357 stream_.callbackInfo.isRunning = false;
\r
4358 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4359 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4361 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4363 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4364 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4365 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4368 buffer->Release();
\r
4370 object->Release();
\r
4372 if ( handle->buffer[1] ) {
\r
4373 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4374 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4377 buffer->Release();
\r
4379 object->Release();
\r
4381 CloseHandle( handle->condition );
\r
4383 stream_.apiHandle = 0;
\r
4386 for ( int i=0; i<2; i++ ) {
\r
4387 if ( stream_.userBuffer[i] ) {
\r
4388 free( stream_.userBuffer[i] );
\r
4389 stream_.userBuffer[i] = 0;
\r
4393 if ( stream_.deviceBuffer ) {
\r
4394 free( stream_.deviceBuffer );
\r
4395 stream_.deviceBuffer = 0;
\r
4398 stream_.mode = UNINITIALIZED;
\r
4399 stream_.state = STREAM_CLOSED;
\r
4402 void RtApiDs :: startStream()
\r
4405 if ( stream_.state == STREAM_RUNNING ) {
\r
4406 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4407 error( RtError::WARNING );
\r
4411 //MUTEX_LOCK( &stream_.mutex );
\r
4413 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4415 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4416 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4417 // this is already in effect.
\r
4418 timeBeginPeriod( 1 );
\r
4420 buffersRolling = false;
\r
4421 duplexPrerollBytes = 0;
\r
4423 if ( stream_.mode == DUPLEX ) {
\r
4424 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4425 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4428 HRESULT result = 0;
\r
4429 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4431 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4432 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4433 if ( FAILED( result ) ) {
\r
4434 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4435 errorText_ = errorStream_.str();
\r
4440 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4442 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4443 result = buffer->Start( DSCBSTART_LOOPING );
\r
4444 if ( FAILED( result ) ) {
\r
4445 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4446 errorText_ = errorStream_.str();
\r
4451 handle->drainCounter = 0;
\r
4452 handle->internalDrain = false;
\r
4453 ResetEvent( handle->condition );
\r
4454 stream_.state = STREAM_RUNNING;
\r
4457 // MUTEX_UNLOCK( &stream_.mutex );
\r
4459 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4462 void RtApiDs :: stopStream()
\r
4465 if ( stream_.state == STREAM_STOPPED ) {
\r
4466 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4467 error( RtError::WARNING );
\r
4472 MUTEX_LOCK( &stream_.mutex );
\r
4474 if ( stream_.state == STREAM_STOPPED ) {
\r
4475 MUTEX_UNLOCK( &stream_.mutex );
\r
4480 HRESULT result = 0;
\r
4483 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4484 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4485 if ( handle->drainCounter == 0 ) {
\r
4486 handle->drainCounter = 2;
\r
4487 // MUTEX_UNLOCK( &stream_.mutex );
\r
4488 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4489 //ResetEvent( handle->condition );
\r
4490 // MUTEX_LOCK( &stream_.mutex );
\r
4493 stream_.state = STREAM_STOPPED;
\r
4495 // Stop the buffer and clear memory
\r
4496 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4497 result = buffer->Stop();
\r
4498 if ( FAILED( result ) ) {
\r
4499 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4500 errorText_ = errorStream_.str();
\r
4504 // Lock the buffer and clear it so that if we start to play again,
\r
4505 // we won't have old data playing.
\r
4506 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4507 if ( FAILED( result ) ) {
\r
4508 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4509 errorText_ = errorStream_.str();
\r
4513 // Zero the DS buffer
\r
4514 ZeroMemory( audioPtr, dataLen );
\r
4516 // Unlock the DS buffer
\r
4517 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4518 if ( FAILED( result ) ) {
\r
4519 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4520 errorText_ = errorStream_.str();
\r
4524 // If we start playing again, we must begin at beginning of buffer.
\r
4525 handle->bufferPointer[0] = 0;
\r
4528 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4529 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4533 stream_.state = STREAM_STOPPED;
\r
4535 result = buffer->Stop();
\r
4536 if ( FAILED( result ) ) {
\r
4537 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4538 errorText_ = errorStream_.str();
\r
4542 // Lock the buffer and clear it so that if we start to play again,
\r
4543 // we won't have old data playing.
\r
4544 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4545 if ( FAILED( result ) ) {
\r
4546 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4547 errorText_ = errorStream_.str();
\r
4551 // Zero the DS buffer
\r
4552 ZeroMemory( audioPtr, dataLen );
\r
4554 // Unlock the DS buffer
\r
4555 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4556 if ( FAILED( result ) ) {
\r
4557 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4558 errorText_ = errorStream_.str();
\r
4562 // If we start recording again, we must begin at beginning of buffer.
\r
4563 handle->bufferPointer[1] = 0;
\r
4567 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4568 // MUTEX_UNLOCK( &stream_.mutex );
\r
4570 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4573 void RtApiDs :: abortStream()
\r
4576 if ( stream_.state == STREAM_STOPPED ) {
\r
4577 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4578 error( RtError::WARNING );
\r
4582 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4583 handle->drainCounter = 2;
\r
4588 void RtApiDs :: callbackEvent()
\r
4590 if ( stream_.state == STREAM_STOPPED ) {
\r
4591 Sleep( 50 ); // sleep 50 milliseconds
\r
4595 if ( stream_.state == STREAM_CLOSED ) {
\r
4596 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4597 error( RtError::WARNING );
\r
4601 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4602 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4604 // Check if we were draining the stream and signal is finished.
\r
4605 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4606 if ( handle->internalDrain == false )
\r
4607 SetEvent( handle->condition );
\r
4614 MUTEX_LOCK( &stream_.mutex );
\r
4616 // The state might change while waiting on a mutex.
\r
4617 if ( stream_.state == STREAM_STOPPED ) {
\r
4618 MUTEX_UNLOCK( &stream_.mutex );
\r
4623 // Invoke user callback to get fresh output data UNLESS we are
\r
4624 // draining stream.
\r
4625 if ( handle->drainCounter == 0 ) {
\r
4626 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4627 double streamTime = getStreamTime();
\r
4628 RtAudioStreamStatus status = 0;
\r
4629 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4630 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4631 handle->xrun[0] = false;
\r
4633 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4634 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4635 handle->xrun[1] = false;
\r
4637 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4638 stream_.bufferSize, streamTime, status, info->userData );
\r
4639 if ( handle->drainCounter == 2 ) {
\r
4640 // MUTEX_UNLOCK( &stream_.mutex );
\r
4644 else if ( handle->drainCounter == 1 )
\r
4645 handle->internalDrain = true;
\r
4649 DWORD currentWritePointer, safeWritePointer;
\r
4650 DWORD currentReadPointer, safeReadPointer;
\r
4651 UINT nextWritePointer;
\r
4653 LPVOID buffer1 = NULL;
\r
4654 LPVOID buffer2 = NULL;
\r
4655 DWORD bufferSize1 = 0;
\r
4656 DWORD bufferSize2 = 0;
\r
4661 if ( buffersRolling == false ) {
\r
4662 if ( stream_.mode == DUPLEX ) {
\r
4663 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4665 // It takes a while for the devices to get rolling. As a result,
\r
4666 // there's no guarantee that the capture and write device pointers
\r
4667 // will move in lockstep. Wait here for both devices to start
\r
4668 // rolling, and then set our buffer pointers accordingly.
\r
4669 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4670 // bytes later than the write buffer.
\r
4672 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4673 // take place between the two GetCurrentPosition calls... but I'm
\r
4674 // really not sure how to solve the problem. Temporarily boost to
\r
4675 // Realtime priority, maybe; but I'm not sure what priority the
\r
4676 // DirectSound service threads run at. We *should* be roughly
\r
4677 // within a ms or so of correct.
\r
4679 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4680 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4682 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4684 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4685 if ( FAILED( result ) ) {
\r
4686 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4687 errorText_ = errorStream_.str();
\r
4688 error( RtError::SYSTEM_ERROR );
\r
4690 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4691 if ( FAILED( result ) ) {
\r
4692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4693 errorText_ = errorStream_.str();
\r
4694 error( RtError::SYSTEM_ERROR );
\r
4697 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4698 if ( FAILED( result ) ) {
\r
4699 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4700 errorText_ = errorStream_.str();
\r
4701 error( RtError::SYSTEM_ERROR );
\r
4703 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4704 if ( FAILED( result ) ) {
\r
4705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4706 errorText_ = errorStream_.str();
\r
4707 error( RtError::SYSTEM_ERROR );
\r
4709 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4713 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4715 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4716 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4717 handle->bufferPointer[1] = safeReadPointer;
\r
4719 else if ( stream_.mode == OUTPUT ) {
\r
4721 // Set the proper nextWritePosition after initial startup.
\r
4722 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4723 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4724 if ( FAILED( result ) ) {
\r
4725 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4726 errorText_ = errorStream_.str();
\r
4727 error( RtError::SYSTEM_ERROR );
\r
4729 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4730 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4733 buffersRolling = true;
\r
4736 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4738 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4740 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4741 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4742 bufferBytes *= formatBytes( stream_.userFormat );
\r
4743 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4746 // Setup parameters and do buffer conversion if necessary.
\r
4747 if ( stream_.doConvertBuffer[0] ) {
\r
4748 buffer = stream_.deviceBuffer;
\r
4749 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4750 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4751 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4754 buffer = stream_.userBuffer[0];
\r
4755 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4756 bufferBytes *= formatBytes( stream_.userFormat );
\r
4759 // No byte swapping necessary in DirectSound implementation.
\r
4761 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4762 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4764 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4765 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4767 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4768 nextWritePointer = handle->bufferPointer[0];
\r
4770 DWORD endWrite, leadPointer;
\r
4772 // Find out where the read and "safe write" pointers are.
\r
4773 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4774 if ( FAILED( result ) ) {
\r
4775 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4776 errorText_ = errorStream_.str();
\r
4777 error( RtError::SYSTEM_ERROR );
\r
4780 // We will copy our output buffer into the region between
\r
4781 // safeWritePointer and leadPointer. If leadPointer is not
\r
4782 // beyond the next endWrite position, wait until it is.
\r
4783 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4784 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4785 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4786 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4787 endWrite = nextWritePointer + bufferBytes;
\r
4789 // Check whether the entire write region is behind the play pointer.
\r
4790 if ( leadPointer >= endWrite ) break;
\r
4792 // If we are here, then we must wait until the leadPointer advances
\r
4793 // beyond the end of our next write region. We use the
\r
4794 // Sleep() function to suspend operation until that happens.
\r
4795 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4796 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4797 if ( millis < 1.0 ) millis = 1.0;
\r
4798 Sleep( (DWORD) millis );
\r
4801 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4802 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4803 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4804 handle->xrun[0] = true;
\r
4805 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4806 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4807 handle->bufferPointer[0] = nextWritePointer;
\r
4808 endWrite = nextWritePointer + bufferBytes;
\r
4811 // Lock free space in the buffer
\r
4812 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4813 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4814 if ( FAILED( result ) ) {
\r
4815 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4816 errorText_ = errorStream_.str();
\r
4817 error( RtError::SYSTEM_ERROR );
\r
4820 // Copy our buffer into the DS buffer
\r
4821 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4822 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4824 // Update our buffer offset and unlock sound buffer
\r
4825 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4826 if ( FAILED( result ) ) {
\r
4827 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4828 errorText_ = errorStream_.str();
\r
4829 error( RtError::SYSTEM_ERROR );
\r
4831 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4832 handle->bufferPointer[0] = nextWritePointer;
\r
4834 if ( handle->drainCounter ) {
\r
4835 handle->drainCounter++;
\r
4840 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4842 // Setup parameters.
\r
4843 if ( stream_.doConvertBuffer[1] ) {
\r
4844 buffer = stream_.deviceBuffer;
\r
4845 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4846 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4849 buffer = stream_.userBuffer[1];
\r
4850 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4851 bufferBytes *= formatBytes( stream_.userFormat );
\r
4854 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4855 long nextReadPointer = handle->bufferPointer[1];
\r
4856 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4858 // Find out where the write and "safe read" pointers are.
\r
4859 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4860 if ( FAILED( result ) ) {
\r
4861 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4862 errorText_ = errorStream_.str();
\r
4863 error( RtError::SYSTEM_ERROR );
\r
4866 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4867 DWORD endRead = nextReadPointer + bufferBytes;
\r
4869 // Handling depends on whether we are INPUT or DUPLEX.
\r
4870 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4871 // then a wait here will drag the write pointers into the forbidden zone.
\r
4873 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4874 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4875 // practical way to sync up the read and write pointers reliably, given the
\r
4876 // the very complex relationship between phase and increment of the read and write
\r
4879 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4880 // provide a pre-roll period of 0.5 seconds in which we return
\r
4881 // zeros from the read buffer while the pointers sync up.
\r
4883 if ( stream_.mode == DUPLEX ) {
\r
4884 if ( safeReadPointer < endRead ) {
\r
4885 if ( duplexPrerollBytes <= 0 ) {
\r
4886 // Pre-roll time over. Be more agressive.
\r
4887 int adjustment = endRead-safeReadPointer;
\r
4889 handle->xrun[1] = true;
\r
4891 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4892 // and perform fine adjustments later.
\r
4893 // - small adjustments: back off by twice as much.
\r
4894 if ( adjustment >= 2*bufferBytes )
\r
4895 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4897 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4899 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4903 // In pre=roll time. Just do it.
\r
4904 nextReadPointer = safeReadPointer - bufferBytes;
\r
4905 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4907 endRead = nextReadPointer + bufferBytes;
\r
4910 else { // mode == INPUT
\r
4911 while ( safeReadPointer < endRead ) {
\r
4912 // See comments for playback.
\r
4913 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4914 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4915 if ( millis < 1.0 ) millis = 1.0;
\r
4916 Sleep( (DWORD) millis );
\r
4918 // Wake up and find out where we are now.
\r
4919 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4920 if ( FAILED( result ) ) {
\r
4921 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4922 errorText_ = errorStream_.str();
\r
4923 error( RtError::SYSTEM_ERROR );
\r
4926 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4930 // Lock free space in the buffer
\r
4931 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4932 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4933 if ( FAILED( result ) ) {
\r
4934 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4935 errorText_ = errorStream_.str();
\r
4936 error( RtError::SYSTEM_ERROR );
\r
4939 if ( duplexPrerollBytes <= 0 ) {
\r
4940 // Copy our buffer into the DS buffer
\r
4941 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4942 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4945 memset( buffer, 0, bufferSize1 );
\r
4946 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4947 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4950 // Update our buffer offset and unlock sound buffer
\r
4951 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4952 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4953 if ( FAILED( result ) ) {
\r
4954 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4955 errorText_ = errorStream_.str();
\r
4956 error( RtError::SYSTEM_ERROR );
\r
4958 handle->bufferPointer[1] = nextReadPointer;
\r
4960 // No byte swapping necessary in DirectSound implementation.
\r
4962 // If necessary, convert 8-bit data from unsigned to signed.
\r
4963 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4964 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4966 // Do buffer conversion if necessary.
\r
4967 if ( stream_.doConvertBuffer[1] )
\r
4968 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4972 // MUTEX_UNLOCK( &stream_.mutex );
\r
4974 RtApi::tickStreamTime();
\r
4977 // Definitions for utility functions and callbacks
\r
4978 // specific to the DirectSound implementation.
\r
4980 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4982 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4983 RtApiDs *object = (RtApiDs *) info->object;
\r
4984 bool* isRunning = &info->isRunning;
\r
4986 while ( *isRunning == true ) {
\r
4987 object->callbackEvent();
\r
4990 _endthreadex( 0 );
\r
4994 #include "tchar.h"
\r
4996 std::string convertTChar( LPCTSTR name )
\r
4998 #if defined( UNICODE ) || defined( _UNICODE )
\r
4999 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5000 std::string s( length, 0 );
\r
5001 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
5003 std::string s( name );
\r
5009 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5010 LPCTSTR description,
\r
5012 LPVOID lpContext )
\r
5014 bool *isInput = (bool *) lpContext;
\r
5017 bool validDevice = false;
\r
5018 if ( *isInput == true ) {
\r
5020 LPDIRECTSOUNDCAPTURE object;
\r
5022 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5023 if ( hr != DS_OK ) return TRUE;
\r
5025 caps.dwSize = sizeof(caps);
\r
5026 hr = object->GetCaps( &caps );
\r
5027 if ( hr == DS_OK ) {
\r
5028 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5029 validDevice = true;
\r
5031 object->Release();
\r
5035 LPDIRECTSOUND object;
\r
5036 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5037 if ( hr != DS_OK ) return TRUE;
\r
5039 caps.dwSize = sizeof(caps);
\r
5040 hr = object->GetCaps( &caps );
\r
5041 if ( hr == DS_OK ) {
\r
5042 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5043 validDevice = true;
\r
5045 object->Release();
\r
5048 // If good device, then save its name and guid.
\r
5049 std::string name = convertTChar( description );
\r
5050 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5051 name = "Default Device";
\r
5052 if ( validDevice ) {
\r
5053 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5054 if ( dsDevices[i].name == name ) {
\r
5055 dsDevices[i].found = true;
\r
5057 dsDevices[i].id[1] = lpguid;
\r
5058 dsDevices[i].validId[1] = true;
\r
5061 dsDevices[i].id[0] = lpguid;
\r
5062 dsDevices[i].validId[0] = true;
\r
5069 device.name = name;
\r
5070 device.found = true;
\r
5072 device.id[1] = lpguid;
\r
5073 device.validId[1] = true;
\r
5076 device.id[0] = lpguid;
\r
5077 device.validId[0] = true;
\r
5079 dsDevices.push_back( device );
\r
5085 static const char* getErrorString( int code )
\r
5089 case DSERR_ALLOCATED:
\r
5090 return "Already allocated";
\r
5092 case DSERR_CONTROLUNAVAIL:
\r
5093 return "Control unavailable";
\r
5095 case DSERR_INVALIDPARAM:
\r
5096 return "Invalid parameter";
\r
5098 case DSERR_INVALIDCALL:
\r
5099 return "Invalid call";
\r
5101 case DSERR_GENERIC:
\r
5102 return "Generic error";
\r
5104 case DSERR_PRIOLEVELNEEDED:
\r
5105 return "Priority level needed";
\r
5107 case DSERR_OUTOFMEMORY:
\r
5108 return "Out of memory";
\r
5110 case DSERR_BADFORMAT:
\r
5111 return "The sample rate or the channel format is not supported";
\r
5113 case DSERR_UNSUPPORTED:
\r
5114 return "Not supported";
\r
5116 case DSERR_NODRIVER:
\r
5117 return "No driver";
\r
5119 case DSERR_ALREADYINITIALIZED:
\r
5120 return "Already initialized";
\r
5122 case DSERR_NOAGGREGATION:
\r
5123 return "No aggregation";
\r
5125 case DSERR_BUFFERLOST:
\r
5126 return "Buffer lost";
\r
5128 case DSERR_OTHERAPPHASPRIO:
\r
5129 return "Another application already has priority";
\r
5131 case DSERR_UNINITIALIZED:
\r
5132 return "Uninitialized";
\r
5135 return "DirectSound unknown error";
\r
5138 //******************** End of __WINDOWS_DS__ *********************//
\r
5142 #if defined(__LINUX_ALSA__)
\r
5144 #include <alsa/asoundlib.h>
\r
5145 #include <unistd.h>
\r
5147 // A structure to hold various information related to the ALSA API
\r
5148 // implementation.
\r
5149 struct AlsaHandle {
\r
5150 snd_pcm_t *handles[2];
\r
5151 bool synchronized;
\r
5153 pthread_cond_t runnable_cv;
\r
5157 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5160 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5162 RtApiAlsa :: RtApiAlsa()
\r
5164 // Nothing to do here.
\r
5167 RtApiAlsa :: ~RtApiAlsa()
\r
5169 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5172 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5174 unsigned nDevices = 0;
\r
5175 int result, subdevice, card;
\r
5177 snd_ctl_t *handle;
\r
5179 // Count cards and devices
\r
5181 snd_card_next( &card );
\r
5182 while ( card >= 0 ) {
\r
5183 sprintf( name, "hw:%d", card );
\r
5184 result = snd_ctl_open( &handle, name, 0 );
\r
5185 if ( result < 0 ) {
\r
5186 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5187 errorText_ = errorStream_.str();
\r
5188 error( RtError::WARNING );
\r
5193 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5194 if ( result < 0 ) {
\r
5195 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5196 errorText_ = errorStream_.str();
\r
5197 error( RtError::WARNING );
\r
5200 if ( subdevice < 0 )
\r
5205 snd_ctl_close( handle );
\r
5206 snd_card_next( &card );
\r
5212 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5214 RtAudio::DeviceInfo info;
\r
5215 info.probed = false;
\r
5217 unsigned nDevices = 0;
\r
5218 int result, subdevice, card;
\r
5220 snd_ctl_t *chandle;
\r
5222 // Count cards and devices
\r
5224 snd_card_next( &card );
\r
5225 while ( card >= 0 ) {
\r
5226 sprintf( name, "hw:%d", card );
\r
5227 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5228 if ( result < 0 ) {
\r
5229 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5230 errorText_ = errorStream_.str();
\r
5231 error( RtError::WARNING );
\r
5236 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5237 if ( result < 0 ) {
\r
5238 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5239 errorText_ = errorStream_.str();
\r
5240 error( RtError::WARNING );
\r
5243 if ( subdevice < 0 ) break;
\r
5244 if ( nDevices == device ) {
\r
5245 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5251 snd_ctl_close( chandle );
\r
5252 snd_card_next( &card );
\r
5255 if ( nDevices == 0 ) {
\r
5256 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5257 error( RtError::INVALID_USE );
\r
5260 if ( device >= nDevices ) {
\r
5261 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5262 error( RtError::INVALID_USE );
\r
5267 // If a stream is already open, we cannot probe the stream devices.
\r
5268 // Thus, use the saved results.
\r
5269 if ( stream_.state != STREAM_CLOSED &&
\r
5270 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5271 if ( device >= devices_.size() ) {
\r
5272 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5273 error( RtError::WARNING );
\r
5276 return devices_[ device ];
\r
5279 int openMode = SND_PCM_ASYNC;
\r
5280 snd_pcm_stream_t stream;
\r
5281 snd_pcm_info_t *pcminfo;
\r
5282 snd_pcm_info_alloca( &pcminfo );
\r
5283 snd_pcm_t *phandle;
\r
5284 snd_pcm_hw_params_t *params;
\r
5285 snd_pcm_hw_params_alloca( ¶ms );
\r
5287 // First try for playback
\r
5288 stream = SND_PCM_STREAM_PLAYBACK;
\r
5289 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5290 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5291 snd_pcm_info_set_stream( pcminfo, stream );
\r
5293 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5294 if ( result < 0 ) {
\r
5295 // Device probably doesn't support playback.
\r
5296 goto captureProbe;
\r
5299 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5300 if ( result < 0 ) {
\r
5301 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5302 errorText_ = errorStream_.str();
\r
5303 error( RtError::WARNING );
\r
5304 goto captureProbe;
\r
5307 // The device is open ... fill the parameter structure.
\r
5308 result = snd_pcm_hw_params_any( phandle, params );
\r
5309 if ( result < 0 ) {
\r
5310 snd_pcm_close( phandle );
\r
5311 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5312 errorText_ = errorStream_.str();
\r
5313 error( RtError::WARNING );
\r
5314 goto captureProbe;
\r
5317 // Get output channel information.
\r
5318 unsigned int value;
\r
5319 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5320 if ( result < 0 ) {
\r
5321 snd_pcm_close( phandle );
\r
5322 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5323 errorText_ = errorStream_.str();
\r
5324 error( RtError::WARNING );
\r
5325 goto captureProbe;
\r
5327 info.outputChannels = value;
\r
5328 snd_pcm_close( phandle );
\r
5331 // Now try for capture
\r
5332 stream = SND_PCM_STREAM_CAPTURE;
\r
5333 snd_pcm_info_set_stream( pcminfo, stream );
\r
5335 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5336 snd_ctl_close( chandle );
\r
5337 if ( result < 0 ) {
\r
5338 // Device probably doesn't support capture.
\r
5339 if ( info.outputChannels == 0 ) return info;
\r
5340 goto probeParameters;
\r
5343 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5344 if ( result < 0 ) {
\r
5345 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5346 errorText_ = errorStream_.str();
\r
5347 error( RtError::WARNING );
\r
5348 if ( info.outputChannels == 0 ) return info;
\r
5349 goto probeParameters;
\r
5352 // The device is open ... fill the parameter structure.
\r
5353 result = snd_pcm_hw_params_any( phandle, params );
\r
5354 if ( result < 0 ) {
\r
5355 snd_pcm_close( phandle );
\r
5356 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5357 errorText_ = errorStream_.str();
\r
5358 error( RtError::WARNING );
\r
5359 if ( info.outputChannels == 0 ) return info;
\r
5360 goto probeParameters;
\r
5363 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5364 if ( result < 0 ) {
\r
5365 snd_pcm_close( phandle );
\r
5366 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5367 errorText_ = errorStream_.str();
\r
5368 error( RtError::WARNING );
\r
5369 if ( info.outputChannels == 0 ) return info;
\r
5370 goto probeParameters;
\r
5372 info.inputChannels = value;
\r
5373 snd_pcm_close( phandle );
\r
5375 // If device opens for both playback and capture, we determine the channels.
\r
5376 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5377 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5379 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5380 if ( device == 0 && info.outputChannels > 0 )
\r
5381 info.isDefaultOutput = true;
\r
5382 if ( device == 0 && info.inputChannels > 0 )
\r
5383 info.isDefaultInput = true;
\r
5386 // At this point, we just need to figure out the supported data
\r
5387 // formats and sample rates. We'll proceed by opening the device in
\r
5388 // the direction with the maximum number of channels, or playback if
\r
5389 // they are equal. This might limit our sample rate options, but so
\r
5392 if ( info.outputChannels >= info.inputChannels )
\r
5393 stream = SND_PCM_STREAM_PLAYBACK;
\r
5395 stream = SND_PCM_STREAM_CAPTURE;
\r
5396 snd_pcm_info_set_stream( pcminfo, stream );
\r
5398 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5399 if ( result < 0 ) {
\r
5400 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5401 errorText_ = errorStream_.str();
\r
5402 error( RtError::WARNING );
\r
5406 // The device is open ... fill the parameter structure.
\r
5407 result = snd_pcm_hw_params_any( phandle, params );
\r
5408 if ( result < 0 ) {
\r
5409 snd_pcm_close( phandle );
\r
5410 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5411 errorText_ = errorStream_.str();
\r
5412 error( RtError::WARNING );
\r
5416 // Test our discrete set of sample rate values.
\r
5417 info.sampleRates.clear();
\r
5418 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5419 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5420 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5422 if ( info.sampleRates.size() == 0 ) {
\r
5423 snd_pcm_close( phandle );
\r
5424 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5425 errorText_ = errorStream_.str();
\r
5426 error( RtError::WARNING );
\r
5430 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5431 snd_pcm_format_t format;
\r
5432 info.nativeFormats = 0;
\r
5433 format = SND_PCM_FORMAT_S8;
\r
5434 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5435 info.nativeFormats |= RTAUDIO_SINT8;
\r
5436 format = SND_PCM_FORMAT_S16;
\r
5437 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5438 info.nativeFormats |= RTAUDIO_SINT16;
\r
5439 format = SND_PCM_FORMAT_S24;
\r
5440 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5441 info.nativeFormats |= RTAUDIO_SINT24;
\r
5442 format = SND_PCM_FORMAT_S32;
\r
5443 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5444 info.nativeFormats |= RTAUDIO_SINT32;
\r
5445 format = SND_PCM_FORMAT_FLOAT;
\r
5446 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5447 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5448 format = SND_PCM_FORMAT_FLOAT64;
\r
5449 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5450 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5452 // Check that we have at least one supported format
\r
5453 if ( info.nativeFormats == 0 ) {
\r
5454 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5455 errorText_ = errorStream_.str();
\r
5456 error( RtError::WARNING );
\r
5460 // Get the device name
\r
5462 result = snd_card_get_name( card, &cardname );
\r
5463 if ( result >= 0 )
\r
5464 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5467 // That's all ... close the device and return
\r
5468 snd_pcm_close( phandle );
\r
5469 info.probed = true;
\r
// Snapshot RtAudio::DeviceInfo for every currently visible ALSA device.
// probeDeviceOpen() calls this before opening a stream because
// getDeviceInfo() cannot probe a device that is already open.
5473 void RtApiAlsa :: saveDeviceInfo( void )
5477 unsigned int nDevices = getDeviceCount();
5478 devices_.resize( nDevices );
// Probe each device index in turn and cache the result in devices_.
5479 for ( unsigned int i=0; i<nDevices; i++ )
5480 devices_[i] = getDeviceInfo( i );
\r
// Open and configure one direction (playback or capture) of an ALSA PCM
// stream. Fills in stream_ state (formats, channel counts, conversion
// flags, buffers) for the given mode, allocates the AlsaHandle on first
// use, and spawns the callback thread once both directions (if duplex)
// are set up. Returns true on success; on failure the error path closes
// the pcm handle and releases any allocations made here.
// NOTE(review): several original lines (closing braces, else branches,
// goto targets) were lost in extraction; numbering gaps mark them.
5483 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5484 unsigned int firstChannel, unsigned int sampleRate,
5485 RtAudioFormat format, unsigned int *bufferSize,
5486 RtAudio::StreamOptions *options )
5489 #if defined(__RTAUDIO_DEBUG__)
5490 snd_output_t *out;
5491 snd_output_stdio_attach(&out, stderr, 0);
5494 // I'm not using the "plug" interface ... too much inconsistent behavior.
5496 unsigned nDevices = 0;
5497 int result, subdevice, card;
5499 snd_ctl_t *chandle;
// Honor the user's request to use the ALSA "default" PCM instead of a
// raw hw:card,device name.
5501 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
5502 snprintf(name, sizeof(name), "%s", "default");
5504 // Count cards and devices
// Walk every sound card / PCM subdevice until we reach the requested
// device index, building its "hw:card,subdevice" name.
5506 snd_card_next( &card );
5507 while ( card >= 0 ) {
5508 sprintf( name, "hw:%d", card );
5509 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
5510 if ( result < 0 ) {
5511 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
5512 errorText_ = errorStream_.str();
5517 result = snd_ctl_pcm_next_device( chandle, &subdevice );
5518 if ( result < 0 ) break;
5519 if ( subdevice < 0 ) break;
5520 if ( nDevices == device ) {
5521 sprintf( name, "hw:%d,%d", card, subdevice );
5522 snd_ctl_close( chandle );
5527 snd_ctl_close( chandle );
5528 snd_card_next( &card );
5531 if ( nDevices == 0 ) {
5532 // This should not happen because a check is made before this function is called.
5533 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
5537 if ( device >= nDevices ) {
5538 // This should not happen because a check is made before this function is called.
5539 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
5546 // The getDeviceInfo() function will not work for a device that is
5547 // already open. Thus, we'll probe the system before opening a
5548 // stream and save the results for use by getDeviceInfo().
5549 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
5550 this->saveDeviceInfo();
// Map RtAudio's stream direction onto the ALSA stream type.
5552 snd_pcm_stream_t stream;
5553 if ( mode == OUTPUT )
5554 stream = SND_PCM_STREAM_PLAYBACK;
5556 stream = SND_PCM_STREAM_CAPTURE;
5558 snd_pcm_t *phandle;
5559 int openMode = SND_PCM_ASYNC;
5560 result = snd_pcm_open( &phandle, name, stream, openMode );
5561 if ( result < 0 ) {
5562 if ( mode == OUTPUT )
5563 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
5565 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
5566 errorText_ = errorStream_.str();
5570 // Fill the parameter structure.
5571 snd_pcm_hw_params_t *hw_params;
5572 snd_pcm_hw_params_alloca( &hw_params );
5573 result = snd_pcm_hw_params_any( phandle, hw_params );
5574 if ( result < 0 ) {
5575 snd_pcm_close( phandle );
5576 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
5577 errorText_ = errorStream_.str();
5581 #if defined(__RTAUDIO_DEBUG__)
5582 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
5583 snd_pcm_hw_params_dump( hw_params, out );
5586 // Set access ... check user preference.
// Try the user's preferred (non-)interleaved access first, falling back
// to the other access mode if the device refuses; record which layout
// the device actually uses so the callback can convert if needed.
5587 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
5588 stream_.userInterleaved = false;
5589 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
5590 if ( result < 0 ) {
5591 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
5592 stream_.deviceInterleaved[mode] = true;
5595 stream_.deviceInterleaved[mode] = false;
5598 stream_.userInterleaved = true;
5599 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
5600 if ( result < 0 ) {
5601 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
5602 stream_.deviceInterleaved[mode] = false;
5605 stream_.deviceInterleaved[mode] = true;
5608 if ( result < 0 ) {
5609 snd_pcm_close( phandle );
5610 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
5611 errorText_ = errorStream_.str();
5615 // Determine how to set the device format.
// Translate the requested RtAudioFormat into the matching ALSA format.
5616 stream_.userFormat = format;
5617 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
5619 if ( format == RTAUDIO_SINT8 )
5620 deviceFormat = SND_PCM_FORMAT_S8;
5621 else if ( format == RTAUDIO_SINT16 )
5622 deviceFormat = SND_PCM_FORMAT_S16;
5623 else if ( format == RTAUDIO_SINT24 )
5624 deviceFormat = SND_PCM_FORMAT_S24;
5625 else if ( format == RTAUDIO_SINT32 )
5626 deviceFormat = SND_PCM_FORMAT_S32;
5627 else if ( format == RTAUDIO_FLOAT32 )
5628 deviceFormat = SND_PCM_FORMAT_FLOAT;
5629 else if ( format == RTAUDIO_FLOAT64 )
5630 deviceFormat = SND_PCM_FORMAT_FLOAT64;
5632 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
5633 stream_.deviceFormat[mode] = format;
5637 // The user requested format is not natively supported by the device.
// Fall back through the supported formats from widest to narrowest;
// the callback will convert between user and device formats.
5638 deviceFormat = SND_PCM_FORMAT_FLOAT64;
5639 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
5640 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
5644 deviceFormat = SND_PCM_FORMAT_FLOAT;
5645 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5646 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
5650 deviceFormat = SND_PCM_FORMAT_S32;
5651 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5652 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
5656 deviceFormat = SND_PCM_FORMAT_S24;
5657 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5658 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
5662 deviceFormat = SND_PCM_FORMAT_S16;
5663 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5664 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5668 deviceFormat = SND_PCM_FORMAT_S8;
5669 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
5670 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5674 // If we get here, no supported format was found.
5675 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
5676 errorText_ = errorStream_.str();
5680 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
5681 if ( result < 0 ) {
5682 snd_pcm_close( phandle );
5683 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
5684 errorText_ = errorStream_.str();
5688 // Determine whether byte-swapping is necessary.
// snd_pcm_format_cpu_endian() returns 1 for CPU endian, 0 for opposite
// endian (swap required), negative on error.
5689 stream_.doByteSwap[mode] = false;
5690 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
5691 result = snd_pcm_format_cpu_endian( deviceFormat );
5692 if ( result == 0 )
5693 stream_.doByteSwap[mode] = true;
5694 else if (result < 0) {
5695 snd_pcm_close( phandle );
5696 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
5697 errorText_ = errorStream_.str();
5702 // Set the sample rate.
// "near" variant may adjust sampleRate to the closest supported rate.
5703 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
5704 if ( result < 0 ) {
5705 snd_pcm_close( phandle );
5706 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
5707 errorText_ = errorStream_.str();
5711 // Determine the number of channels for this device. We support a possible
5712 // minimum device channel number > than the value requested by the user.
5713 stream_.nUserChannels[mode] = channels;
5714 unsigned int value;
5715 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
5716 unsigned int deviceChannels = value;
5717 if ( result < 0 || deviceChannels < channels + firstChannel ) {
5718 snd_pcm_close( phandle );
5719 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
5720 errorText_ = errorStream_.str();
5724 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
5725 if ( result < 0 ) {
5726 snd_pcm_close( phandle );
5727 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
5728 errorText_ = errorStream_.str();
// Open at least as many channels as the user needs (including the
// firstChannel offset), even if that exceeds the device minimum.
5731 deviceChannels = value;
5732 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
5733 stream_.nDeviceChannels[mode] = deviceChannels;
5735 // Set the device channels.
5736 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
5737 if ( result < 0 ) {
5738 snd_pcm_close( phandle );
5739 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
5740 errorText_ = errorStream_.str();
5744 // Set the buffer (or period) size.
// The device may round the period size; report the value actually used
// back to the caller through *bufferSize.
5746 snd_pcm_uframes_t periodSize = *bufferSize;
5747 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
5748 if ( result < 0 ) {
5749 snd_pcm_close( phandle );
5750 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
5751 errorText_ = errorStream_.str();
5754 *bufferSize = periodSize;
5756 // Set the buffer number, which in ALSA is referred to as the "period".
5757 unsigned int periods = 0;
5758 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
5759 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
5760 if ( periods < 2 ) periods = 4; // a fairly safe default value
5761 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
5762 if ( result < 0 ) {
5763 snd_pcm_close( phandle );
5764 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
5765 errorText_ = errorStream_.str();
5769 // If attempting to setup a duplex stream, the bufferSize parameter
5770 // MUST be the same in both directions!
5771 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
5772 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
5773 errorText_ = errorStream_.str();
5777 stream_.bufferSize = *bufferSize;
5779 // Install the hardware configuration
5780 result = snd_pcm_hw_params( phandle, hw_params );
5781 if ( result < 0 ) {
5782 snd_pcm_close( phandle );
5783 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
5784 errorText_ = errorStream_.str();
5788 #if defined(__RTAUDIO_DEBUG__)
5789 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
5790 snd_pcm_hw_params_dump( hw_params, out );
5793 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
5794 snd_pcm_sw_params_t *sw_params = NULL;
5795 snd_pcm_sw_params_alloca( &sw_params );
5796 snd_pcm_sw_params_current( phandle, sw_params );
5797 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
5798 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
5799 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
5801 // The following two settings were suggested by Theo Veenker
5802 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
5803 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
5805 // here are two options for a fix
5806 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Use the ring-buffer boundary as the silence size so the whole buffer
// is silenced on underrun.
5807 snd_pcm_uframes_t val;
5808 snd_pcm_sw_params_get_boundary( sw_params, &val );
5809 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
5811 result = snd_pcm_sw_params( phandle, sw_params );
5812 if ( result < 0 ) {
5813 snd_pcm_close( phandle );
5814 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
5815 errorText_ = errorStream_.str();
5819 #if defined(__RTAUDIO_DEBUG__)
5820 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
5821 snd_pcm_sw_params_dump( sw_params, out );
5824 // Set flags for buffer conversion
// Conversion is needed when format, channel count, or interleaving
// differs between the user-facing and device-facing buffers.
5825 stream_.doConvertBuffer[mode] = false;
5826 if ( stream_.userFormat != stream_.deviceFormat[mode] )
5827 stream_.doConvertBuffer[mode] = true;
5828 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
5829 stream_.doConvertBuffer[mode] = true;
5830 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5831 stream_.nUserChannels[mode] > 1 )
5832 stream_.doConvertBuffer[mode] = true;
5834 // Allocate the ApiHandle if necessary and then save.
5835 AlsaHandle *apiInfo = 0;
5836 if ( stream_.apiHandle == 0 ) {
5838 apiInfo = (AlsaHandle *) new AlsaHandle;
5840 catch ( std::bad_alloc& ) {
5841 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
5845 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
5846 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
5850 stream_.apiHandle = (void *) apiInfo;
5851 apiInfo->handles[0] = 0;
5852 apiInfo->handles[1] = 0;
5855 apiInfo = (AlsaHandle *) stream_.apiHandle;
5857 apiInfo->handles[mode] = phandle;
5859 // Allocate necessary internal buffers.
5860 unsigned long bufferBytes;
5861 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5862 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5863 if ( stream_.userBuffer[mode] == NULL ) {
5864 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
5868 if ( stream_.doConvertBuffer[mode] ) {
5870 bool makeBuffer = true;
5871 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex streams, reuse the output-side device buffer if it is
// already large enough for the input side.
5872 if ( mode == INPUT ) {
5873 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5874 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5875 if ( bufferBytes <= bytesOut ) makeBuffer = false;
5879 if ( makeBuffer ) {
5880 bufferBytes *= *bufferSize;
5881 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5882 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5883 if ( stream_.deviceBuffer == NULL ) {
5884 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
5890 stream_.sampleRate = sampleRate;
5891 stream_.nBuffers = periods;
5892 stream_.device[mode] = device;
5893 stream_.state = STREAM_STOPPED;
5895 // Setup the buffer conversion information structure.
5896 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5898 // Setup thread if necessary.
5899 if ( stream_.mode == OUTPUT && mode == INPUT ) {
5900 // We had already set up an output stream.
5901 stream_.mode = DUPLEX;
5902 // Link the streams if possible.
// snd_pcm_link() makes start/stop operations on one handle apply to
// both, keeping playback and capture in sync.
5903 apiInfo->synchronized = false;
5904 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
5905 apiInfo->synchronized = true;
5907 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
5908 error( RtError::WARNING );
5912 stream_.mode = mode;
5914 // Setup callback thread.
5915 stream_.callbackInfo.object = (void *) this;
5917 // Set the thread attributes for joinable and realtime scheduling
5918 // priority (optional). The higher priority will only take effect
5919 // if the program is run as root or suid. Note, under Linux
5920 // processes with CAP_SYS_NICE privilege, a user can change
5921 // scheduling policy and priority (thus need not be root). See
5922 // POSIX "capabilities".
5923 pthread_attr_t attr;
5924 pthread_attr_init( &attr );
5925 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
5926 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
5927 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
5928 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
5929 int priority = options->priority;
5930 int min = sched_get_priority_min( SCHED_RR );
5931 int max = sched_get_priority_max( SCHED_RR );
5932 if ( priority < min ) priority = min;
5933 else if ( priority > max ) priority = max;
5934 param.sched_priority = priority;
5935 pthread_attr_setschedparam( &attr, &param );
5936 pthread_attr_setschedpolicy( &attr, SCHED_RR );
5939 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
5941 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
5944 stream_.callbackInfo.isRunning = true;
5945 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
5946 pthread_attr_destroy( &attr );
5948 stream_.callbackInfo.isRunning = false;
5949 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error cleanup: tear down everything allocated above so a failed
// probe leaves the object in its pre-call state.
5958 pthread_cond_destroy( &apiInfo->runnable_cv );
5959 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
5960 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
5962 stream_.apiHandle = 0;
5965 for ( int i=0; i<2; i++ ) {
5966 if ( stream_.userBuffer[i] ) {
5967 free( stream_.userBuffer[i] );
5968 stream_.userBuffer[i] = 0;
5972 if ( stream_.deviceBuffer ) {
5973 free( stream_.deviceBuffer );
5974 stream_.deviceBuffer = 0;
\r
// Shut down the callback thread, stop and close both PCM handles,
// free the AlsaHandle and all internal buffers, and reset the stream
// to the UNINITIALIZED/STREAM_CLOSED state. Safe to call only when a
// stream is open; warns otherwise.
5980 void RtApiAlsa :: closeStream()
5982 if ( stream_.state == STREAM_CLOSED ) {
5983 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
5984 error( RtError::WARNING );
5988 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Signal the callback thread to exit; if it is blocked waiting on the
// runnable condition (stream stopped), wake it first so join returns.
5989 stream_.callbackInfo.isRunning = false;
5990 MUTEX_LOCK( &stream_.mutex );
5991 if ( stream_.state == STREAM_STOPPED ) {
5992 apiInfo->runnable = true;
5993 pthread_cond_signal( &apiInfo->runnable_cv );
5995 MUTEX_UNLOCK( &stream_.mutex );
5996 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort any running PCM transfers before closing the handles.
5998 if ( stream_.state == STREAM_RUNNING ) {
5999 stream_.state = STREAM_STOPPED;
6000 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
6001 snd_pcm_drop( apiInfo->handles[0] );
6002 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
6003 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable, PCM handles, and the AlsaHandle.
6007 pthread_cond_destroy( &apiInfo->runnable_cv );
6008 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
6009 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
6011 stream_.apiHandle = 0;
// Free per-direction user buffers and the shared device buffer.
6014 for ( int i=0; i<2; i++ ) {
6015 if ( stream_.userBuffer[i] ) {
6016 free( stream_.userBuffer[i] );
6017 stream_.userBuffer[i] = 0;
6021 if ( stream_.deviceBuffer ) {
6022 free( stream_.deviceBuffer );
6023 stream_.deviceBuffer = 0;
6026 stream_.mode = UNINITIALIZED;
6027 stream_.state = STREAM_CLOSED;
\r
// Prepare the PCM handle(s) if needed, mark the stream running, and
// wake the callback thread via the runnable condition variable.
// Warns if the stream is already running; raises SYSTEM_ERROR if an
// ALSA prepare call fails.
6030 void RtApiAlsa :: startStream()
6032 // This method calls snd_pcm_prepare if the device isn't already in that state.
6035 if ( stream_.state == STREAM_RUNNING ) {
6036 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
6037 error( RtError::WARNING );
6041 MUTEX_LOCK( &stream_.mutex );
6044 snd_pcm_state_t state;
6045 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6046 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (index 0) if it isn't already prepared.
6047 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6048 state = snd_pcm_state( handle[0] );
6049 if ( state != SND_PCM_STATE_PREPARED ) {
6050 result = snd_pcm_prepare( handle[0] );
6051 if ( result < 0 ) {
6052 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
6053 errorText_ = errorStream_.str();
// Prepare the capture handle (index 1) only when the two handles are
// not linked; a linked capture handle follows the playback handle.
6059 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6060 state = snd_pcm_state( handle[1] );
6061 if ( state != SND_PCM_STATE_PREPARED ) {
6062 result = snd_pcm_prepare( handle[1] );
6063 if ( result < 0 ) {
6064 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
6065 errorText_ = errorStream_.str();
6071 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
6074 apiInfo->runnable = true;
6075 pthread_cond_signal( &apiInfo->runnable_cv );
6076 MUTEX_UNLOCK( &stream_.mutex );
6078 if ( result >= 0 ) return;
6079 error( RtError::SYSTEM_ERROR );
\r
// Stop a running stream, letting queued output samples play out:
// playback is drained (snd_pcm_drain) unless the handles are linked,
// in which case it is dropped; capture is always dropped. Warns if
// already stopped; raises SYSTEM_ERROR on an ALSA failure.
6082 void RtApiAlsa :: stopStream()
6085 if ( stream_.state == STREAM_STOPPED ) {
6086 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
6087 error( RtError::WARNING );
// Flip the state before taking the mutex so the callback thread sees
// the stop request and parks on the runnable condition.
6091 stream_.state = STREAM_STOPPED;
6092 MUTEX_LOCK( &stream_.mutex );
6094 //if ( stream_.state == STREAM_STOPPED ) {
6095 // MUTEX_UNLOCK( &stream_.mutex );
6100 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6101 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
6102 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6103 if ( apiInfo->synchronized )
6104 result = snd_pcm_drop( handle[0] );
6106 result = snd_pcm_drain( handle[0] );
6107 if ( result < 0 ) {
6108 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
6109 errorText_ = errorStream_.str();
6114 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6115 result = snd_pcm_drop( handle[1] );
6116 if ( result < 0 ) {
6117 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
6118 errorText_ = errorStream_.str();
6124 stream_.state = STREAM_STOPPED;
6125 MUTEX_UNLOCK( &stream_.mutex );
6127 if ( result >= 0 ) return;
6128 error( RtError::SYSTEM_ERROR );
\r
// Stop a running stream immediately, discarding any pending samples
// (snd_pcm_drop on both directions). Unlike stopStream(), output is
// never drained. Warns if already stopped; raises SYSTEM_ERROR on an
// ALSA failure.
6131 void RtApiAlsa :: abortStream()
6134 if ( stream_.state == STREAM_STOPPED ) {
6135 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
6136 error( RtError::WARNING );
// Flip the state before taking the mutex so the callback thread sees
// the stop request and parks on the runnable condition.
6140 stream_.state = STREAM_STOPPED;
6141 MUTEX_LOCK( &stream_.mutex );
6143 //if ( stream_.state == STREAM_STOPPED ) {
6144 // MUTEX_UNLOCK( &stream_.mutex );
6149 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
6150 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
6151 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6152 result = snd_pcm_drop( handle[0] );
6153 if ( result < 0 ) {
6154 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
6155 errorText_ = errorStream_.str();
// A linked capture handle is dropped implicitly with the playback one.
6160 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
6161 result = snd_pcm_drop( handle[1] );
6162 if ( result < 0 ) {
6163 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
6164 errorText_ = errorStream_.str();
6170 stream_.state = STREAM_STOPPED;
6171 MUTEX_UNLOCK( &stream_.mutex );
6173 if ( result >= 0 ) return;
6174 error( RtError::SYSTEM_ERROR );
\r
// One iteration of the audio callback loop, run on the thread created
// in probeDeviceOpen(): wait while stopped, invoke the user callback
// with xrun status flags, then read capture data from / write playback
// data to the PCM handles (with byte-swap and format conversion as
// flagged), recovering from -EPIPE xruns via snd_pcm_prepare.
6177 void RtApiAlsa :: callbackEvent()
6179 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, park on the runnable condition variable; startStream()
// and closeStream() signal it to wake us.
6180 if ( stream_.state == STREAM_STOPPED ) {
6181 MUTEX_LOCK( &stream_.mutex );
6182 while ( !apiInfo->runnable )
6183 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
6185 if ( stream_.state != STREAM_RUNNING ) {
6186 MUTEX_UNLOCK( &stream_.mutex );
6189 MUTEX_UNLOCK( &stream_.mutex );
6192 if ( stream_.state == STREAM_CLOSED ) {
6193 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
6194 error( RtError::WARNING );
// Build the status flags from any xruns recorded by earlier iterations
// and invoke the user callback with the user-facing buffers.
6198 int doStopStream = 0;
6199 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
6200 double streamTime = getStreamTime();
6201 RtAudioStreamStatus status = 0;
6202 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
6203 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6204 apiInfo->xrun[0] = false;
6206 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
6207 status |= RTAUDIO_INPUT_OVERFLOW;
6208 apiInfo->xrun[1] = false;
6210 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6211 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 from the user callback requests an immediate abort.
6213 if ( doStopStream == 2 ) {
6218 MUTEX_LOCK( &stream_.mutex );
6220 // The state might change while waiting on a mutex.
6221 if ( stream_.state == STREAM_STOPPED ) goto unlock;
6226 snd_pcm_t **handle;
6227 snd_pcm_sframes_t frames;
6228 RtAudioFormat format;
6229 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: handle[1] ----
6231 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6233 // Setup parameters.
// Read into the device buffer when conversion is needed, otherwise
// directly into the user buffer.
6234 if ( stream_.doConvertBuffer[1] ) {
6235 buffer = stream_.deviceBuffer;
6236 channels = stream_.nDeviceChannels[1];
6237 format = stream_.deviceFormat[1];
6240 buffer = stream_.userBuffer[1];
6241 channels = stream_.nUserChannels[1];
6242 format = stream_.userFormat;
6245 // Read samples from device in interleaved/non-interleaved format.
6246 if ( stream_.deviceInterleaved[1] )
6247 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
6249 void *bufs[channels];
6250 size_t offset = stream_.bufferSize * formatBytes( format );
6251 for ( int i=0; i<channels; i++ )
6252 bufs[i] = (void *) (buffer + (i * offset));
6253 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
6256 if ( result < (int) stream_.bufferSize ) {
6257 // Either an error or overrun occurred.
// -EPIPE signals an xrun: record it and re-prepare the handle so the
// next read can proceed.
6258 if ( result == -EPIPE ) {
6259 snd_pcm_state_t state = snd_pcm_state( handle[1] );
6260 if ( state == SND_PCM_STATE_XRUN ) {
6261 apiInfo->xrun[1] = true;
6262 result = snd_pcm_prepare( handle[1] );
6263 if ( result < 0 ) {
6264 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
6265 errorText_ = errorStream_.str();
6269 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
6270 errorText_ = errorStream_.str();
6274 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
6275 errorText_ = errorStream_.str();
6277 error( RtError::WARNING );
6281 // Do byte swapping if necessary.
6282 if ( stream_.doByteSwap[1] )
6283 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
6285 // Do buffer conversion if necessary.
6286 if ( stream_.doConvertBuffer[1] )
6287 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6289 // Check stream latency
6290 result = snd_pcm_delay( handle[1], &frames );
6291 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: handle[0] ----
6296 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6298 // Setup parameters and do buffer conversion if necessary.
6299 if ( stream_.doConvertBuffer[0] ) {
6300 buffer = stream_.deviceBuffer;
6301 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6302 channels = stream_.nDeviceChannels[0];
6303 format = stream_.deviceFormat[0];
6306 buffer = stream_.userBuffer[0];
6307 channels = stream_.nUserChannels[0];
6308 format = stream_.userFormat;
6311 // Do byte swapping if necessary.
6312 if ( stream_.doByteSwap[0] )
6313 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
6315 // Write samples to device in interleaved/non-interleaved format.
6316 if ( stream_.deviceInterleaved[0] )
6317 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
6319 void *bufs[channels];
6320 size_t offset = stream_.bufferSize * formatBytes( format );
6321 for ( int i=0; i<channels; i++ )
6322 bufs[i] = (void *) (buffer + (i * offset));
6323 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
6326 if ( result < (int) stream_.bufferSize ) {
6327 // Either an error or underrun occurred.
6328 if ( result == -EPIPE ) {
6329 snd_pcm_state_t state = snd_pcm_state( handle[0] );
6330 if ( state == SND_PCM_STATE_XRUN ) {
6331 apiInfo->xrun[0] = true;
6332 result = snd_pcm_prepare( handle[0] );
6333 if ( result < 0 ) {
6334 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
6335 errorText_ = errorStream_.str();
6339 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
6340 errorText_ = errorStream_.str();
6344 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
6345 errorText_ = errorStream_.str();
6347 error( RtError::WARNING );
6351 // Check stream latency
6352 result = snd_pcm_delay( handle[0], &frames );
6353 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
6357 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a drain-stop.
6359 RtApi::tickStreamTime();
6360 if ( doStopStream == 1 ) this->stopStream();
\r
6363 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6365 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6366 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6367 bool *isRunning = &info->isRunning;
\r
6369 while ( *isRunning == true ) {
\r
6370 pthread_testcancel();
\r
6371 object->callbackEvent();
\r
6374 pthread_exit( NULL );
\r
6377 //******************** End of __LINUX_ALSA__ *********************//
\r
6381 #if defined(__LINUX_OSS__)
\r
6383 #include <unistd.h>
\r
6384 #include <sys/ioctl.h>
\r
6385 #include <unistd.h>
\r
6386 #include <fcntl.h>
\r
6387 #include "soundcard.h"
\r
6388 #include <errno.h>
\r
6391 extern "C" void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback = 0, capture = 1)
  bool xrun[2];            // over/underrun flags, reported to the user callback
  bool triggered;          // duplex trigger state (see RtApiOss::callbackEvent)
  pthread_cond_t runnable; // signals the callback thread when the stream starts

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6405 RtApiOss :: RtApiOss()
\r
6407 // Nothing to do here.
\r
6410 RtApiOss :: ~RtApiOss()
\r
6412 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6415 unsigned int RtApiOss :: getDeviceCount( void )
\r
6417 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6418 if ( mixerfd == -1 ) {
\r
6419 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6420 error( RtError::WARNING );
\r
6424 oss_sysinfo sysinfo;
\r
6425 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6427 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6428 error( RtError::WARNING );
\r
6433 return sysinfo.numaudios;
\r
6436 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6438 RtAudio::DeviceInfo info;
\r
6439 info.probed = false;
\r
6441 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6442 if ( mixerfd == -1 ) {
\r
6443 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6444 error( RtError::WARNING );
\r
6448 oss_sysinfo sysinfo;
\r
6449 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6450 if ( result == -1 ) {
\r
6452 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6453 error( RtError::WARNING );
\r
6457 unsigned nDevices = sysinfo.numaudios;
\r
6458 if ( nDevices == 0 ) {
\r
6460 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6461 error( RtError::INVALID_USE );
\r
6464 if ( device >= nDevices ) {
\r
6466 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6467 error( RtError::INVALID_USE );
\r
6470 oss_audioinfo ainfo;
\r
6471 ainfo.dev = device;
\r
6472 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6474 if ( result == -1 ) {
\r
6475 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6476 errorText_ = errorStream_.str();
\r
6477 error( RtError::WARNING );
\r
6482 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6483 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6484 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6485 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6486 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6489 // Probe data formats ... do for input
\r
6490 unsigned long mask = ainfo.iformats;
\r
6491 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6492 info.nativeFormats |= RTAUDIO_SINT16;
\r
6493 if ( mask & AFMT_S8 )
\r
6494 info.nativeFormats |= RTAUDIO_SINT8;
\r
6495 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6496 info.nativeFormats |= RTAUDIO_SINT32;
\r
6497 if ( mask & AFMT_FLOAT )
\r
6498 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6499 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6500 info.nativeFormats |= RTAUDIO_SINT24;
\r
6502 // Check that we have at least one supported format
\r
6503 if ( info.nativeFormats == 0 ) {
\r
6504 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6505 errorText_ = errorStream_.str();
\r
6506 error( RtError::WARNING );
\r
6510 // Probe the supported sample rates.
\r
6511 info.sampleRates.clear();
\r
6512 if ( ainfo.nrates ) {
\r
6513 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6514 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6515 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6516 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6523 // Check min and max rate values;
\r
6524 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6525 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6526 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6530 if ( info.sampleRates.size() == 0 ) {
\r
6531 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6532 errorText_ = errorStream_.str();
\r
6533 error( RtError::WARNING );
\r
6536 info.probed = true;
\r
6537 info.name = ainfo.name;
\r
6544 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6545 unsigned int firstChannel, unsigned int sampleRate,
\r
6546 RtAudioFormat format, unsigned int *bufferSize,
\r
6547 RtAudio::StreamOptions *options )
\r
6549 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6550 if ( mixerfd == -1 ) {
\r
6551 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6555 oss_sysinfo sysinfo;
\r
6556 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6557 if ( result == -1 ) {
\r
6559 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6563 unsigned nDevices = sysinfo.numaudios;
\r
6564 if ( nDevices == 0 ) {
\r
6565 // This should not happen because a check is made before this function is called.
\r
6567 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6571 if ( device >= nDevices ) {
\r
6572 // This should not happen because a check is made before this function is called.
\r
6574 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6578 oss_audioinfo ainfo;
\r
6579 ainfo.dev = device;
\r
6580 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6582 if ( result == -1 ) {
\r
6583 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6584 errorText_ = errorStream_.str();
\r
6588 // Check if device supports input or output
\r
6589 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6590 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6591 if ( mode == OUTPUT )
\r
6592 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6594 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6595 errorText_ = errorStream_.str();
\r
6600 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6601 if ( mode == OUTPUT )
\r
6602 flags |= O_WRONLY;
\r
6603 else { // mode == INPUT
\r
6604 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6605 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6606 close( handle->id[0] );
\r
6607 handle->id[0] = 0;
\r
6608 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6609 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6610 errorText_ = errorStream_.str();
\r
6613 // Check that the number previously set channels is the same.
\r
6614 if ( stream_.nUserChannels[0] != channels ) {
\r
6615 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6616 errorText_ = errorStream_.str();
\r
6622 flags |= O_RDONLY;
\r
6625 // Set exclusive access if specified.
\r
6626 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6628 // Try to open the device.
\r
6630 fd = open( ainfo.devnode, flags, 0 );
\r
6632 if ( errno == EBUSY )
\r
6633 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6635 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6636 errorText_ = errorStream_.str();
\r
6640 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6642 if ( flags | O_RDWR ) {
\r
6643 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6644 if ( result == -1) {
\r
6645 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6646 errorText_ = errorStream_.str();
\r
6652 // Check the device channel support.
\r
6653 stream_.nUserChannels[mode] = channels;
\r
6654 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6656 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6657 errorText_ = errorStream_.str();
\r
6661 // Set the number of channels.
\r
6662 int deviceChannels = channels + firstChannel;
\r
6663 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6664 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6666 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6667 errorText_ = errorStream_.str();
\r
6670 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6672 // Get the data format mask
\r
6674 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6675 if ( result == -1 ) {
\r
6677 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6678 errorText_ = errorStream_.str();
\r
6682 // Determine how to set the device format.
\r
6683 stream_.userFormat = format;
\r
6684 int deviceFormat = -1;
\r
6685 stream_.doByteSwap[mode] = false;
\r
6686 if ( format == RTAUDIO_SINT8 ) {
\r
6687 if ( mask & AFMT_S8 ) {
\r
6688 deviceFormat = AFMT_S8;
\r
6689 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6692 else if ( format == RTAUDIO_SINT16 ) {
\r
6693 if ( mask & AFMT_S16_NE ) {
\r
6694 deviceFormat = AFMT_S16_NE;
\r
6695 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6697 else if ( mask & AFMT_S16_OE ) {
\r
6698 deviceFormat = AFMT_S16_OE;
\r
6699 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6700 stream_.doByteSwap[mode] = true;
\r
6703 else if ( format == RTAUDIO_SINT24 ) {
\r
6704 if ( mask & AFMT_S24_NE ) {
\r
6705 deviceFormat = AFMT_S24_NE;
\r
6706 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6708 else if ( mask & AFMT_S24_OE ) {
\r
6709 deviceFormat = AFMT_S24_OE;
\r
6710 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6711 stream_.doByteSwap[mode] = true;
\r
6714 else if ( format == RTAUDIO_SINT32 ) {
\r
6715 if ( mask & AFMT_S32_NE ) {
\r
6716 deviceFormat = AFMT_S32_NE;
\r
6717 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6719 else if ( mask & AFMT_S32_OE ) {
\r
6720 deviceFormat = AFMT_S32_OE;
\r
6721 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6722 stream_.doByteSwap[mode] = true;
\r
6726 if ( deviceFormat == -1 ) {
\r
6727 // The user requested format is not natively supported by the device.
\r
6728 if ( mask & AFMT_S16_NE ) {
\r
6729 deviceFormat = AFMT_S16_NE;
\r
6730 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6732 else if ( mask & AFMT_S32_NE ) {
\r
6733 deviceFormat = AFMT_S32_NE;
\r
6734 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6736 else if ( mask & AFMT_S24_NE ) {
\r
6737 deviceFormat = AFMT_S24_NE;
\r
6738 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6740 else if ( mask & AFMT_S16_OE ) {
\r
6741 deviceFormat = AFMT_S16_OE;
\r
6742 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6743 stream_.doByteSwap[mode] = true;
\r
6745 else if ( mask & AFMT_S32_OE ) {
\r
6746 deviceFormat = AFMT_S32_OE;
\r
6747 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6748 stream_.doByteSwap[mode] = true;
\r
6750 else if ( mask & AFMT_S24_OE ) {
\r
6751 deviceFormat = AFMT_S24_OE;
\r
6752 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6753 stream_.doByteSwap[mode] = true;
\r
6755 else if ( mask & AFMT_S8) {
\r
6756 deviceFormat = AFMT_S8;
\r
6757 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6761 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6762 // This really shouldn't happen ...
\r
6764 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6765 errorText_ = errorStream_.str();
\r
6769 // Set the data format.
\r
6770 int temp = deviceFormat;
\r
6771 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6772 if ( result == -1 || deviceFormat != temp ) {
\r
6774 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6775 errorText_ = errorStream_.str();
\r
6779 // Attempt to set the buffer size. According to OSS, the minimum
\r
6780 // number of buffers is two. The supposed minimum buffer size is 16
\r
6781 // bytes, so that will be our lower bound. The argument to this
\r
6782 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6783 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6784 // We'll check the actual value used near the end of the setup
\r
6786 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6787 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6789 if ( options ) buffers = options->numberOfBuffers;
\r
6790 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6791 if ( buffers < 2 ) buffers = 3;
\r
6792 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6793 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6794 if ( result == -1 ) {
\r
6796 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6797 errorText_ = errorStream_.str();
\r
6800 stream_.nBuffers = buffers;
\r
6802 // Save buffer size (in sample frames).
\r
6803 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6804 stream_.bufferSize = *bufferSize;
\r
6806 // Set the sample rate.
\r
6807 int srate = sampleRate;
\r
6808 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6809 if ( result == -1 ) {
\r
6811 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6812 errorText_ = errorStream_.str();
\r
6816 // Verify the sample rate setup worked.
\r
6817 if ( abs( srate - sampleRate ) > 100 ) {
\r
6819 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6820 errorText_ = errorStream_.str();
\r
6823 stream_.sampleRate = sampleRate;
\r
6825 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6826 // We're doing duplex setup here.
\r
6827 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6828 stream_.nDeviceChannels[0] = deviceChannels;
\r
6831 // Set interleaving parameters.
\r
6832 stream_.userInterleaved = true;
\r
6833 stream_.deviceInterleaved[mode] = true;
\r
6834 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6835 stream_.userInterleaved = false;
\r
6837 // Set flags for buffer conversion
\r
6838 stream_.doConvertBuffer[mode] = false;
\r
6839 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6840 stream_.doConvertBuffer[mode] = true;
\r
6841 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6842 stream_.doConvertBuffer[mode] = true;
\r
6843 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6844 stream_.nUserChannels[mode] > 1 )
\r
6845 stream_.doConvertBuffer[mode] = true;
\r
6847 // Allocate the stream handles if necessary and then save.
\r
6848 if ( stream_.apiHandle == 0 ) {
\r
6850 handle = new OssHandle;
\r
6852 catch ( std::bad_alloc& ) {
\r
6853 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6857 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6858 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6862 stream_.apiHandle = (void *) handle;
\r
6865 handle = (OssHandle *) stream_.apiHandle;
\r
6867 handle->id[mode] = fd;
\r
6869 // Allocate necessary internal buffers.
\r
6870 unsigned long bufferBytes;
\r
6871 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6872 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6873 if ( stream_.userBuffer[mode] == NULL ) {
\r
6874 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6878 if ( stream_.doConvertBuffer[mode] ) {
\r
6880 bool makeBuffer = true;
\r
6881 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6882 if ( mode == INPUT ) {
\r
6883 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6884 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6885 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6889 if ( makeBuffer ) {
\r
6890 bufferBytes *= *bufferSize;
\r
6891 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6892 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6893 if ( stream_.deviceBuffer == NULL ) {
\r
6894 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6900 stream_.device[mode] = device;
\r
6901 stream_.state = STREAM_STOPPED;
\r
6903 // Setup the buffer conversion information structure.
\r
6904 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6906 // Setup thread if necessary.
\r
6907 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6908 // We had already set up an output stream.
\r
6909 stream_.mode = DUPLEX;
\r
6910 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6913 stream_.mode = mode;
\r
6915 // Setup callback thread.
\r
6916 stream_.callbackInfo.object = (void *) this;
\r
6918 // Set the thread attributes for joinable and realtime scheduling
\r
6919 // priority. The higher priority will only take affect if the
\r
6920 // program is run as root or suid.
\r
6921 pthread_attr_t attr;
\r
6922 pthread_attr_init( &attr );
\r
6923 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6924 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6925 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6926 struct sched_param param;
\r
6927 int priority = options->priority;
\r
6928 int min = sched_get_priority_min( SCHED_RR );
\r
6929 int max = sched_get_priority_max( SCHED_RR );
\r
6930 if ( priority < min ) priority = min;
\r
6931 else if ( priority > max ) priority = max;
\r
6932 param.sched_priority = priority;
\r
6933 pthread_attr_setschedparam( &attr, ¶m );
\r
6934 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6937 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6939 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6942 stream_.callbackInfo.isRunning = true;
\r
6943 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6944 pthread_attr_destroy( &attr );
\r
6946 stream_.callbackInfo.isRunning = false;
\r
6947 errorText_ = "RtApiOss::error creating callback thread!";
\r
6956 pthread_cond_destroy( &handle->runnable );
\r
6957 if ( handle->id[0] ) close( handle->id[0] );
\r
6958 if ( handle->id[1] ) close( handle->id[1] );
\r
6960 stream_.apiHandle = 0;
\r
6963 for ( int i=0; i<2; i++ ) {
\r
6964 if ( stream_.userBuffer[i] ) {
\r
6965 free( stream_.userBuffer[i] );
\r
6966 stream_.userBuffer[i] = 0;
\r
6970 if ( stream_.deviceBuffer ) {
\r
6971 free( stream_.deviceBuffer );
\r
6972 stream_.deviceBuffer = 0;
\r
6978 void RtApiOss :: closeStream()
\r
6980 if ( stream_.state == STREAM_CLOSED ) {
\r
6981 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
6982 error( RtError::WARNING );
\r
6986 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6987 stream_.callbackInfo.isRunning = false;
\r
6988 MUTEX_LOCK( &stream_.mutex );
\r
6989 if ( stream_.state == STREAM_STOPPED )
\r
6990 pthread_cond_signal( &handle->runnable );
\r
6991 MUTEX_UNLOCK( &stream_.mutex );
\r
6992 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6994 if ( stream_.state == STREAM_RUNNING ) {
\r
6995 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6996 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
6998 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
6999 stream_.state = STREAM_STOPPED;
\r
7003 pthread_cond_destroy( &handle->runnable );
\r
7004 if ( handle->id[0] ) close( handle->id[0] );
\r
7005 if ( handle->id[1] ) close( handle->id[1] );
\r
7007 stream_.apiHandle = 0;
\r
7010 for ( int i=0; i<2; i++ ) {
\r
7011 if ( stream_.userBuffer[i] ) {
\r
7012 free( stream_.userBuffer[i] );
\r
7013 stream_.userBuffer[i] = 0;
\r
7017 if ( stream_.deviceBuffer ) {
\r
7018 free( stream_.deviceBuffer );
\r
7019 stream_.deviceBuffer = 0;
\r
7022 stream_.mode = UNINITIALIZED;
\r
7023 stream_.state = STREAM_CLOSED;
\r
7026 void RtApiOss :: startStream()
\r
7029 if ( stream_.state == STREAM_RUNNING ) {
\r
7030 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7031 error( RtError::WARNING );
\r
7035 MUTEX_LOCK( &stream_.mutex );
\r
7037 stream_.state = STREAM_RUNNING;
\r
7039 // No need to do anything else here ... OSS automatically starts
\r
7040 // when fed samples.
\r
7042 MUTEX_UNLOCK( &stream_.mutex );
\r
7044 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7045 pthread_cond_signal( &handle->runnable );
\r
7048 void RtApiOss :: stopStream()
\r
7051 if ( stream_.state == STREAM_STOPPED ) {
\r
7052 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7053 error( RtError::WARNING );
\r
7057 MUTEX_LOCK( &stream_.mutex );
\r
7059 // The state might change while waiting on a mutex.
\r
7060 if ( stream_.state == STREAM_STOPPED ) {
\r
7061 MUTEX_UNLOCK( &stream_.mutex );
\r
7066 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7067 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7069 // Flush the output with zeros a few times.
\r
7072 RtAudioFormat format;
\r
7074 if ( stream_.doConvertBuffer[0] ) {
\r
7075 buffer = stream_.deviceBuffer;
\r
7076 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7077 format = stream_.deviceFormat[0];
\r
7080 buffer = stream_.userBuffer[0];
\r
7081 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7082 format = stream_.userFormat;
\r
7085 memset( buffer, 0, samples * formatBytes(format) );
\r
7086 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7087 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7088 if ( result == -1 ) {
\r
7089 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7090 error( RtError::WARNING );
\r
7094 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7095 if ( result == -1 ) {
\r
7096 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7097 errorText_ = errorStream_.str();
\r
7100 handle->triggered = false;
\r
7103 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7104 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7105 if ( result == -1 ) {
\r
7106 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7107 errorText_ = errorStream_.str();
\r
7113 stream_.state = STREAM_STOPPED;
\r
7114 MUTEX_UNLOCK( &stream_.mutex );
\r
7116 if ( result != -1 ) return;
\r
7117 error( RtError::SYSTEM_ERROR );
\r
7120 void RtApiOss :: abortStream()
\r
7123 if ( stream_.state == STREAM_STOPPED ) {
\r
7124 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7125 error( RtError::WARNING );
\r
7129 MUTEX_LOCK( &stream_.mutex );
\r
7131 // The state might change while waiting on a mutex.
\r
7132 if ( stream_.state == STREAM_STOPPED ) {
\r
7133 MUTEX_UNLOCK( &stream_.mutex );
\r
7138 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7139 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7140 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7141 if ( result == -1 ) {
\r
7142 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7143 errorText_ = errorStream_.str();
\r
7146 handle->triggered = false;
\r
7149 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7150 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7151 if ( result == -1 ) {
\r
7152 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7153 errorText_ = errorStream_.str();
\r
7159 stream_.state = STREAM_STOPPED;
\r
7160 MUTEX_UNLOCK( &stream_.mutex );
\r
7162 if ( result != -1 ) return;
\r
7163 error( RtError::SYSTEM_ERROR );
\r
7166 void RtApiOss :: callbackEvent()
\r
7168 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7169 if ( stream_.state == STREAM_STOPPED ) {
\r
7170 MUTEX_LOCK( &stream_.mutex );
\r
7171 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7172 if ( stream_.state != STREAM_RUNNING ) {
\r
7173 MUTEX_UNLOCK( &stream_.mutex );
\r
7176 MUTEX_UNLOCK( &stream_.mutex );
\r
7179 if ( stream_.state == STREAM_CLOSED ) {
\r
7180 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7181 error( RtError::WARNING );
\r
7185 // Invoke user callback to get fresh output data.
\r
7186 int doStopStream = 0;
\r
7187 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7188 double streamTime = getStreamTime();
\r
7189 RtAudioStreamStatus status = 0;
\r
7190 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7191 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7192 handle->xrun[0] = false;
\r
7194 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7195 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7196 handle->xrun[1] = false;
\r
7198 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7199 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7200 if ( doStopStream == 2 ) {
\r
7201 this->abortStream();
\r
7205 MUTEX_LOCK( &stream_.mutex );
\r
7207 // The state might change while waiting on a mutex.
\r
7208 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7213 RtAudioFormat format;
\r
7215 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7217 // Setup parameters and do buffer conversion if necessary.
\r
7218 if ( stream_.doConvertBuffer[0] ) {
\r
7219 buffer = stream_.deviceBuffer;
\r
7220 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7221 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7222 format = stream_.deviceFormat[0];
\r
7225 buffer = stream_.userBuffer[0];
\r
7226 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7227 format = stream_.userFormat;
\r
7230 // Do byte swapping if necessary.
\r
7231 if ( stream_.doByteSwap[0] )
\r
7232 byteSwapBuffer( buffer, samples, format );
\r
7234 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7236 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7237 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7238 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7239 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7240 handle->triggered = true;
\r
7243 // Write samples to device.
\r
7244 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7246 if ( result == -1 ) {
\r
7247 // We'll assume this is an underrun, though there isn't a
\r
7248 // specific means for determining that.
\r
7249 handle->xrun[0] = true;
\r
7250 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7251 error( RtError::WARNING );
\r
7252 // Continue on to input section.
\r
7256 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7258 // Setup parameters.
\r
7259 if ( stream_.doConvertBuffer[1] ) {
\r
7260 buffer = stream_.deviceBuffer;
\r
7261 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7262 format = stream_.deviceFormat[1];
\r
7265 buffer = stream_.userBuffer[1];
\r
7266 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7267 format = stream_.userFormat;
\r
7270 // Read samples from device.
\r
7271 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7273 if ( result == -1 ) {
\r
7274 // We'll assume this is an overrun, though there isn't a
\r
7275 // specific means for determining that.
\r
7276 handle->xrun[1] = true;
\r
7277 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7278 error( RtError::WARNING );
\r
7282 // Do byte swapping if necessary.
\r
7283 if ( stream_.doByteSwap[1] )
\r
7284 byteSwapBuffer( buffer, samples, format );
\r
7286 // Do buffer conversion if necessary.
\r
7287 if ( stream_.doConvertBuffer[1] )
\r
7288 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7292 MUTEX_UNLOCK( &stream_.mutex );
\r
7294 RtApi::tickStreamTime();
\r
7295 if ( doStopStream == 1 ) this->stopStream();
\r
7298 extern "C" void *ossCallbackHandler( void *ptr )
\r
7300 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7301 RtApiOss *object = (RtApiOss *) info->object;
\r
7302 bool *isRunning = &info->isRunning;
\r
7304 while ( *isRunning == true ) {
\r
7305 pthread_testcancel();
\r
7306 object->callbackEvent();
\r
7309 pthread_exit( NULL );
\r
7312 //******************** End of __LINUX_OSS__ *********************//
\r
7316 // *************************************************** //
\r
7318 // Protected common (OS-independent) RtAudio methods.
\r
7320 // *************************************************** //
\r
7322 // This method can be modified to control the behavior of error
\r
7323 // message printing.
\r
7324 void RtApi :: error( RtError::Type type )
\r
7326 errorStream_.str(""); // clear the ostringstream
\r
7327 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7328 std::cerr << '\n' << errorText_ << "\n\n";
\r
7329 else if ( type != RtError::WARNING )
\r
7330 throw( RtError( errorText_, type ) );
\r
7333 void RtApi :: verifyStream()
\r
7335 if ( stream_.state == STREAM_CLOSED ) {
\r
7336 errorText_ = "RtApi:: a stream is not open!";
\r
7337 error( RtError::INVALID_USE );
\r
7341 void RtApi :: clearStreamInfo()
\r
7343 stream_.mode = UNINITIALIZED;
\r
7344 stream_.state = STREAM_CLOSED;
\r
7345 stream_.sampleRate = 0;
\r
7346 stream_.bufferSize = 0;
\r
7347 stream_.nBuffers = 0;
\r
7348 stream_.userFormat = 0;
\r
7349 stream_.userInterleaved = true;
\r
7350 stream_.streamTime = 0.0;
\r
7351 stream_.apiHandle = 0;
\r
7352 stream_.deviceBuffer = 0;
\r
7353 stream_.callbackInfo.callback = 0;
\r
7354 stream_.callbackInfo.userData = 0;
\r
7355 stream_.callbackInfo.isRunning = false;
\r
7356 for ( int i=0; i<2; i++ ) {
\r
7357 stream_.device[i] = 11111;
\r
7358 stream_.doConvertBuffer[i] = false;
\r
7359 stream_.deviceInterleaved[i] = true;
\r
7360 stream_.doByteSwap[i] = false;
\r
7361 stream_.nUserChannels[i] = 0;
\r
7362 stream_.nDeviceChannels[i] = 0;
\r
7363 stream_.channelOffset[i] = 0;
\r
7364 stream_.deviceFormat[i] = 0;
\r
7365 stream_.latency[i] = 0;
\r
7366 stream_.userBuffer[i] = 0;
\r
7367 stream_.convertInfo[i].channels = 0;
\r
7368 stream_.convertInfo[i].inJump = 0;
\r
7369 stream_.convertInfo[i].outJump = 0;
\r
7370 stream_.convertInfo[i].inFormat = 0;
\r
7371 stream_.convertInfo[i].outFormat = 0;
\r
7372 stream_.convertInfo[i].inOffset.clear();
\r
7373 stream_.convertInfo[i].outOffset.clear();
\r
7377 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7379 if ( format == RTAUDIO_SINT16 )
\r
7381 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7382 format == RTAUDIO_FLOAT32 )
\r
7384 else if ( format == RTAUDIO_FLOAT64 )
\r
7386 else if ( format == RTAUDIO_SINT8 )
\r
7389 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7390 error( RtError::WARNING );
\r
7395 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7397 if ( mode == INPUT ) { // convert device to user buffer
\r
7398 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7399 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7400 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7401 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7403 else { // convert user to device buffer
\r
7404 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7405 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7406 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7407 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7410 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7411 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7413 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7415 // Set up the interleave/deinterleave offsets.
\r
7416 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7417 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7418 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7419 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7420 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7421 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7422 stream_.convertInfo[mode].inJump = 1;
\r
7426 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7427 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7428 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7429 stream_.convertInfo[mode].outJump = 1;
\r
7433 else { // no (de)interleaving
\r
7434 if ( stream_.userInterleaved ) {
\r
7435 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7436 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7437 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7441 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7442 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7443 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7444 stream_.convertInfo[mode].inJump = 1;
\r
7445 stream_.convertInfo[mode].outJump = 1;
\r
7450 // Add channel offset.
\r
7451 if ( firstChannel > 0 ) {
\r
7452 if ( stream_.deviceInterleaved[mode] ) {
\r
7453 if ( mode == OUTPUT ) {
\r
7454 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7455 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7458 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7459 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7463 if ( mode == OUTPUT ) {
\r
7464 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7465 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7468 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7469 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7475 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7477 // This function does format conversion, input/output channel compensation, and
\r
7478 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7479 // the lower three bytes of a 32-bit integer.
\r
7481 // Clear our device buffer when in/out duplex device channels are different
\r
7482 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7483 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7484 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7487 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7489 Float64 *out = (Float64 *)outBuffer;
\r
7491 if (info.inFormat == RTAUDIO_SINT8) {
\r
7492 signed char *in = (signed char *)inBuffer;
\r
7493 scale = 1.0 / 127.5;
\r
7494 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7495 for (j=0; j<info.channels; j++) {
\r
7496 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7497 out[info.outOffset[j]] += 0.5;
\r
7498 out[info.outOffset[j]] *= scale;
\r
7500 in += info.inJump;
\r
7501 out += info.outJump;
\r
7504 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7505 Int16 *in = (Int16 *)inBuffer;
\r
7506 scale = 1.0 / 32767.5;
\r
7507 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7508 for (j=0; j<info.channels; j++) {
\r
7509 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7510 out[info.outOffset[j]] += 0.5;
\r
7511 out[info.outOffset[j]] *= scale;
\r
7513 in += info.inJump;
\r
7514 out += info.outJump;
\r
7517 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7518 Int32 *in = (Int32 *)inBuffer;
\r
7519 scale = 1.0 / 8388607.5;
\r
7520 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7521 for (j=0; j<info.channels; j++) {
\r
7522 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7523 out[info.outOffset[j]] += 0.5;
\r
7524 out[info.outOffset[j]] *= scale;
\r
7526 in += info.inJump;
\r
7527 out += info.outJump;
\r
7530 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7531 Int32 *in = (Int32 *)inBuffer;
\r
7532 scale = 1.0 / 2147483647.5;
\r
7533 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7534 for (j=0; j<info.channels; j++) {
\r
7535 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7536 out[info.outOffset[j]] += 0.5;
\r
7537 out[info.outOffset[j]] *= scale;
\r
7539 in += info.inJump;
\r
7540 out += info.outJump;
\r
7543 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7544 Float32 *in = (Float32 *)inBuffer;
\r
7545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7546 for (j=0; j<info.channels; j++) {
\r
7547 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7549 in += info.inJump;
\r
7550 out += info.outJump;
\r
7553 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7554 // Channel compensation and/or (de)interleaving only.
\r
7555 Float64 *in = (Float64 *)inBuffer;
\r
7556 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7557 for (j=0; j<info.channels; j++) {
\r
7558 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7560 in += info.inJump;
\r
7561 out += info.outJump;
\r
7565 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7567 Float32 *out = (Float32 *)outBuffer;
\r
7569 if (info.inFormat == RTAUDIO_SINT8) {
\r
7570 signed char *in = (signed char *)inBuffer;
\r
7571 scale = (Float32) ( 1.0 / 127.5 );
\r
7572 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7573 for (j=0; j<info.channels; j++) {
\r
7574 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7575 out[info.outOffset[j]] += 0.5;
\r
7576 out[info.outOffset[j]] *= scale;
\r
7578 in += info.inJump;
\r
7579 out += info.outJump;
\r
7582 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7583 Int16 *in = (Int16 *)inBuffer;
\r
7584 scale = (Float32) ( 1.0 / 32767.5 );
\r
7585 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7586 for (j=0; j<info.channels; j++) {
\r
7587 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7588 out[info.outOffset[j]] += 0.5;
\r
7589 out[info.outOffset[j]] *= scale;
\r
7591 in += info.inJump;
\r
7592 out += info.outJump;
\r
7595 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7596 Int32 *in = (Int32 *)inBuffer;
\r
7597 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7598 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7599 for (j=0; j<info.channels; j++) {
\r
7600 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7601 out[info.outOffset[j]] += 0.5;
\r
7602 out[info.outOffset[j]] *= scale;
\r
7604 in += info.inJump;
\r
7605 out += info.outJump;
\r
7608 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7609 Int32 *in = (Int32 *)inBuffer;
\r
7610 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7611 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7612 for (j=0; j<info.channels; j++) {
\r
7613 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7614 out[info.outOffset[j]] += 0.5;
\r
7615 out[info.outOffset[j]] *= scale;
\r
7617 in += info.inJump;
\r
7618 out += info.outJump;
\r
7621 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7622 // Channel compensation and/or (de)interleaving only.
\r
7623 Float32 *in = (Float32 *)inBuffer;
\r
7624 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7625 for (j=0; j<info.channels; j++) {
\r
7626 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7628 in += info.inJump;
\r
7629 out += info.outJump;
\r
7632 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7633 Float64 *in = (Float64 *)inBuffer;
\r
7634 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7635 for (j=0; j<info.channels; j++) {
\r
7636 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7638 in += info.inJump;
\r
7639 out += info.outJump;
\r
7643 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7644 Int32 *out = (Int32 *)outBuffer;
\r
7645 if (info.inFormat == RTAUDIO_SINT8) {
\r
7646 signed char *in = (signed char *)inBuffer;
\r
7647 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7648 for (j=0; j<info.channels; j++) {
\r
7649 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7650 out[info.outOffset[j]] <<= 24;
\r
7652 in += info.inJump;
\r
7653 out += info.outJump;
\r
7656 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7657 Int16 *in = (Int16 *)inBuffer;
\r
7658 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7659 for (j=0; j<info.channels; j++) {
\r
7660 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7661 out[info.outOffset[j]] <<= 16;
\r
7663 in += info.inJump;
\r
7664 out += info.outJump;
\r
7667 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7668 Int32 *in = (Int32 *)inBuffer;
\r
7669 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7670 for (j=0; j<info.channels; j++) {
\r
7671 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7672 out[info.outOffset[j]] <<= 8;
\r
7674 in += info.inJump;
\r
7675 out += info.outJump;
\r
7678 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7679 // Channel compensation and/or (de)interleaving only.
\r
7680 Int32 *in = (Int32 *)inBuffer;
\r
7681 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7682 for (j=0; j<info.channels; j++) {
\r
7683 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7685 in += info.inJump;
\r
7686 out += info.outJump;
\r
7689 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7690 Float32 *in = (Float32 *)inBuffer;
\r
7691 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7692 for (j=0; j<info.channels; j++) {
\r
7693 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7695 in += info.inJump;
\r
7696 out += info.outJump;
\r
7699 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7700 Float64 *in = (Float64 *)inBuffer;
\r
7701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7702 for (j=0; j<info.channels; j++) {
\r
7703 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7705 in += info.inJump;
\r
7706 out += info.outJump;
\r
7710 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7711 Int32 *out = (Int32 *)outBuffer;
\r
7712 if (info.inFormat == RTAUDIO_SINT8) {
\r
7713 signed char *in = (signed char *)inBuffer;
\r
7714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7715 for (j=0; j<info.channels; j++) {
\r
7716 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7717 out[info.outOffset[j]] <<= 16;
\r
7719 in += info.inJump;
\r
7720 out += info.outJump;
\r
7723 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7724 Int16 *in = (Int16 *)inBuffer;
\r
7725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7726 for (j=0; j<info.channels; j++) {
\r
7727 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7728 out[info.outOffset[j]] <<= 8;
\r
7730 in += info.inJump;
\r
7731 out += info.outJump;
\r
7734 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7735 // Channel compensation and/or (de)interleaving only.
\r
7736 Int32 *in = (Int32 *)inBuffer;
\r
7737 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7738 for (j=0; j<info.channels; j++) {
\r
7739 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7741 in += info.inJump;
\r
7742 out += info.outJump;
\r
7745 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7746 Int32 *in = (Int32 *)inBuffer;
\r
7747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7748 for (j=0; j<info.channels; j++) {
\r
7749 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7750 out[info.outOffset[j]] >>= 8;
\r
7752 in += info.inJump;
\r
7753 out += info.outJump;
\r
7756 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7757 Float32 *in = (Float32 *)inBuffer;
\r
7758 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7759 for (j=0; j<info.channels; j++) {
\r
7760 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7762 in += info.inJump;
\r
7763 out += info.outJump;
\r
7766 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7767 Float64 *in = (Float64 *)inBuffer;
\r
7768 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7769 for (j=0; j<info.channels; j++) {
\r
7770 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7772 in += info.inJump;
\r
7773 out += info.outJump;
\r
7777 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7778 Int16 *out = (Int16 *)outBuffer;
\r
7779 if (info.inFormat == RTAUDIO_SINT8) {
\r
7780 signed char *in = (signed char *)inBuffer;
\r
7781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7782 for (j=0; j<info.channels; j++) {
\r
7783 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7784 out[info.outOffset[j]] <<= 8;
\r
7786 in += info.inJump;
\r
7787 out += info.outJump;
\r
7790 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7791 // Channel compensation and/or (de)interleaving only.
\r
7792 Int16 *in = (Int16 *)inBuffer;
\r
7793 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7794 for (j=0; j<info.channels; j++) {
\r
7795 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7797 in += info.inJump;
\r
7798 out += info.outJump;
\r
7801 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7802 Int32 *in = (Int32 *)inBuffer;
\r
7803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7804 for (j=0; j<info.channels; j++) {
\r
7805 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7807 in += info.inJump;
\r
7808 out += info.outJump;
\r
7811 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7812 Int32 *in = (Int32 *)inBuffer;
\r
7813 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7814 for (j=0; j<info.channels; j++) {
\r
7815 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7817 in += info.inJump;
\r
7818 out += info.outJump;
\r
7821 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7822 Float32 *in = (Float32 *)inBuffer;
\r
7823 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7824 for (j=0; j<info.channels; j++) {
\r
7825 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7827 in += info.inJump;
\r
7828 out += info.outJump;
\r
7831 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7832 Float64 *in = (Float64 *)inBuffer;
\r
7833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7834 for (j=0; j<info.channels; j++) {
\r
7835 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7837 in += info.inJump;
\r
7838 out += info.outJump;
\r
7842 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7843 signed char *out = (signed char *)outBuffer;
\r
7844 if (info.inFormat == RTAUDIO_SINT8) {
\r
7845 // Channel compensation and/or (de)interleaving only.
\r
7846 signed char *in = (signed char *)inBuffer;
\r
7847 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7848 for (j=0; j<info.channels; j++) {
\r
7849 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7851 in += info.inJump;
\r
7852 out += info.outJump;
\r
7855 if (info.inFormat == RTAUDIO_SINT16) {
\r
7856 Int16 *in = (Int16 *)inBuffer;
\r
7857 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7858 for (j=0; j<info.channels; j++) {
\r
7859 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7861 in += info.inJump;
\r
7862 out += info.outJump;
\r
7865 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7866 Int32 *in = (Int32 *)inBuffer;
\r
7867 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7868 for (j=0; j<info.channels; j++) {
\r
7869 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7871 in += info.inJump;
\r
7872 out += info.outJump;
\r
7875 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7876 Int32 *in = (Int32 *)inBuffer;
\r
7877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7878 for (j=0; j<info.channels; j++) {
\r
7879 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7881 in += info.inJump;
\r
7882 out += info.outJump;
\r
7885 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7886 Float32 *in = (Float32 *)inBuffer;
\r
7887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7888 for (j=0; j<info.channels; j++) {
\r
7889 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7891 in += info.inJump;
\r
7892 out += info.outJump;
\r
7895 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7896 Float64 *in = (Float64 *)inBuffer;
\r
7897 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7898 for (j=0; j<info.channels; j++) {
\r
7899 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7901 in += info.inJump;
\r
7902 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7912 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7914 register char val;
\r
7915 register char *ptr;
\r
7918 if ( format == RTAUDIO_SINT16 ) {
\r
7919 for ( unsigned int i=0; i<samples; i++ ) {
\r
7920 // Swap 1st and 2nd bytes.
\r
7922 *(ptr) = *(ptr+1);
\r
7925 // Increment 2 bytes.
\r
7929 else if ( format == RTAUDIO_SINT24 ||
\r
7930 format == RTAUDIO_SINT32 ||
\r
7931 format == RTAUDIO_FLOAT32 ) {
\r
7932 for ( unsigned int i=0; i<samples; i++ ) {
\r
7933 // Swap 1st and 4th bytes.
\r
7935 *(ptr) = *(ptr+3);
\r
7938 // Swap 2nd and 3rd bytes.
\r
7941 *(ptr) = *(ptr+1);
\r
7944 // Increment 3 more bytes.
\r
7948 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7949 for ( unsigned int i=0; i<samples; i++ ) {
\r
7950 // Swap 1st and 8th bytes
\r
7952 *(ptr) = *(ptr+7);
\r
7955 // Swap 2nd and 7th bytes
\r
7958 *(ptr) = *(ptr+5);
\r
7961 // Swap 3rd and 6th bytes
\r
7964 *(ptr) = *(ptr+3);
\r
7967 // Swap 4th and 5th bytes
\r
7970 *(ptr) = *(ptr+1);
\r
7973 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r