1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2011 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.8
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Windows critical sections,
// pthreads everywhere else, and no-op dummies for the dummy API.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
109 #if defined(__UNIX_JACK__)
\r
110 if ( api == UNIX_JACK )
\r
111 rtapi_ = new RtApiJack();
\r
113 #if defined(__LINUX_ALSA__)
\r
114 if ( api == LINUX_ALSA )
\r
115 rtapi_ = new RtApiAlsa();
\r
117 #if defined(__LINUX_OSS__)
\r
118 if ( api == LINUX_OSS )
\r
119 rtapi_ = new RtApiOss();
\r
121 #if defined(__WINDOWS_ASIO__)
\r
122 if ( api == WINDOWS_ASIO )
\r
123 rtapi_ = new RtApiAsio();
\r
125 #if defined(__WINDOWS_DS__)
\r
126 if ( api == WINDOWS_DS )
\r
127 rtapi_ = new RtApiDs();
\r
129 #if defined(__MACOSX_CORE__)
\r
130 if ( api == MACOSX_CORE )
\r
131 rtapi_ = new RtApiCore();
\r
133 #if defined(__RTAUDIO_DUMMY__)
\r
134 if ( api == RTAUDIO_DUMMY )
\r
135 rtapi_ = new RtApiDummy();
\r
139 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
143 if ( api != UNSPECIFIED ) {
\r
144 // Attempt to open the specified API.
\r
146 if ( rtapi_ ) return;
\r
148 // No compiled support for specified API value. Issue a debug
\r
149 // warning and continue as if no API was specified.
\r
150 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
153 // Iterate through the compiled APIs and return as soon as we find
\r
154 // one with at least one device or we reach the end of the list.
\r
155 std::vector< RtAudio::Api > apis;
\r
156 getCompiledApi( apis );
\r
157 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
158 openRtApi( apis[i] );
\r
159 if ( rtapi_->getDeviceCount() ) break;
\r
162 if ( rtapi_ ) return;
\r
164 // It should not be possible to get here because the preprocessor
\r
165 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
166 // API-specific definitions are passed to the compiler. But just in
\r
167 // case something weird happens, we'll print out an error message.
\r
168 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
171 RtAudio :: ~RtAudio() throw()
\r
176 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
177 RtAudio::StreamParameters *inputParameters,
\r
178 RtAudioFormat format, unsigned int sampleRate,
\r
179 unsigned int *bufferFrames,
\r
180 RtAudioCallback callback, void *userData,
\r
181 RtAudio::StreamOptions *options )
\r
183 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
184 sampleRate, bufferFrames, callback,
\r
185 userData, options );
\r
188 // *************************************************** //
\r
190 // Public RtApi definitions (see end of file for
\r
191 // private or protected utility functions).
\r
193 // *************************************************** //
\r
197 stream_.state = STREAM_CLOSED;
\r
198 stream_.mode = UNINITIALIZED;
\r
199 stream_.apiHandle = 0;
\r
200 stream_.userBuffer[0] = 0;
\r
201 stream_.userBuffer[1] = 0;
\r
202 MUTEX_INITIALIZE( &stream_.mutex );
\r
203 showWarnings_ = true;
\r
208 MUTEX_DESTROY( &stream_.mutex );
\r
211 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
212 RtAudio::StreamParameters *iParams,
\r
213 RtAudioFormat format, unsigned int sampleRate,
\r
214 unsigned int *bufferFrames,
\r
215 RtAudioCallback callback, void *userData,
\r
216 RtAudio::StreamOptions *options )
\r
218 if ( stream_.state != STREAM_CLOSED ) {
\r
219 errorText_ = "RtApi::openStream: a stream is already open!";
\r
220 error( RtError::INVALID_USE );
\r
223 if ( oParams && oParams->nChannels < 1 ) {
\r
224 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
225 error( RtError::INVALID_USE );
\r
228 if ( iParams && iParams->nChannels < 1 ) {
\r
229 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
230 error( RtError::INVALID_USE );
\r
233 if ( oParams == NULL && iParams == NULL ) {
\r
234 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
235 error( RtError::INVALID_USE );
\r
238 if ( formatBytes(format) == 0 ) {
\r
239 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
240 error( RtError::INVALID_USE );
\r
243 unsigned int nDevices = getDeviceCount();
\r
244 unsigned int oChannels = 0;
\r
246 oChannels = oParams->nChannels;
\r
247 if ( oParams->deviceId >= nDevices ) {
\r
248 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
249 error( RtError::INVALID_USE );
\r
253 unsigned int iChannels = 0;
\r
255 iChannels = iParams->nChannels;
\r
256 if ( iParams->deviceId >= nDevices ) {
\r
257 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
258 error( RtError::INVALID_USE );
\r
265 if ( oChannels > 0 ) {
\r
267 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
268 sampleRate, format, bufferFrames, options );
\r
269 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
272 if ( iChannels > 0 ) {
\r
274 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
275 sampleRate, format, bufferFrames, options );
\r
276 if ( result == false ) {
\r
277 if ( oChannels > 0 ) closeStream();
\r
278 error( RtError::SYSTEM_ERROR );
\r
282 stream_.callbackInfo.callback = (void *) callback;
\r
283 stream_.callbackInfo.userData = userData;
\r
285 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
286 stream_.state = STREAM_STOPPED;
\r
289 unsigned int RtApi :: getDefaultInputDevice( void )
\r
291 // Should be implemented in subclasses if possible.
\r
295 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
297 // Should be implemented in subclasses if possible.
\r
301 void RtApi :: closeStream( void )
\r
303 // MUST be implemented in subclasses!
\r
307 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
308 unsigned int firstChannel, unsigned int sampleRate,
\r
309 RtAudioFormat format, unsigned int *bufferSize,
\r
310 RtAudio::StreamOptions *options )
\r
312 // MUST be implemented in subclasses!
\r
316 void RtApi :: tickStreamTime( void )
\r
318 // Subclasses that do not provide their own implementation of
\r
319 // getStreamTime should call this function once per buffer I/O to
\r
320 // provide basic stream time support.
\r
322 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
324 #if defined( HAVE_GETTIMEOFDAY )
\r
325 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
329 long RtApi :: getStreamLatency( void )
\r
333 long totalLatency = 0;
\r
334 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
335 totalLatency = stream_.latency[0];
\r
336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
337 totalLatency += stream_.latency[1];
\r
339 return totalLatency;
\r
342 double RtApi :: getStreamTime( void )
\r
346 #if defined( HAVE_GETTIMEOFDAY )
\r
347 // Return a very accurate estimate of the stream time by
\r
348 // adding in the elapsed time since the last tick.
\r
349 struct timeval then;
\r
350 struct timeval now;
\r
352 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
353 return stream_.streamTime;
\r
355 gettimeofday( &now, NULL );
\r
356 then = stream_.lastTickTimestamp;
\r
357 return stream_.streamTime +
\r
358 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
359 (then.tv_sec + 0.000001 * then.tv_usec));
\r
361 return stream_.streamTime;
\r
365 unsigned int RtApi :: getStreamSampleRate( void )
\r
369 return stream_.sampleRate;
\r
373 // *************************************************** //
\r
375 // OS/API-specific methods.
\r
377 // *************************************************** //
\r
379 #if defined(__MACOSX_CORE__)
\r
381 // The OS X CoreAudio API is designed to use a separate callback
\r
382 // procedure for each of its audio devices. A single RtAudio duplex
\r
383 // stream using two different devices is supported here, though it
\r
384 // cannot be guaranteed to always behave correctly because we cannot
\r
385 // synchronize these two callbacks.
\r
387 // A property listener is installed for over/underrun information.
\r
388 // However, no functionality is currently provided to allow property
\r
389 // listeners to trigger user handlers because it is unclear what could
\r
390 // be done if a critical stream parameter (buffer size, sample rate,
\r
391 // device disconnect) notification arrived. The listeners entail
\r
392 // quite a bit of extra code and most likely, a user program wouldn't
\r
393 // be prepared for the result anyway. However, we do provide a flag
\r
394 // to the client callback function to inform of an over/underrun.
\r
396 // The mechanism for querying and setting system parameters was
\r
397 // updated (and perhaps simplified) in OS-X version 10.4. However,
\r
398 // since 10.4 support is not necessarily available to all users, I've
\r
399 // decided not to update the respective code at this time. Perhaps
\r
400 // this will happen when Apple makes 10.4 free for everyone. :-)
\r
402 // A structure to hold various information related to the CoreAudio API
\r
404 struct CoreHandle {
\r
405 AudioDeviceID id[2]; // device ids
\r
406 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
407 AudioDeviceIOProcID procId[2];
\r
409 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
410 UInt32 nStreams[2]; // number of streams to use
\r
412 char *deviceBuffer;
\r
413 pthread_cond_t condition;
\r
414 int drainCounter; // Tracks callback counts when draining
\r
415 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
418 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
421 RtApiCore :: RtApiCore()
\r
423 // Nothing to do here.
\r
426 RtApiCore :: ~RtApiCore()
\r
428 // The subclass destructor gets called before the base class
\r
429 // destructor, so close an existing stream before deallocating
\r
430 // apiDeviceId memory.
\r
431 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
434 unsigned int RtApiCore :: getDeviceCount( void )
\r
436 // Find out how many audio devices there are, if any.
\r
438 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
439 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
440 if ( result != noErr ) {
\r
441 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
442 error( RtError::WARNING );
\r
446 return dataSize / sizeof( AudioDeviceID );
\r
449 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
451 unsigned int nDevices = getDeviceCount();
\r
452 if ( nDevices <= 1 ) return 0;
\r
455 UInt32 dataSize = sizeof( AudioDeviceID );
\r
456 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
457 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
458 if ( result != noErr ) {
\r
459 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
460 error( RtError::WARNING );
\r
464 dataSize *= nDevices;
\r
465 AudioDeviceID deviceList[ nDevices ];
\r
466 property.mSelector = kAudioHardwarePropertyDevices;
\r
467 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
468 if ( result != noErr ) {
\r
469 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
470 error( RtError::WARNING );
\r
474 for ( unsigned int i=0; i<nDevices; i++ )
\r
475 if ( id == deviceList[i] ) return i;
\r
477 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
478 error( RtError::WARNING );
\r
482 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
484 unsigned int nDevices = getDeviceCount();
\r
485 if ( nDevices <= 1 ) return 0;
\r
488 UInt32 dataSize = sizeof( AudioDeviceID );
\r
489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
490 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
491 if ( result != noErr ) {
\r
492 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
493 error( RtError::WARNING );
\r
497 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
498 AudioDeviceID deviceList[ nDevices ];
\r
499 property.mSelector = kAudioHardwarePropertyDevices;
\r
500 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
501 if ( result != noErr ) {
\r
502 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
503 error( RtError::WARNING );
\r
507 for ( unsigned int i=0; i<nDevices; i++ )
\r
508 if ( id == deviceList[i] ) return i;
\r
510 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
511 error( RtError::WARNING );
\r
515 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
517 RtAudio::DeviceInfo info;
\r
518 info.probed = false;
\r
521 unsigned int nDevices = getDeviceCount();
\r
522 if ( nDevices == 0 ) {
\r
523 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
524 error( RtError::INVALID_USE );
\r
527 if ( device >= nDevices ) {
\r
528 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
529 error( RtError::INVALID_USE );
\r
532 AudioDeviceID deviceList[ nDevices ];
\r
533 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
534 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
535 kAudioObjectPropertyScopeGlobal,
\r
536 kAudioObjectPropertyElementMaster };
\r
537 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
538 0, NULL, &dataSize, (void *) &deviceList );
\r
539 if ( result != noErr ) {
\r
540 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
541 error( RtError::WARNING );
\r
545 AudioDeviceID id = deviceList[ device ];
\r
547 // Get the device name.
\r
549 CFStringRef cfname;
\r
550 dataSize = sizeof( CFStringRef );
\r
551 property.mSelector = kAudioObjectPropertyManufacturer;
\r
552 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
553 if ( result != noErr ) {
\r
554 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
555 errorText_ = errorStream_.str();
\r
556 error( RtError::WARNING );
\r
560 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
561 int length = CFStringGetLength(cfname);
\r
562 char *mname = (char *)malloc(length * 3 + 1);
\r
563 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
564 info.name.append( (const char *)mname, strlen(mname) );
\r
565 info.name.append( ": " );
\r
566 CFRelease( cfname );
\r
569 property.mSelector = kAudioObjectPropertyName;
\r
570 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
571 if ( result != noErr ) {
\r
572 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
573 errorText_ = errorStream_.str();
\r
574 error( RtError::WARNING );
\r
578 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
579 length = CFStringGetLength(cfname);
\r
580 char *name = (char *)malloc(length * 3 + 1);
\r
581 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
582 info.name.append( (const char *)name, strlen(name) );
\r
583 CFRelease( cfname );
\r
586 // Get the output stream "configuration".
\r
587 AudioBufferList *bufferList = nil;
\r
588 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
589 property.mScope = kAudioDevicePropertyScopeOutput;
\r
590 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
592 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
593 if ( result != noErr || dataSize == 0 ) {
\r
594 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
595 errorText_ = errorStream_.str();
\r
596 error( RtError::WARNING );
\r
600 // Allocate the AudioBufferList.
\r
601 bufferList = (AudioBufferList *) malloc( dataSize );
\r
602 if ( bufferList == NULL ) {
\r
603 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
604 error( RtError::WARNING );
\r
608 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
609 if ( result != noErr || dataSize == 0 ) {
\r
610 free( bufferList );
\r
611 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
612 errorText_ = errorStream_.str();
\r
613 error( RtError::WARNING );
\r
617 // Get output channel information.
\r
618 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
619 for ( i=0; i<nStreams; i++ )
\r
620 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
621 free( bufferList );
\r
623 // Get the input stream "configuration".
\r
624 property.mScope = kAudioDevicePropertyScopeInput;
\r
625 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
626 if ( result != noErr || dataSize == 0 ) {
\r
627 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
628 errorText_ = errorStream_.str();
\r
629 error( RtError::WARNING );
\r
633 // Allocate the AudioBufferList.
\r
634 bufferList = (AudioBufferList *) malloc( dataSize );
\r
635 if ( bufferList == NULL ) {
\r
636 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
637 error( RtError::WARNING );
\r
641 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
642 if (result != noErr || dataSize == 0) {
\r
643 free( bufferList );
\r
644 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
645 errorText_ = errorStream_.str();
\r
646 error( RtError::WARNING );
\r
650 // Get input channel information.
\r
651 nStreams = bufferList->mNumberBuffers;
\r
652 for ( i=0; i<nStreams; i++ )
\r
653 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
654 free( bufferList );
\r
656 // If device opens for both playback and capture, we determine the channels.
\r
657 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
658 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
660 // Probe the device sample rates.
\r
661 bool isInput = false;
\r
662 if ( info.outputChannels == 0 ) isInput = true;
\r
664 // Determine the supported sample rates.
\r
665 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
666 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
667 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
668 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
669 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
670 errorText_ = errorStream_.str();
\r
671 error( RtError::WARNING );
\r
675 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
676 AudioValueRange rangeList[ nRanges ];
\r
677 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
678 if ( result != kAudioHardwareNoError ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtError::WARNING );
\r
685 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
686 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
687 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
688 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
691 info.sampleRates.clear();
\r
692 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
693 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
694 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
697 if ( info.sampleRates.size() == 0 ) {
\r
698 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
699 errorText_ = errorStream_.str();
\r
700 error( RtError::WARNING );
\r
704 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
705 // Thus, any other "physical" formats supported by the device are of
\r
706 // no interest to the client.
\r
707 info.nativeFormats = RTAUDIO_FLOAT32;
\r
709 if ( info.outputChannels > 0 )
\r
710 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
711 if ( info.inputChannels > 0 )
\r
712 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
714 info.probed = true;
\r
718 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
719 const AudioTimeStamp* inNow,
\r
720 const AudioBufferList* inInputData,
\r
721 const AudioTimeStamp* inInputTime,
\r
722 AudioBufferList* outOutputData,
\r
723 const AudioTimeStamp* inOutputTime,
\r
724 void* infoPointer )
\r
726 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
728 RtApiCore *object = (RtApiCore *) info->object;
\r
729 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
730 return kAudioHardwareUnspecifiedError;
\r
732 return kAudioHardwareNoError;
\r
735 OSStatus deviceListener( AudioObjectID inDevice,
\r
737 const AudioObjectPropertyAddress properties[],
\r
738 void* handlePointer )
\r
740 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
741 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
742 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
743 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
744 handle->xrun[1] = true;
\r
746 handle->xrun[0] = true;
\r
750 return kAudioHardwareNoError;
\r
753 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
754 unsigned int firstChannel, unsigned int sampleRate,
\r
755 RtAudioFormat format, unsigned int *bufferSize,
\r
756 RtAudio::StreamOptions *options )
\r
759 unsigned int nDevices = getDeviceCount();
\r
760 if ( nDevices == 0 ) {
\r
761 // This should not happen because a check is made before this function is called.
\r
762 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
766 if ( device >= nDevices ) {
\r
767 // This should not happen because a check is made before this function is called.
\r
768 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
772 AudioDeviceID deviceList[ nDevices ];
\r
773 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
774 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
775 kAudioObjectPropertyScopeGlobal,
\r
776 kAudioObjectPropertyElementMaster };
\r
777 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
778 0, NULL, &dataSize, (void *) &deviceList );
\r
779 if ( result != noErr ) {
\r
780 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
784 AudioDeviceID id = deviceList[ device ];
\r
786 // Setup for stream mode.
\r
787 bool isInput = false;
\r
788 if ( mode == INPUT ) {
\r
790 property.mScope = kAudioDevicePropertyScopeInput;
\r
793 property.mScope = kAudioDevicePropertyScopeOutput;
\r
795 // Get the stream "configuration".
\r
796 AudioBufferList *bufferList = nil;
\r
798 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
799 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
800 if ( result != noErr || dataSize == 0 ) {
\r
801 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
802 errorText_ = errorStream_.str();
\r
806 // Allocate the AudioBufferList.
\r
807 bufferList = (AudioBufferList *) malloc( dataSize );
\r
808 if ( bufferList == NULL ) {
\r
809 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
813 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
814 if (result != noErr || dataSize == 0) {
\r
815 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
816 errorText_ = errorStream_.str();
\r
820 // Search for one or more streams that contain the desired number of
\r
821 // channels. CoreAudio devices can have an arbitrary number of
\r
822 // streams and each stream can have an arbitrary number of channels.
\r
823 // For each stream, a single buffer of interleaved samples is
\r
824 // provided. RtAudio prefers the use of one stream of interleaved
\r
825 // data or multiple consecutive single-channel streams. However, we
\r
826 // now support multiple consecutive multi-channel streams of
\r
827 // interleaved data as well.
\r
828 UInt32 iStream, offsetCounter = firstChannel;
\r
829 UInt32 nStreams = bufferList->mNumberBuffers;
\r
830 bool monoMode = false;
\r
831 bool foundStream = false;
\r
833 // First check that the device supports the requested number of
\r
835 UInt32 deviceChannels = 0;
\r
836 for ( iStream=0; iStream<nStreams; iStream++ )
\r
837 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
839 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
840 free( bufferList );
\r
841 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
842 errorText_ = errorStream_.str();
\r
846 // Look for a single stream meeting our needs.
\r
847 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
848 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
849 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
850 if ( streamChannels >= channels + offsetCounter ) {
\r
851 firstStream = iStream;
\r
852 channelOffset = offsetCounter;
\r
853 foundStream = true;
\r
856 if ( streamChannels > offsetCounter ) break;
\r
857 offsetCounter -= streamChannels;
\r
860 // If we didn't find a single stream above, then we should be able
\r
861 // to meet the channel specification with multiple streams.
\r
862 if ( foundStream == false ) {
\r
864 offsetCounter = firstChannel;
\r
865 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
866 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
867 if ( streamChannels > offsetCounter ) break;
\r
868 offsetCounter -= streamChannels;
\r
871 firstStream = iStream;
\r
872 channelOffset = offsetCounter;
\r
873 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
875 if ( streamChannels > 1 ) monoMode = false;
\r
876 while ( channelCounter > 0 ) {
\r
877 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
878 if ( streamChannels > 1 ) monoMode = false;
\r
879 channelCounter -= streamChannels;
\r
884 free( bufferList );
\r
886 // Determine the buffer size.
\r
887 AudioValueRange bufferRange;
\r
888 dataSize = sizeof( AudioValueRange );
\r
889 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
890 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
892 if ( result != noErr ) {
\r
893 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
894 errorText_ = errorStream_.str();
\r
898 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
899 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
900 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
902 // Set the buffer size. For multiple streams, I'm assuming we only
\r
903 // need to make this setting for the master channel.
\r
904 UInt32 theSize = (UInt32) *bufferSize;
\r
905 dataSize = sizeof( UInt32 );
\r
906 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
907 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
909 if ( result != noErr ) {
\r
910 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
911 errorText_ = errorStream_.str();
\r
915 // If attempting to setup a duplex stream, the bufferSize parameter
\r
916 // MUST be the same in both directions!
\r
917 *bufferSize = theSize;
\r
918 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
919 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
920 errorText_ = errorStream_.str();
\r
924 stream_.bufferSize = *bufferSize;
\r
925 stream_.nBuffers = 1;
\r
927 // Check and if necessary, change the sample rate for the device.
\r
928 Float64 nominalRate;
\r
929 dataSize = sizeof( Float64 );
\r
930 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
931 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
933 if ( result != noErr ) {
\r
934 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
935 errorText_ = errorStream_.str();
\r
939 // Only change the sample rate if off by more than 1 Hz.
\r
940 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
941 nominalRate = (Float64) sampleRate;
\r
942 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
944 if ( result != noErr ) {
\r
945 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
946 errorText_ = errorStream_.str();
\r
951 // Try to set "hog" mode ... it's not clear to me this is working.
\r
952 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
954 dataSize = sizeof( hog_pid );
\r
955 property.mSelector = kAudioDevicePropertyHogMode;
\r
956 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
957 if ( result != noErr ) {
\r
958 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
959 errorText_ = errorStream_.str();
\r
963 if ( hog_pid != getpid() ) {
\r
964 hog_pid = getpid();
\r
965 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
966 if ( result != noErr ) {
\r
967 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
968 errorText_ = errorStream_.str();
\r
974 // Get the stream ID(s) so we can set the stream format.
\r
975 AudioStreamID streamIDs[ nStreams ];
\r
976 dataSize = nStreams * sizeof( AudioStreamID );
\r
977 property.mSelector = kAudioDevicePropertyStreams;
\r
978 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &streamIDs );
\r
980 if ( result != noErr ) {
\r
981 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream ID(s) for device (" << device << ").";
\r
982 errorText_ = errorStream_.str();
\r
986 // Now set the stream format for each stream. Also, check the
\r
987 // physical format of the device and change that if necessary.
\r
988 AudioStreamBasicDescription description;
\r
989 dataSize = sizeof( AudioStreamBasicDescription );
\r
992 for ( UInt32 i=0; i<streamCount; i++ ) {
\r
994 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
995 result = AudioObjectGetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, &dataSize, &description );
\r
997 if ( result != noErr ) {
\r
998 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
999 errorText_ = errorStream_.str();
\r
1003 // Set the sample rate and data format id. However, only make the
\r
1004 // change if the sample rate is not within 1.0 of the desired
\r
1005 // rate and the format is not linear pcm.
\r
1006 updateFormat = false;
\r
1007 if ( fabs( description.mSampleRate - (double)sampleRate ) > 1.0 ) {
\r
1008 description.mSampleRate = (double) sampleRate;
\r
1009 updateFormat = true;
\r
1012 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1013 description.mFormatID = kAudioFormatLinearPCM;
\r
1014 updateFormat = true;
\r
1017 if ( updateFormat ) {
\r
1018 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &description );
\r
1019 if ( result != noErr ) {
\r
1020 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1021 errorText_ = errorStream_.str();
\r
1026 // Now check the physical format.
\r
1027 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1028 result = AudioObjectGetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, &dataSize, &description );
\r
1029 if ( result != noErr ) {
\r
1030 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1031 errorText_ = errorStream_.str();
\r
1035 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 24 ) {
\r
1036 description.mFormatID = kAudioFormatLinearPCM;
\r
1037 AudioStreamBasicDescription testDescription = description;
\r
1038 unsigned long formatFlags;
\r
1040 // We'll try higher bit rates first and then work our way down.
\r
1041 testDescription.mBitsPerChannel = 32;
\r
1042 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1043 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1044 formatFlags = description.mFormatFlags | kLinearPCMFormatFlagIsFloat & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1045 testDescription.mFormatFlags = formatFlags;
\r
1046 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &testDescription );
\r
1047 if ( result == noErr ) continue;
\r
1049 testDescription = description;
\r
1050 testDescription.mBitsPerChannel = 32;
\r
1051 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1052 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1053 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger) & ~kLinearPCMFormatFlagIsFloat;
\r
1054 testDescription.mFormatFlags = formatFlags;
\r
1055 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &testDescription );
\r
1056 if ( result == noErr ) continue;
\r
1058 testDescription = description;
\r
1059 testDescription.mBitsPerChannel = 24;
\r
1060 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1061 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1062 testDescription.mFormatFlags = formatFlags;
\r
1063 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &testDescription );
\r
1064 if ( result == noErr ) continue;
\r
1066 testDescription = description;
\r
1067 testDescription.mBitsPerChannel = 16;
\r
1068 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1069 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1070 testDescription.mFormatFlags = formatFlags;
\r
1071 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &testDescription );
\r
1072 if ( result == noErr ) continue;
\r
1074 testDescription = description;
\r
1075 testDescription.mBitsPerChannel = 8;
\r
1076 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1077 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1078 testDescription.mFormatFlags = formatFlags;
\r
1079 result = AudioObjectSetPropertyData( streamIDs[firstStream+i], &property, 0, NULL, dataSize, &testDescription );
\r
1080 if ( result != noErr ) {
\r
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1082 errorText_ = errorStream_.str();
\r
1089 // Get the stream latency. There can be latency in both the device
\r
1090 // and the stream. First, attempt to get the device latency on the
\r
1091 // master channel or the first open channel. Errors that might
\r
1092 // occur here are not deemed critical.
\r
1095 dataSize = sizeof( UInt32 );
\r
1096 property.mSelector = kAudioDevicePropertyLatency;
\r
1097 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1098 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1099 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1102 errorText_ = errorStream_.str();
\r
1103 error( RtError::WARNING );
\r
1107 // Now try to get the stream latency. For multiple streams, I assume the
\r
1108 // latency is equal for each.
\r
1109 result = AudioObjectGetPropertyData( streamIDs[firstStream], &property, 0, NULL, &dataSize, &latency );
\r
1110 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] += latency;
\r
1112 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream latency for device (" << device << ").";
\r
1113 errorText_ = errorStream_.str();
\r
1114 error( RtError::WARNING );
\r
1117 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1118 // always be presented in native-endian format, so we should never
\r
1119 // need to byte swap.
\r
1120 stream_.doByteSwap[mode] = false;
\r
1122 // From the CoreAudio documentation, PCM data must be supplied as
\r
1124 stream_.userFormat = format;
\r
1125 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1127 if ( streamCount == 1 )
\r
1128 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1129 else // multiple streams
\r
1130 stream_.nDeviceChannels[mode] = channels;
\r
1131 stream_.nUserChannels[mode] = channels;
\r
1132 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1133 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1134 else stream_.userInterleaved = true;
\r
1135 stream_.deviceInterleaved[mode] = true;
\r
1136 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1138 // Set flags for buffer conversion.
\r
1139 stream_.doConvertBuffer[mode] = false;
\r
1140 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1141 stream_.doConvertBuffer[mode] = true;
\r
1142 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1143 stream_.doConvertBuffer[mode] = true;
\r
1144 if ( streamCount == 1 ) {
\r
1145 if ( stream_.nUserChannels[mode] > 1 &&
\r
1146 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1147 stream_.doConvertBuffer[mode] = true;
\r
1149 else if ( monoMode && stream_.userInterleaved )
\r
1150 stream_.doConvertBuffer[mode] = true;
\r
1152 // Allocate our CoreHandle structure for the stream.
\r
1153 CoreHandle *handle = 0;
\r
1154 if ( stream_.apiHandle == 0 ) {
\r
1156 handle = new CoreHandle;
\r
1158 catch ( std::bad_alloc& ) {
\r
1159 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1163 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1164 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1167 stream_.apiHandle = (void *) handle;
\r
1170 handle = (CoreHandle *) stream_.apiHandle;
\r
1171 handle->iStream[mode] = firstStream;
\r
1172 handle->nStreams[mode] = streamCount;
\r
1173 handle->id[mode] = id;
\r
1175 // Allocate necessary internal buffers.
\r
1176 unsigned long bufferBytes;
\r
1177 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1178 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1179 stream_.userBuffer[mode] = (char *) malloc( bufferBytes );
\r
1180 if ( stream_.userBuffer[mode] == NULL ) {
\r
1181 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1185 // If possible, we will make use of the CoreAudio stream buffers as
\r
1186 // "device buffers". However, we can't do this if using multiple
\r
1188 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1190 bool makeBuffer = true;
\r
1191 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1192 if ( mode == INPUT ) {
\r
1193 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1194 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1195 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1199 if ( makeBuffer ) {
\r
1200 bufferBytes *= *bufferSize;
\r
1201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1203 if ( stream_.deviceBuffer == NULL ) {
\r
1204 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1210 stream_.sampleRate = sampleRate;
\r
1211 stream_.device[mode] = device;
\r
1212 stream_.state = STREAM_STOPPED;
\r
1213 stream_.callbackInfo.object = (void *) this;
\r
1215 // Setup the buffer conversion information structure.
\r
1216 if ( stream_.doConvertBuffer[mode] ) {
\r
1217 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1218 else setConvertInfo( mode, channelOffset );
\r
1221 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1222 // Only one callback procedure per device.
\r
1223 stream_.mode = DUPLEX;
\r
1225 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1226 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1228 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1229 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1231 if ( result != noErr ) {
\r
1232 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1233 errorText_ = errorStream_.str();
\r
1236 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1237 stream_.mode = DUPLEX;
\r
1239 stream_.mode = mode;
\r
1242 // Setup the device property listener for over/underload.
\r
1243 property.mSelector = kAudioDeviceProcessorOverload;
\r
1244 result = AudioObjectAddPropertyListener( id, &property, deviceListener, (void *) handle );
\r
1250 pthread_cond_destroy( &handle->condition );
\r
1252 stream_.apiHandle = 0;
\r
1255 for ( int i=0; i<2; i++ ) {
\r
1256 if ( stream_.userBuffer[i] ) {
\r
1257 free( stream_.userBuffer[i] );
\r
1258 stream_.userBuffer[i] = 0;
\r
1262 if ( stream_.deviceBuffer ) {
\r
1263 free( stream_.deviceBuffer );
\r
1264 stream_.deviceBuffer = 0;
\r
// Close the currently open CoreAudio stream: stop any running device,
// detach the IOProc callback(s), free the user/device buffers, destroy the
// pthread condition variable, and mark the stream CLOSED.
// NOTE(review): this listing is a lossy extraction — the embedded original
// line numbers skip values (e.g. 1274 -> 1278), so closing braces and some
// statements are missing from this view; code left byte-identical.
1270 void RtApiCore :: closeStream( void )
\r
1272 if ( stream_.state == STREAM_CLOSED ) {
\r
1273 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1274 error( RtError::WARNING );
\r
// Output side (handle->id[0]): stop the device if running, then remove the
// registered callback — AudioDeviceDestroyIOProcID() on 10.5+, else the
// deprecated AudioDeviceRemoveIOProc().
1278 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1279 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1280 if ( stream_.state == STREAM_RUNNING )
\r
1281 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1282 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1283 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1285 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1286 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side (handle->id[1]) — only if input uses a different device than
// output (a same-device DUPLEX stream shares one callback).
1290 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1291 if ( stream_.state == STREAM_RUNNING )
\r
1292 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1293 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1294 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1296 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1297 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-mode user buffers and the shared device buffer.
1301 for ( int i=0; i<2; i++ ) {
\r
1302 if ( stream_.userBuffer[i] ) {
\r
1303 free( stream_.userBuffer[i] );
\r
1304 stream_.userBuffer[i] = 0;
\r
1308 if ( stream_.deviceBuffer ) {
\r
1309 free( stream_.deviceBuffer );
\r
1310 stream_.deviceBuffer = 0;
\r
1313 // Destroy pthread condition variable.
\r
1314 pthread_cond_destroy( &handle->condition );
\r
1316 stream_.apiHandle = 0;
\r
1318 stream_.mode = UNINITIALIZED;
\r
1319 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: under the stream mutex, call AudioDeviceStart() on
// the output device and (if distinct) the input device, reset the drain
// bookkeeping, and transition to STREAM_RUNNING. Raises SYSTEM_ERROR on any
// CoreAudio failure.
// NOTE(review): lossy extraction — embedded line numbers skip (1327 -> 1331),
// so early returns / braces are missing from this view.
1322 void RtApiCore :: startStream( void )
\r
1325 if ( stream_.state == STREAM_RUNNING ) {
\r
1326 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1327 error( RtError::WARNING );
\r
1331 MUTEX_LOCK( &stream_.mutex );
\r
1333 OSStatus result = noErr;
\r
1334 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output device (id[0]) covers OUTPUT and same-device DUPLEX operation.
1335 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1337 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1338 if ( result != noErr ) {
\r
1339 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1340 errorText_ = errorStream_.str();
\r
// Input device (id[1]) is started separately only when it differs from the
// output device.
1345 if ( stream_.mode == INPUT ||
\r
1346 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1348 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1349 if ( result != noErr ) {
\r
1350 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1351 errorText_ = errorStream_.str();
\r
// Fresh stream: no drain in progress.
1356 handle->drainCounter = 0;
\r
1357 handle->internalDrain = false;
\r
1358 stream_.state = STREAM_RUNNING;
\r
1361 MUTEX_UNLOCK( &stream_.mutex );
\r
// error() is only reached when some AudioDeviceStart() call failed.
1363 if ( result == noErr ) return;
\r
1364 error( RtError::SYSTEM_ERROR );
\r
// Stop the running stream. For output, first lets the callback drain pending
// audio: drainCounter is set to 2 and pthread_cond_wait() blocks until
// callbackEvent() signals the condition; then AudioDeviceStop() is called on
// each active device. The mutex is released around each AudioDeviceStop()
// call to avoid deadlocking with the audio callback.
// NOTE(review): lossy extraction — embedded line numbers skip values, so
// some braces/returns are missing from this view.
1367 void RtApiCore :: stopStream( void )
\r
1370 if ( stream_.state == STREAM_STOPPED ) {
\r
1371 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1372 error( RtError::WARNING );
\r
1376 MUTEX_LOCK( &stream_.mutex );
\r
// Re-check under the lock: another thread may have stopped the stream while
// we were waiting for the mutex.
1378 if ( stream_.state == STREAM_STOPPED ) {
\r
1379 MUTEX_UNLOCK( &stream_.mutex );
\r
1383 OSStatus result = noErr;
\r
1384 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain started yet: request one (value 2 =
// externally initiated) and wait for the callback to signal completion.
1387 if ( handle->drainCounter == 0 ) {
\r
1388 handle->drainCounter = 2;
\r
1389 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1392 MUTEX_UNLOCK( &stream_.mutex );
\r
1393 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1394 MUTEX_LOCK( &stream_.mutex );
\r
1395 if ( result != noErr ) {
\r
1396 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1397 errorText_ = errorStream_.str();
\r
// Input device is stopped separately only when it differs from the output
// device.
1402 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1404 MUTEX_UNLOCK( &stream_.mutex );
\r
1405 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1406 MUTEX_LOCK( &stream_.mutex );
\r
1407 if ( result != noErr ) {
\r
1408 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1409 errorText_ = errorStream_.str();
\r
1414 stream_.state = STREAM_STOPPED;
\r
1417 MUTEX_UNLOCK( &stream_.mutex );
\r
1419 if ( result == noErr ) return;
\r
1420 error( RtError::SYSTEM_ERROR );
\r
// Abort the stream immediately (no drain wait): setting drainCounter to 2
// tells callbackEvent() a stop was requested externally. The remainder of
// the original function (original lines after 1433, presumably the actual
// stop call) was dropped by the extraction — confirm against upstream.
1423 void RtApiCore :: abortStream( void )
\r
1426 if ( stream_.state == STREAM_STOPPED ) {
\r
1427 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1428 error( RtError::WARNING );
\r
1432 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1433 handle->drainCounter = 2;
\r
// Per-buffer CoreAudio IOProc worker. Invokes the user callback to produce
// output data, copies/converts between the user buffers and the CoreAudio
// AudioBufferList(s) for both output and input directions (handling single
// vs. multiple streams, interleaved vs. mono-per-stream layouts, and channel
// offsets), and manages the drain/stop handshake with stopStream().
// NOTE(review): lossy extraction — embedded line numbers skip values, so
// braces, returns and some statements are missing from this view; code left
// byte-identical.
1438 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1439 const AudioBufferList *inBufferList,
\r
1440 const AudioBufferList *outBufferList )
\r
1442 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1443 if ( stream_.state == STREAM_CLOSED ) {
\r
1444 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1445 error( RtError::WARNING );
\r
1449 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1450 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1452 // Check if we were draining the stream and signal is finished.
\r
// drainCounter > 3: the drain window has elapsed; either stop ourselves
// (internal drain) or wake the thread blocked in stopStream().
1453 if ( handle->drainCounter > 3 ) {
\r
1454 if ( handle->internalDrain == true )
\r
1456 else // external call to stopStream()
\r
1457 pthread_cond_signal( &handle->condition );
\r
1461 MUTEX_LOCK( &stream_.mutex );
\r
1463 // The state might change while waiting on a mutex.
\r
1464 if ( stream_.state == STREAM_STOPPED ) {
\r
1465 MUTEX_UNLOCK( &stream_.mutex );
\r
1469 AudioDeviceID outputDevice = handle->id[0];
\r
1471 // Invoke user callback to get fresh output data UNLESS we are
\r
1472 // draining stream or duplex mode AND the input/output devices are
\r
1473 // different AND this function is called for the input device.
\r
1474 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1475 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1476 double streamTime = getStreamTime();
\r
// Report (and clear) any xrun flags set by the device listener since the
// last callback.
1477 RtAudioStreamStatus status = 0;
\r
1478 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1479 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1480 handle->xrun[0] = false;
\r
1482 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1483 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1484 handle->xrun[1] = false;
\r
// The user callback's return value drives the drain state machine:
// 2 = abort now, 1 = drain then stop (internal).
1487 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1488 stream_.bufferSize, streamTime, status, info->userData );
\r
1489 if ( handle->drainCounter == 2 ) {
\r
1490 MUTEX_UNLOCK( &stream_.mutex );
\r
1494 else if ( handle->drainCounter == 1 )
\r
1495 handle->internalDrain = true;
\r
// ---- Output direction: fill the CoreAudio output buffer list ----
1498 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1500 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1502 if ( handle->nStreams[0] == 1 ) {
\r
1503 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1505 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1507 else { // fill multiple streams with zeros
\r
1508 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1509 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1511 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1515 else if ( handle->nStreams[0] == 1 ) {
\r
1516 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1517 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1518 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1520 else { // copy from user buffer
\r
1521 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1522 stream_.userBuffer[0],
\r
1523 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1526 else { // fill multiple streams
\r
1527 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1528 if ( stream_.doConvertBuffer[0] ) {
\r
1529 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1530 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1533 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1534 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1535 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1536 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1537 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1540 else { // fill multiple multi-channel streams with interleaved data
\r
1541 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1542 Float32 *out, *in;
\r
1544 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1545 UInt32 inChannels = stream_.nUserChannels[0];
\r
1546 if ( stream_.doConvertBuffer[0] ) {
\r
1547 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1548 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset: distance between consecutive samples of one channel in the
// source buffer (1 when interleaved, bufferSize when planar).
1551 if ( inInterleaved ) inOffset = 1;
\r
1552 else inOffset = stream_.bufferSize;
\r
1554 channelsLeft = inChannels;
\r
1555 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1557 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1558 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1561 // Account for possible channel offset in first stream
\r
1562 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1563 streamChannels -= stream_.channelOffset[0];
\r
1564 outJump = stream_.channelOffset[0];
\r
1568 // Account for possible unfilled channels at end of the last stream
\r
1569 if ( streamChannels > channelsLeft ) {
\r
1570 outJump = streamChannels - channelsLeft;
\r
1571 streamChannels = channelsLeft;
\r
1574 // Determine input buffer offsets and skips
\r
1575 if ( inInterleaved ) {
\r
1576 inJump = inChannels;
\r
1577 in += inChannels - channelsLeft;
\r
1581 in += (inChannels - channelsLeft) * inOffset;
\r
1584 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1585 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1586 *out++ = in[j*inOffset];
\r
1591 channelsLeft -= streamChannels;
\r
// A non-zero drainCounter is incremented each callback; once it passes 3
// the block at the top of this function signals/stops the stream.
1596 if ( handle->drainCounter ) {
\r
1597 handle->drainCounter++;
\r
// ---- Input direction: read the CoreAudio input buffer list ----
1602 AudioDeviceID inputDevice;
\r
1603 inputDevice = handle->id[1];
\r
1604 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1606 if ( handle->nStreams[1] == 1 ) {
\r
1607 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1608 convertBuffer( stream_.userBuffer[1],
\r
1609 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1610 stream_.convertInfo[1] );
\r
1612 else { // copy to user buffer
\r
1613 memcpy( stream_.userBuffer[1],
\r
1614 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1615 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1618 else { // read from multiple streams
\r
1619 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1620 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1622 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1623 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1624 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1625 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1626 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1629 else { // read from multiple multi-channel streams
\r
1630 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1631 Float32 *out, *in;
\r
1633 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1634 UInt32 outChannels = stream_.nUserChannels[1];
\r
1635 if ( stream_.doConvertBuffer[1] ) {
\r
1636 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1637 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset: mirror of inOffset above, for the destination buffer.
1640 if ( outInterleaved ) outOffset = 1;
\r
1641 else outOffset = stream_.bufferSize;
\r
1643 channelsLeft = outChannels;
\r
1644 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1646 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1647 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1650 // Account for possible channel offset in first stream
\r
1651 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1652 streamChannels -= stream_.channelOffset[1];
\r
1653 inJump = stream_.channelOffset[1];
\r
1657 // Account for possible unread channels at end of the last stream
\r
1658 if ( streamChannels > channelsLeft ) {
\r
1659 inJump = streamChannels - channelsLeft;
\r
1660 streamChannels = channelsLeft;
\r
1663 // Determine output buffer offsets and skips
\r
1664 if ( outInterleaved ) {
\r
1665 outJump = outChannels;
\r
1666 out += outChannels - channelsLeft;
\r
1670 out += (outChannels - channelsLeft) * outOffset;
\r
1673 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1674 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1675 out[j*outOffset] = *in++;
\r
1680 channelsLeft -= streamChannels;
\r
1684 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1685 convertBuffer( stream_.userBuffer[1],
\r
1686 stream_.deviceBuffer,
\r
1687 stream_.convertInfo[1] );
\r
1693 MUTEX_UNLOCK( &stream_.mutex );
\r
1695 RtApi::tickStreamTime();
\r
// Map a CoreAudio OSStatus error code to its symbolic constant name for
// human-readable error messages; unknown codes fall through to a generic
// string. NOTE(review): the `switch ( code ) {` line and `default:` label
// were dropped by the lossy extraction (embedded numbering skips 1700-1702,
// 1735-1736).
1699 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1703 case kAudioHardwareNotRunningError:
\r
1704 return "kAudioHardwareNotRunningError";
\r
1706 case kAudioHardwareUnspecifiedError:
\r
1707 return "kAudioHardwareUnspecifiedError";
\r
1709 case kAudioHardwareUnknownPropertyError:
\r
1710 return "kAudioHardwareUnknownPropertyError";
\r
1712 case kAudioHardwareBadPropertySizeError:
\r
1713 return "kAudioHardwareBadPropertySizeError";
\r
1715 case kAudioHardwareIllegalOperationError:
\r
1716 return "kAudioHardwareIllegalOperationError";
\r
1718 case kAudioHardwareBadObjectError:
\r
1719 return "kAudioHardwareBadObjectError";
\r
1721 case kAudioHardwareBadDeviceError:
\r
1722 return "kAudioHardwareBadDeviceError";
\r
1724 case kAudioHardwareBadStreamError:
\r
1725 return "kAudioHardwareBadStreamError";
\r
1727 case kAudioHardwareUnsupportedOperationError:
\r
1728 return "kAudioHardwareUnsupportedOperationError";
\r
1730 case kAudioDeviceUnsupportedFormatError:
\r
1731 return "kAudioDeviceUnsupportedFormatError";
\r
1733 case kAudioDevicePermissionsError:
\r
1734 return "kAudioDevicePermissionsError";
\r
1737 return "CoreAudio unknown error";
\r
1741 //******************** End of __MACOSX_CORE__ *********************//
\r
1744 #if defined(__UNIX_JACK__)
\r
1746 // JACK is a low-latency audio server, originally written for the
\r
1747 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1748 // connect a number of different applications to an audio device, as
\r
1749 // well as allowing them to share audio between themselves.
\r
1751 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1752 // have ports connected to the server. The JACK server is typically
\r
1753 // started in a terminal as follows:
\r
1755 // .jackd -d alsa -d hw:0
\r
1757 // or through an interface program such as qjackctl. Many of the
\r
1758 // parameters normally set for a stream are fixed by the JACK server
\r
1759 // and can be specified when the JACK server is started. In
\r
1762 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1764 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1765 // frames, and number of buffers = 4. Once the server is running, it
\r
1766 // is not possible to override these values. If the values are not
\r
1767 // specified in the command-line, the JACK server uses default values.
\r
1769 // The JACK server does not have to be running when an instance of
\r
1770 // RtApiJack is created, though the function getDeviceCount() will
\r
1771 // report 0 devices found until JACK has been started. When no
\r
1772 // devices are available (i.e., the JACK server is not running), a
\r
1773 // stream cannot be opened.
\r
1775 #include <jack/jack.h>
\r
1776 #include <unistd.h>
\r
1779 // A structure to hold various information related to the Jack API
\r
1780 // implementation.
\r
1781 struct JackHandle {
\r
1782 jack_client_t *client;
\r
1783 jack_port_t **ports[2];
\r
1784 std::string deviceName[2];
\r
1786 pthread_cond_t condition;
\r
1787 int drainCounter; // Tracks callback counts when draining
\r
1788 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1791 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1794 ThreadHandle threadId;
\r
1795 void jackSilentError( const char * ) {};
\r
1797 RtApiJack :: RtApiJack()
\r
1799 // Nothing to do here.
\r
1800 #if !defined(__RTAUDIO_DEBUG__)
\r
1801 // Turn off Jack's internal error reporting.
\r
1802 jack_set_error_function( &jackSilentError );
\r
1806 RtApiJack :: ~RtApiJack()
\r
1808 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1811 unsigned int RtApiJack :: getDeviceCount( void )
\r
1813 // See if we can become a jack client.
\r
1814 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1815 jack_status_t *status = NULL;
\r
1816 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1817 if ( client == 0 ) return 0;
\r
1819 const char **ports;
\r
1820 std::string port, previousPort;
\r
1821 unsigned int nChannels = 0, nDevices = 0;
\r
1822 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1824 // Parse the port names up to the first colon (:).
\r
1825 size_t iColon = 0;
\r
1827 port = (char *) ports[ nChannels ];
\r
1828 iColon = port.find(":");
\r
1829 if ( iColon != std::string::npos ) {
\r
1830 port = port.substr( 0, iColon + 1 );
\r
1831 if ( port != previousPort ) {
\r
1833 previousPort = port;
\r
1836 } while ( ports[++nChannels] );
\r
1840 jack_client_close( client );
\r
1844 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1846 RtAudio::DeviceInfo info;
\r
1847 info.probed = false;
\r
1849 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1850 jack_status_t *status = NULL;
\r
1851 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1852 if ( client == 0 ) {
\r
1853 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1854 error( RtError::WARNING );
\r
1858 const char **ports;
\r
1859 std::string port, previousPort;
\r
1860 unsigned int nPorts = 0, nDevices = 0;
\r
1861 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1863 // Parse the port names up to the first colon (:).
\r
1864 size_t iColon = 0;
\r
1866 port = (char *) ports[ nPorts ];
\r
1867 iColon = port.find(":");
\r
1868 if ( iColon != std::string::npos ) {
\r
1869 port = port.substr( 0, iColon );
\r
1870 if ( port != previousPort ) {
\r
1871 if ( nDevices == device ) info.name = port;
\r
1873 previousPort = port;
\r
1876 } while ( ports[++nPorts] );
\r
1880 if ( device >= nDevices ) {
\r
1881 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1882 error( RtError::INVALID_USE );
\r
1885 // Get the current jack server sample rate.
\r
1886 info.sampleRates.clear();
\r
1887 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1889 // Count the available ports containing the client name as device
\r
1890 // channels. Jack "input ports" equal RtAudio output channels.
\r
1891 unsigned int nChannels = 0;
\r
1892 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1894 while ( ports[ nChannels ] ) nChannels++;
\r
1896 info.outputChannels = nChannels;
\r
1899 // Jack "output ports" equal RtAudio input channels.
\r
1901 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1903 while ( ports[ nChannels ] ) nChannels++;
\r
1905 info.inputChannels = nChannels;
\r
1908 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1909 jack_client_close(client);
\r
1910 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1911 error( RtError::WARNING );
\r
1915 // If device opens for both playback and capture, we determine the channels.
\r
1916 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1917 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1919 // Jack always uses 32-bit floats.
\r
1920 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1922 // Jack doesn't provide default devices so we'll use the first available one.
\r
1923 if ( device == 0 && info.outputChannels > 0 )
\r
1924 info.isDefaultOutput = true;
\r
1925 if ( device == 0 && info.inputChannels > 0 )
\r
1926 info.isDefaultInput = true;
\r
1928 jack_client_close(client);
\r
1929 info.probed = true;
\r
1933 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1935 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1937 RtApiJack *object = (RtApiJack *) info->object;
\r
1938 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1943 // This function will be called by a spawned thread when the Jack
\r
1944 // server signals that it is shutting down. It is necessary to handle
\r
1945 // it this way because the jackShutdown() function must return before
\r
1946 // the jack_deactivate() function (in closeStream()) will return.
\r
1947 extern "C" void *jackCloseStream( void *ptr )
\r
1949 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1950 RtApiJack *object = (RtApiJack *) info->object;
\r
1952 object->closeStream();
\r
1954 pthread_exit( NULL );
\r
1956 void jackShutdown( void *infoPointer )
\r
1958 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1959 RtApiJack *object = (RtApiJack *) info->object;
\r
1961 // Check current stream state. If stopped, then we'll assume this
\r
1962 // was called as a result of a call to RtApiJack::stopStream (the
\r
1963 // deactivation of a client handle causes this function to be called).
\r
1964 // If not, we'll assume the Jack server is shutting down or some
\r
1965 // other problem occurred and we should close the stream.
\r
1966 if ( object->isStreamRunning() == false ) return;
\r
1968 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
1969 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
1972 int jackXrun( void *infoPointer )
\r
1974 JackHandle *handle = (JackHandle *) infoPointer;
\r
1976 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
1977 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
1982 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
1983 unsigned int firstChannel, unsigned int sampleRate,
\r
1984 RtAudioFormat format, unsigned int *bufferSize,
\r
1985 RtAudio::StreamOptions *options )
\r
1987 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
1989 // Look for jack server and try to become a client (only do once per stream).
\r
1990 jack_client_t *client = 0;
\r
1991 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
1992 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1993 jack_status_t *status = NULL;
\r
1994 if ( options && !options->streamName.empty() )
\r
1995 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
1997 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
1998 if ( client == 0 ) {
\r
1999 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2000 error( RtError::WARNING );
\r
2005 // The handle must have been created on an earlier pass.
\r
2006 client = handle->client;
\r
2009 const char **ports;
\r
2010 std::string port, previousPort, deviceName;
\r
2011 unsigned int nPorts = 0, nDevices = 0;
\r
2012 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2014 // Parse the port names up to the first colon (:).
\r
2015 size_t iColon = 0;
\r
2017 port = (char *) ports[ nPorts ];
\r
2018 iColon = port.find(":");
\r
2019 if ( iColon != std::string::npos ) {
\r
2020 port = port.substr( 0, iColon );
\r
2021 if ( port != previousPort ) {
\r
2022 if ( nDevices == device ) deviceName = port;
\r
2024 previousPort = port;
\r
2027 } while ( ports[++nPorts] );
\r
2031 if ( device >= nDevices ) {
\r
2032 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2036 // Count the available ports containing the client name as device
\r
2037 // channels. Jack "input ports" equal RtAudio output channels.
\r
2038 unsigned int nChannels = 0;
\r
2039 unsigned long flag = JackPortIsInput;
\r
2040 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2041 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2043 while ( ports[ nChannels ] ) nChannels++;
\r
2047 // Compare the jack ports for specified client to the requested number of channels.
\r
2048 if ( nChannels < (channels + firstChannel) ) {
\r
2049 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2050 errorText_ = errorStream_.str();
\r
2054 // Check the jack server sample rate.
\r
2055 unsigned int jackRate = jack_get_sample_rate( client );
\r
2056 if ( sampleRate != jackRate ) {
\r
2057 jack_client_close( client );
\r
2058 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2059 errorText_ = errorStream_.str();
\r
2062 stream_.sampleRate = jackRate;
\r
2064 // Get the latency of the JACK port.
\r
2065 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2066 if ( ports[ firstChannel ] )
\r
2067 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2070 // The jack server always uses 32-bit floating-point data.
\r
2071 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2072 stream_.userFormat = format;
\r
2074 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2075 else stream_.userInterleaved = true;
\r
2077 // Jack always uses non-interleaved buffers.
\r
2078 stream_.deviceInterleaved[mode] = false;
\r
2080 // Jack always provides host byte-ordered data.
\r
2081 stream_.doByteSwap[mode] = false;
\r
2083 // Get the buffer size. The buffer size and number of buffers
\r
2084 // (periods) is set when the jack server is started.
\r
2085 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2086 *bufferSize = stream_.bufferSize;
\r
2088 stream_.nDeviceChannels[mode] = channels;
\r
2089 stream_.nUserChannels[mode] = channels;
\r
2091 // Set flags for buffer conversion.
\r
2092 stream_.doConvertBuffer[mode] = false;
\r
2093 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2094 stream_.doConvertBuffer[mode] = true;
\r
2095 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2096 stream_.nUserChannels[mode] > 1 )
\r
2097 stream_.doConvertBuffer[mode] = true;
\r
2099 // Allocate our JackHandle structure for the stream.
\r
2100 if ( handle == 0 ) {
\r
2102 handle = new JackHandle;
\r
2104 catch ( std::bad_alloc& ) {
\r
2105 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2109 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2110 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2113 stream_.apiHandle = (void *) handle;
\r
2114 handle->client = client;
\r
2116 handle->deviceName[mode] = deviceName;
\r
2118 // Allocate necessary internal buffers.
\r
2119 unsigned long bufferBytes;
\r
2120 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2121 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2122 if ( stream_.userBuffer[mode] == NULL ) {
\r
2123 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2127 if ( stream_.doConvertBuffer[mode] ) {
\r
2129 bool makeBuffer = true;
\r
2130 if ( mode == OUTPUT )
\r
2131 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2132 else { // mode == INPUT
\r
2133 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2134 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2135 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2136 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2140 if ( makeBuffer ) {
\r
2141 bufferBytes *= *bufferSize;
\r
2142 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2143 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2144 if ( stream_.deviceBuffer == NULL ) {
\r
2145 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2151 // Allocate memory for the Jack ports (channels) identifiers.
\r
2152 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2153 if ( handle->ports[mode] == NULL ) {
\r
2154 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2158 stream_.device[mode] = device;
\r
2159 stream_.channelOffset[mode] = firstChannel;
\r
2160 stream_.state = STREAM_STOPPED;
\r
2161 stream_.callbackInfo.object = (void *) this;
\r
2163 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2164 // We had already set up the stream for output.
\r
2165 stream_.mode = DUPLEX;
\r
2167 stream_.mode = mode;
\r
2168 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2169 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2170 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2173 // Register our ports.
\r
2175 if ( mode == OUTPUT ) {
\r
2176 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2177 snprintf( label, 64, "outport %d", i );
\r
2178 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2179 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2183 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2184 snprintf( label, 64, "inport %d", i );
\r
2185 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2186 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2190 // Setup the buffer conversion information structure. We don't use
\r
2191 // buffers to do channel offsets, so we override that parameter
\r
2193 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2199 pthread_cond_destroy( &handle->condition );
\r
2200 jack_client_close( handle->client );
\r
2202 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2203 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2206 stream_.apiHandle = 0;
\r
2209 for ( int i=0; i<2; i++ ) {
\r
2210 if ( stream_.userBuffer[i] ) {
\r
2211 free( stream_.userBuffer[i] );
\r
2212 stream_.userBuffer[i] = 0;
\r
2216 if ( stream_.deviceBuffer ) {
\r
2217 free( stream_.deviceBuffer );
\r
2218 stream_.deviceBuffer = 0;
\r
2224 void RtApiJack :: closeStream( void )
\r
2226 if ( stream_.state == STREAM_CLOSED ) {
\r
2227 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2228 error( RtError::WARNING );
\r
2232 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2235 if ( stream_.state == STREAM_RUNNING )
\r
2236 jack_deactivate( handle->client );
\r
2238 jack_client_close( handle->client );
\r
2242 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2243 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2244 pthread_cond_destroy( &handle->condition );
\r
2246 stream_.apiHandle = 0;
\r
2249 for ( int i=0; i<2; i++ ) {
\r
2250 if ( stream_.userBuffer[i] ) {
\r
2251 free( stream_.userBuffer[i] );
\r
2252 stream_.userBuffer[i] = 0;
\r
2256 if ( stream_.deviceBuffer ) {
\r
2257 free( stream_.deviceBuffer );
\r
2258 stream_.deviceBuffer = 0;
\r
2261 stream_.mode = UNINITIALIZED;
\r
2262 stream_.state = STREAM_CLOSED;
\r
2265 void RtApiJack :: startStream( void )
\r
2268 if ( stream_.state == STREAM_RUNNING ) {
\r
2269 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2270 error( RtError::WARNING );
\r
2274 MUTEX_LOCK(&stream_.mutex);
\r
2276 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2277 int result = jack_activate( handle->client );
\r
2279 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2283 const char **ports;
\r
2285 // Get the list of available ports.
\r
2286 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2288 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2289 if ( ports == NULL) {
\r
2290 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2294 // Now make the port connections. Since RtAudio wasn't designed to
\r
2295 // allow the user to select particular channels of a device, we'll
\r
2296 // just open the first "nChannels" ports with offset.
\r
2297 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2299 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2300 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2303 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2310 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2312 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2313 if ( ports == NULL) {
\r
2314 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2318 // Now make the port connections. See note above.
\r
2319 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2321 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2322 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2325 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2332 handle->drainCounter = 0;
\r
2333 handle->internalDrain = false;
\r
2334 stream_.state = STREAM_RUNNING;
\r
2337 MUTEX_UNLOCK(&stream_.mutex);
\r
2339 if ( result == 0 ) return;
\r
2340 error( RtError::SYSTEM_ERROR );
\r
2343 void RtApiJack :: stopStream( void )
\r
2346 if ( stream_.state == STREAM_STOPPED ) {
\r
2347 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2348 error( RtError::WARNING );
\r
2352 MUTEX_LOCK( &stream_.mutex );
\r
2354 if ( stream_.state == STREAM_STOPPED ) {
\r
2355 MUTEX_UNLOCK( &stream_.mutex );
\r
2359 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2360 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2362 if ( handle->drainCounter == 0 ) {
\r
2363 handle->drainCounter = 2;
\r
2364 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2368 jack_deactivate( handle->client );
\r
2369 stream_.state = STREAM_STOPPED;
\r
2371 MUTEX_UNLOCK( &stream_.mutex );
\r
2374 void RtApiJack :: abortStream( void )
\r
2377 if ( stream_.state == STREAM_STOPPED ) {
\r
2378 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2379 error( RtError::WARNING );
\r
2383 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2384 handle->drainCounter = 2;
\r
2389 // This function will be called by a spawned thread when the user
\r
2390 // callback function signals that the stream should be stopped or
\r
2391 // aborted. It is necessary to handle it this way because the
\r
2392 // callbackEvent() function must return before the jack_deactivate()
\r
2393 // function will return.
\r
2394 extern "C" void *jackStopStream( void *ptr )
\r
2396 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2397 RtApiJack *object = (RtApiJack *) info->object;
\r
2399 object->stopStream();
\r
2401 pthread_exit( NULL );
\r
2404 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2406 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2407 if ( stream_.state == STREAM_CLOSED ) {
\r
2408 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2409 error( RtError::WARNING );
\r
2412 if ( stream_.bufferSize != nframes ) {
\r
2413 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2414 error( RtError::WARNING );
\r
2418 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2419 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2421 // Check if we were draining the stream and signal is finished.
\r
2422 if ( handle->drainCounter > 3 ) {
\r
2423 if ( handle->internalDrain == true )
\r
2424 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2426 pthread_cond_signal( &handle->condition );
\r
2430 MUTEX_LOCK( &stream_.mutex );
\r
2432 // The state might change while waiting on a mutex.
\r
2433 if ( stream_.state == STREAM_STOPPED ) {
\r
2434 MUTEX_UNLOCK( &stream_.mutex );
\r
2438 // Invoke user callback first, to get fresh output data.
\r
2439 if ( handle->drainCounter == 0 ) {
\r
2440 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2441 double streamTime = getStreamTime();
\r
2442 RtAudioStreamStatus status = 0;
\r
2443 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2444 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2445 handle->xrun[0] = false;
\r
2447 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2448 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2449 handle->xrun[1] = false;
\r
2451 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2452 stream_.bufferSize, streamTime, status, info->userData );
\r
2453 if ( handle->drainCounter == 2 ) {
\r
2454 MUTEX_UNLOCK( &stream_.mutex );
\r
2456 pthread_create( &id, NULL, jackStopStream, info );
\r
2459 else if ( handle->drainCounter == 1 )
\r
2460 handle->internalDrain = true;
\r
2463 jack_default_audio_sample_t *jackbuffer;
\r
2464 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2465 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2467 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2469 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2470 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2471 memset( jackbuffer, 0, bufferBytes );
\r
2475 else if ( stream_.doConvertBuffer[0] ) {
\r
2477 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2479 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2480 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2481 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2484 else { // no buffer conversion
\r
2485 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2486 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2487 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2491 if ( handle->drainCounter ) {
\r
2492 handle->drainCounter++;
\r
2497 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2499 if ( stream_.doConvertBuffer[1] ) {
\r
2500 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2501 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2502 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2504 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2506 else { // no buffer conversion
\r
2507 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2508 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2509 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2515 MUTEX_UNLOCK(&stream_.mutex);
\r
2517 RtApi::tickStreamTime();
\r
2520 //******************** End of __UNIX_JACK__ *********************//
\r
2523 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2525 // The ASIO API is designed around a callback scheme, so this
\r
2526 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2527 // Jack. The primary constraint with ASIO is that it only allows
\r
2528 // access to a single driver at a time. Thus, it is not possible to
\r
2529 // have more than one simultaneous RtAudio stream.
\r
2531 // This implementation also requires a number of external ASIO files
\r
2532 // and a few global variables. The ASIO callback scheme does not
\r
2533 // allow for the passing of user data, so we must create a global
\r
2534 // pointer to our callbackInfo structure.
\r
2536 // On unix systems, we make use of a pthread condition variable.
\r
2537 // Since there is no equivalent in Windows, I hacked something based
\r
2538 // on information found in
\r
2539 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2541 #include "asiosys.h"
\r
2543 #include "iasiothiscallresolver.h"
\r
2544 #include "asiodrivers.h"
\r
2547 AsioDrivers drivers;
\r
2548 ASIOCallbacks asioCallbacks;
\r
2549 ASIODriverInfo driverInfo;
\r
2550 CallbackInfo *asioCallbackInfo;
\r
2553 struct AsioHandle {
\r
2554 int drainCounter; // Tracks callback counts when draining
\r
2555 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2556 ASIOBufferInfo *bufferInfos;
\r
2560 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2563 // Function declarations (definitions at end of section)
\r
2564 static const char* getAsioErrorString( ASIOError result );
\r
2565 void sampleRateChanged( ASIOSampleRate sRate );
\r
2566 long asioMessages( long selector, long value, void* message, double* opt );
\r
2568 RtApiAsio :: RtApiAsio()
\r
2570 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2571 // CoInitialize beforehand, but it must be for appartment threading
\r
2572 // (in which case, CoInitilialize will return S_FALSE here).
\r
2573 coInitialized_ = false;
\r
2574 HRESULT hr = CoInitialize( NULL );
\r
2575 if ( FAILED(hr) ) {
\r
2576 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2577 error( RtError::WARNING );
\r
2579 coInitialized_ = true;
\r
2581 drivers.removeCurrentDriver();
\r
2582 driverInfo.asioVersion = 2;
\r
2584 // See note in DirectSound implementation about GetDesktopWindow().
\r
2585 driverInfo.sysRef = GetForegroundWindow();
\r
2588 RtApiAsio :: ~RtApiAsio()
\r
2590 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2591 if ( coInitialized_ ) CoUninitialize();
\r
2594 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2596 return (unsigned int) drivers.asioGetNumDev();
\r
2599 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2601 RtAudio::DeviceInfo info;
\r
2602 info.probed = false;
\r
2605 unsigned int nDevices = getDeviceCount();
\r
2606 if ( nDevices == 0 ) {
\r
2607 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2608 error( RtError::INVALID_USE );
\r
2611 if ( device >= nDevices ) {
\r
2612 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2613 error( RtError::INVALID_USE );
\r
2616 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2617 if ( stream_.state != STREAM_CLOSED ) {
\r
2618 if ( device >= devices_.size() ) {
\r
2619 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2620 error( RtError::WARNING );
\r
2623 return devices_[ device ];
\r
2626 char driverName[32];
\r
2627 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2628 if ( result != ASE_OK ) {
\r
2629 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2630 errorText_ = errorStream_.str();
\r
2631 error( RtError::WARNING );
\r
2635 info.name = driverName;
\r
2637 if ( !drivers.loadDriver( driverName ) ) {
\r
2638 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2639 errorText_ = errorStream_.str();
\r
2640 error( RtError::WARNING );
\r
2644 result = ASIOInit( &driverInfo );
\r
2645 if ( result != ASE_OK ) {
\r
2646 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2647 errorText_ = errorStream_.str();
\r
2648 error( RtError::WARNING );
\r
2652 // Determine the device channel information.
\r
2653 long inputChannels, outputChannels;
\r
2654 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2655 if ( result != ASE_OK ) {
\r
2656 drivers.removeCurrentDriver();
\r
2657 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2658 errorText_ = errorStream_.str();
\r
2659 error( RtError::WARNING );
\r
2663 info.outputChannels = outputChannels;
\r
2664 info.inputChannels = inputChannels;
\r
2665 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2666 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2668 // Determine the supported sample rates.
\r
2669 info.sampleRates.clear();
\r
2670 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2671 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2672 if ( result == ASE_OK )
\r
2673 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2676 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2677 ASIOChannelInfo channelInfo;
\r
2678 channelInfo.channel = 0;
\r
2679 channelInfo.isInput = true;
\r
2680 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2681 result = ASIOGetChannelInfo( &channelInfo );
\r
2682 if ( result != ASE_OK ) {
\r
2683 drivers.removeCurrentDriver();
\r
2684 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2685 errorText_ = errorStream_.str();
\r
2686 error( RtError::WARNING );
\r
2690 info.nativeFormats = 0;
\r
2691 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2692 info.nativeFormats |= RTAUDIO_SINT16;
\r
2693 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2694 info.nativeFormats |= RTAUDIO_SINT32;
\r
2695 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2696 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2697 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2698 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2700 if ( info.outputChannels > 0 )
\r
2701 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2702 if ( info.inputChannels > 0 )
\r
2703 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2705 info.probed = true;
\r
2706 drivers.removeCurrentDriver();
\r
2710 void bufferSwitch( long index, ASIOBool processNow )
\r
2712 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2713 object->callbackEvent( index );
\r
2716 void RtApiAsio :: saveDeviceInfo( void )
\r
2720 unsigned int nDevices = getDeviceCount();
\r
2721 devices_.resize( nDevices );
\r
2722 for ( unsigned int i=0; i<nDevices; i++ )
\r
2723 devices_[i] = getDeviceInfo( i );
\r
2726 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2727 unsigned int firstChannel, unsigned int sampleRate,
\r
2728 RtAudioFormat format, unsigned int *bufferSize,
\r
2729 RtAudio::StreamOptions *options )
\r
2731 // For ASIO, a duplex stream MUST use the same driver.
\r
2732 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2733 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2737 char driverName[32];
\r
2738 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2739 if ( result != ASE_OK ) {
\r
2740 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2741 errorText_ = errorStream_.str();
\r
2745 // The getDeviceInfo() function will not work when a stream is open
\r
2746 // because ASIO does not allow multiple devices to run at the same
\r
2747 // time. Thus, we'll probe the system before opening a stream and
\r
2748 // save the results for use by getDeviceInfo().
\r
2749 this->saveDeviceInfo();
\r
2751 // Only load the driver once for duplex stream.
\r
2752 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2753 if ( !drivers.loadDriver( driverName ) ) {
\r
2754 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2755 errorText_ = errorStream_.str();
\r
2759 result = ASIOInit( &driverInfo );
\r
2760 if ( result != ASE_OK ) {
\r
2761 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2762 errorText_ = errorStream_.str();
\r
2767 // Check the device channel count.
\r
2768 long inputChannels, outputChannels;
\r
2769 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2770 if ( result != ASE_OK ) {
\r
2771 drivers.removeCurrentDriver();
\r
2772 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2773 errorText_ = errorStream_.str();
\r
2777 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2778 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2784 stream_.nDeviceChannels[mode] = channels;
\r
2785 stream_.nUserChannels[mode] = channels;
\r
2786 stream_.channelOffset[mode] = firstChannel;
\r
2788 // Verify the sample rate is supported.
\r
2789 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2790 if ( result != ASE_OK ) {
\r
2791 drivers.removeCurrentDriver();
\r
2792 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2793 errorText_ = errorStream_.str();
\r
2797 // Get the current sample rate
\r
2798 ASIOSampleRate currentRate;
\r
2799 result = ASIOGetSampleRate( ¤tRate );
\r
2800 if ( result != ASE_OK ) {
\r
2801 drivers.removeCurrentDriver();
\r
2802 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2803 errorText_ = errorStream_.str();
\r
2807 // Set the sample rate only if necessary
\r
2808 if ( currentRate != sampleRate ) {
\r
2809 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2810 if ( result != ASE_OK ) {
\r
2811 drivers.removeCurrentDriver();
\r
2812 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2813 errorText_ = errorStream_.str();
\r
2818 // Determine the driver data type.
\r
2819 ASIOChannelInfo channelInfo;
\r
2820 channelInfo.channel = 0;
\r
2821 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2822 else channelInfo.isInput = true;
\r
2823 result = ASIOGetChannelInfo( &channelInfo );
\r
2824 if ( result != ASE_OK ) {
\r
2825 drivers.removeCurrentDriver();
\r
2826 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2827 errorText_ = errorStream_.str();
\r
2831 // Assuming WINDOWS host is always little-endian.
\r
2832 stream_.doByteSwap[mode] = false;
\r
2833 stream_.userFormat = format;
\r
2834 stream_.deviceFormat[mode] = 0;
\r
2835 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2836 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2837 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2839 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2840 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2841 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2843 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2844 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2845 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2847 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2848 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2849 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2852 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2853 drivers.removeCurrentDriver();
\r
2854 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2855 errorText_ = errorStream_.str();
\r
2859 // Set the buffer size. For a duplex stream, this will end up
\r
2860 // setting the buffer size based on the input constraints, which
\r
2862 long minSize, maxSize, preferSize, granularity;
\r
2863 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2864 if ( result != ASE_OK ) {
\r
2865 drivers.removeCurrentDriver();
\r
2866 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2867 errorText_ = errorStream_.str();
\r
2871 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2872 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2873 else if ( granularity == -1 ) {
\r
2874 // Make sure bufferSize is a power of two.
\r
2875 int log2_of_min_size = 0;
\r
2876 int log2_of_max_size = 0;
\r
2878 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2879 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2880 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2883 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2884 int min_delta_num = log2_of_min_size;
\r
2886 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2887 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2888 if (current_delta < min_delta) {
\r
2889 min_delta = current_delta;
\r
2890 min_delta_num = i;
\r
2894 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2895 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2896 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2898 else if ( granularity != 0 ) {
\r
2899 // Set to an even multiple of granularity, rounding up.
\r
2900 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2903 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2904 drivers.removeCurrentDriver();
\r
2905 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2909 stream_.bufferSize = *bufferSize;
\r
2910 stream_.nBuffers = 2;
\r
2912 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2913 else stream_.userInterleaved = true;
\r
2915 // ASIO always uses non-interleaved buffers.
\r
2916 stream_.deviceInterleaved[mode] = false;
\r
2918 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2919 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2920 if ( handle == 0 ) {
\r
2922 handle = new AsioHandle;
\r
2924 catch ( std::bad_alloc& ) {
\r
2925 //if ( handle == NULL ) {
\r
2926 drivers.removeCurrentDriver();
\r
2927 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2930 handle->bufferInfos = 0;
\r
2932 // Create a manual-reset event.
\r
2933 handle->condition = CreateEvent( NULL, // no security
\r
2934 TRUE, // manual-reset
\r
2935 FALSE, // non-signaled initially
\r
2936 NULL ); // unnamed
\r
2937 stream_.apiHandle = (void *) handle;
\r
2940 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2941 // and output separately, we'll have to dispose of previously
\r
2942 // created output buffers for a duplex stream.
\r
2943 long inputLatency, outputLatency;
\r
2944 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2945 ASIODisposeBuffers();
\r
2946 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2949 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2950 bool buffersAllocated = false;
\r
2951 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2952 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2953 if ( handle->bufferInfos == NULL ) {
\r
2954 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2955 errorText_ = errorStream_.str();
\r
2959 ASIOBufferInfo *infos;
\r
2960 infos = handle->bufferInfos;
\r
2961 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2962 infos->isInput = ASIOFalse;
\r
2963 infos->channelNum = i + stream_.channelOffset[0];
\r
2964 infos->buffers[0] = infos->buffers[1] = 0;
\r
2966 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2967 infos->isInput = ASIOTrue;
\r
2968 infos->channelNum = i + stream_.channelOffset[1];
\r
2969 infos->buffers[0] = infos->buffers[1] = 0;
\r
2972 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
2973 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
2974 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
2975 asioCallbacks.asioMessage = &asioMessages;
\r
2976 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
2977 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
2978 if ( result != ASE_OK ) {
\r
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
2980 errorText_ = errorStream_.str();
\r
2983 buffersAllocated = true;
\r
2985 // Set flags for buffer conversion.
\r
2986 stream_.doConvertBuffer[mode] = false;
\r
2987 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2988 stream_.doConvertBuffer[mode] = true;
\r
2989 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2990 stream_.nUserChannels[mode] > 1 )
\r
2991 stream_.doConvertBuffer[mode] = true;
\r
2993 // Allocate necessary internal buffers
\r
2994 unsigned long bufferBytes;
\r
2995 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2996 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2997 if ( stream_.userBuffer[mode] == NULL ) {
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3002 if ( stream_.doConvertBuffer[mode] ) {
\r
3004 bool makeBuffer = true;
\r
3005 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3006 if ( mode == INPUT ) {
\r
3007 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3008 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3009 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3013 if ( makeBuffer ) {
\r
3014 bufferBytes *= *bufferSize;
\r
3015 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3016 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3017 if ( stream_.deviceBuffer == NULL ) {
\r
3018 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3024 stream_.sampleRate = sampleRate;
\r
3025 stream_.device[mode] = device;
\r
3026 stream_.state = STREAM_STOPPED;
\r
3027 asioCallbackInfo = &stream_.callbackInfo;
\r
3028 stream_.callbackInfo.object = (void *) this;
\r
3029 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3030 // We had already set up an output stream.
\r
3031 stream_.mode = DUPLEX;
\r
3033 stream_.mode = mode;
\r
3035 // Determine device latencies
\r
3036 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3037 if ( result != ASE_OK ) {
\r
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3039 errorText_ = errorStream_.str();
\r
3040 error( RtError::WARNING); // warn but don't fail
\r
3043 stream_.latency[0] = outputLatency;
\r
3044 stream_.latency[1] = inputLatency;
\r
3047 // Setup the buffer conversion information structure. We don't use
\r
3048 // buffers to do channel offsets, so we override that parameter
\r
3050 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3055 if ( buffersAllocated )
\r
3056 ASIODisposeBuffers();
\r
3057 drivers.removeCurrentDriver();
\r
3060 CloseHandle( handle->condition );
\r
3061 if ( handle->bufferInfos )
\r
3062 free( handle->bufferInfos );
\r
3064 stream_.apiHandle = 0;
\r
3067 for ( int i=0; i<2; i++ ) {
\r
3068 if ( stream_.userBuffer[i] ) {
\r
3069 free( stream_.userBuffer[i] );
\r
3070 stream_.userBuffer[i] = 0;
\r
3074 if ( stream_.deviceBuffer ) {
\r
3075 free( stream_.deviceBuffer );
\r
3076 stream_.deviceBuffer = 0;
\r
3082 void RtApiAsio :: closeStream()
\r
3084 if ( stream_.state == STREAM_CLOSED ) {
\r
3085 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3086 error( RtError::WARNING );
\r
3090 if ( stream_.state == STREAM_RUNNING ) {
\r
3091 stream_.state = STREAM_STOPPED;
\r
3094 ASIODisposeBuffers();
\r
3095 drivers.removeCurrentDriver();
\r
3097 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3099 CloseHandle( handle->condition );
\r
3100 if ( handle->bufferInfos )
\r
3101 free( handle->bufferInfos );
\r
3103 stream_.apiHandle = 0;
\r
3106 for ( int i=0; i<2; i++ ) {
\r
3107 if ( stream_.userBuffer[i] ) {
\r
3108 free( stream_.userBuffer[i] );
\r
3109 stream_.userBuffer[i] = 0;
\r
3113 if ( stream_.deviceBuffer ) {
\r
3114 free( stream_.deviceBuffer );
\r
3115 stream_.deviceBuffer = 0;
\r
3118 stream_.mode = UNINITIALIZED;
\r
3119 stream_.state = STREAM_CLOSED;
\r
// Set when a stream-stopping helper thread has been spawned; callbackEvent()
// checks it so further driver callbacks are ignored while stopping.
bool stopThreadCalled = false;
\r
3124 void RtApiAsio :: startStream()
\r
3127 if ( stream_.state == STREAM_RUNNING ) {
\r
3128 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3129 error( RtError::WARNING );
\r
3133 //MUTEX_LOCK( &stream_.mutex );
\r
3135 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3136 ASIOError result = ASIOStart();
\r
3137 if ( result != ASE_OK ) {
\r
3138 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3139 errorText_ = errorStream_.str();
\r
3143 handle->drainCounter = 0;
\r
3144 handle->internalDrain = false;
\r
3145 ResetEvent( handle->condition );
\r
3146 stream_.state = STREAM_RUNNING;
\r
3150 //MUTEX_UNLOCK( &stream_.mutex );
\r
3152 stopThreadCalled = false;
\r
3154 if ( result == ASE_OK ) return;
\r
3155 error( RtError::SYSTEM_ERROR );
\r
3158 void RtApiAsio :: stopStream()
\r
3161 if ( stream_.state == STREAM_STOPPED ) {
\r
3162 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3163 error( RtError::WARNING );
\r
3168 MUTEX_LOCK( &stream_.mutex );
\r
3170 if ( stream_.state == STREAM_STOPPED ) {
\r
3171 MUTEX_UNLOCK( &stream_.mutex );
\r
3176 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3177 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3178 if ( handle->drainCounter == 0 ) {
\r
3179 handle->drainCounter = 2;
\r
3180 // MUTEX_UNLOCK( &stream_.mutex );
\r
3181 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3182 //ResetEvent( handle->condition );
\r
3183 // MUTEX_LOCK( &stream_.mutex );
\r
3187 stream_.state = STREAM_STOPPED;
\r
3189 ASIOError result = ASIOStop();
\r
3190 if ( result != ASE_OK ) {
\r
3191 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3192 errorText_ = errorStream_.str();
\r
3195 // MUTEX_UNLOCK( &stream_.mutex );
\r
3197 if ( result == ASE_OK ) return;
\r
3198 error( RtError::SYSTEM_ERROR );
\r
3201 void RtApiAsio :: abortStream()
\r
3204 if ( stream_.state == STREAM_STOPPED ) {
\r
3205 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3206 error( RtError::WARNING );
\r
3210 // The following lines were commented-out because some behavior was
\r
3211 // noted where the device buffers need to be zeroed to avoid
\r
3212 // continuing sound, even when the device buffers are completely
\r
3213 // disposed. So now, calling abort is the same as calling stop.
\r
3214 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3215 // handle->drainCounter = 2;
\r
3219 // This function will be called by a spawned thread when the user
\r
3220 // callback function signals that the stream should be stopped or
\r
3221 // aborted. It is necessary to handle it this way because the
\r
3222 // callbackEvent() function must return before the ASIOStop()
\r
3223 // function will return.
\r
3224 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3226 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3227 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3229 object->stopStream();
\r
3231 _endthreadex( 0 );
\r
3235 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3237 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
3238 if ( stopThreadCalled ) return SUCCESS;
\r
3239 if ( stream_.state == STREAM_CLOSED ) {
\r
3240 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3241 error( RtError::WARNING );
\r
3245 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3246 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3248 // Check if we were draining the stream and signal if finished.
\r
3249 if ( handle->drainCounter > 3 ) {
\r
3250 if ( handle->internalDrain == false )
\r
3251 SetEvent( handle->condition );
\r
3252 else { // spawn a thread to stop the stream
\r
3253 unsigned threadId;
\r
3254 stopThreadCalled = true;
\r
3255 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3256 &stream_.callbackInfo, 0, &threadId );
\r
3261 /*MUTEX_LOCK( &stream_.mutex );
\r
3263 // The state might change while waiting on a mutex.
\r
3264 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
\r
3266 // Invoke user callback to get fresh output data UNLESS we are
\r
3267 // draining stream.
\r
3268 if ( handle->drainCounter == 0 ) {
\r
3269 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3270 double streamTime = getStreamTime();
\r
3271 RtAudioStreamStatus status = 0;
\r
3272 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3273 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3276 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3277 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3280 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3281 stream_.bufferSize, streamTime, status, info->userData );
\r
3282 if ( handle->drainCounter == 2 ) {
\r
3283 // MUTEX_UNLOCK( &stream_.mutex );
\r
3285 unsigned threadId;
\r
3286 stopThreadCalled = true;
\r
3287 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3288 &stream_.callbackInfo, 0, &threadId );
\r
3291 else if ( handle->drainCounter == 1 )
\r
3292 handle->internalDrain = true;
\r
3295 unsigned int nChannels, bufferBytes, i, j;
\r
3296 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3297 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3299 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3301 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3303 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3304 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3305 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3309 else if ( stream_.doConvertBuffer[0] ) {
\r
3311 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3312 if ( stream_.doByteSwap[0] )
\r
3313 byteSwapBuffer( stream_.deviceBuffer,
\r
3314 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3315 stream_.deviceFormat[0] );
\r
3317 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3318 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3319 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3320 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3326 if ( stream_.doByteSwap[0] )
\r
3327 byteSwapBuffer( stream_.userBuffer[0],
\r
3328 stream_.bufferSize * stream_.nUserChannels[0],
\r
3329 stream_.userFormat );
\r
3331 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3332 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3333 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3334 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3339 if ( handle->drainCounter ) {
\r
3340 handle->drainCounter++;
\r
3345 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3347 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3349 if (stream_.doConvertBuffer[1]) {
\r
3351 // Always interleave ASIO input data.
\r
3352 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3353 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3354 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3355 handle->bufferInfos[i].buffers[bufferIndex],
\r
3359 if ( stream_.doByteSwap[1] )
\r
3360 byteSwapBuffer( stream_.deviceBuffer,
\r
3361 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3362 stream_.deviceFormat[1] );
\r
3363 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3367 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3368 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3369 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3370 handle->bufferInfos[i].buffers[bufferIndex],
\r
3375 if ( stream_.doByteSwap[1] )
\r
3376 byteSwapBuffer( stream_.userBuffer[1],
\r
3377 stream_.bufferSize * stream_.nUserChannels[1],
\r
3378 stream_.userFormat );
\r
3383 // The following call was suggested by Malte Clasen. While the API
\r
3384 // documentation indicates it should not be required, some device
\r
3385 // drivers apparently do not function correctly without it.
\r
3386 ASIOOutputReady();
\r
3388 // MUTEX_UNLOCK( &stream_.mutex );
\r
3390 RtApi::tickStreamTime();
\r
3394 void sampleRateChanged( ASIOSampleRate sRate )
\r
3396 // The ASIO documentation says that this usually only happens during
\r
3397 // external sync. Audio processing is not stopped by the driver,
\r
3398 // actual sample rate might not have even changed, maybe only the
\r
3399 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3402 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3404 object->stopStream();
\r
3406 catch ( RtError &exception ) {
\r
3407 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3411 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3414 long asioMessages( long selector, long value, void* message, double* opt )
\r
3418 switch( selector ) {
\r
3419 case kAsioSelectorSupported:
\r
3420 if ( value == kAsioResetRequest
\r
3421 || value == kAsioEngineVersion
\r
3422 || value == kAsioResyncRequest
\r
3423 || value == kAsioLatenciesChanged
\r
3424 // The following three were added for ASIO 2.0, you don't
\r
3425 // necessarily have to support them.
\r
3426 || value == kAsioSupportsTimeInfo
\r
3427 || value == kAsioSupportsTimeCode
\r
3428 || value == kAsioSupportsInputMonitor)
\r
3431 case kAsioResetRequest:
\r
3432 // Defer the task and perform the reset of the driver during the
\r
3433 // next "safe" situation. You cannot reset the driver right now,
\r
3434 // as this code is called from the driver. Reset the driver is
\r
3435 // done by completely destruct is. I.e. ASIOStop(),
\r
3436 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3438 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3441 case kAsioResyncRequest:
\r
3442 // This informs the application that the driver encountered some
\r
3443 // non-fatal data loss. It is used for synchronization purposes
\r
3444 // of different media. Added mainly to work around the Win16Mutex
\r
3445 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3446 // which could lose data because the Mutex was held too long by
\r
3447 // another thread. However a driver can issue it in other
\r
3448 // situations, too.
\r
3449 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3453 case kAsioLatenciesChanged:
\r
3454 // This will inform the host application that the drivers were
\r
3455 // latencies changed. Beware, it this does not mean that the
\r
3456 // buffer sizes have changed! You might need to update internal
\r
3458 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3461 case kAsioEngineVersion:
\r
3462 // Return the supported ASIO version of the host application. If
\r
3463 // a host application does not implement this selector, ASIO 1.0
\r
3464 // is assumed by the driver.
\r
3467 case kAsioSupportsTimeInfo:
\r
3468 // Informs the driver whether the
\r
3469 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3470 // For compatibility with ASIO 1.0 drivers the host application
\r
3471 // should always support the "old" bufferSwitch method, too.
\r
3474 case kAsioSupportsTimeCode:
\r
3475 // Informs the driver whether application is interested in time
\r
3476 // code info. If an application does not need to know about time
\r
3477 // code, the driver has less work to do.
\r
3484 static const char* getAsioErrorString( ASIOError result )
\r
3489 const char*message;
\r
3492 static Messages m[] =
\r
3494 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3495 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3496 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3497 { ASE_InvalidMode, "Invalid mode." },
\r
3498 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3499 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3500 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3503 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3504 if ( m[i].value == result ) return m[i].message;
\r
3506 return "Unknown error.";
\r
3508 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3512 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3514 // Modified by Robin Davies, October 2005
\r
3515 // - Improvements to DirectX pointer chasing.
\r
3516 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3517 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3518 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3519 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3521 #include <dsound.h>
\r
3522 #include <assert.h>
\r
3523 #include <algorithm>
\r
3525 #if defined(__MINGW32__)
\r
3526 // missing from latest mingw winapi
\r
3527 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3528 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3529 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3530 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3533 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3535 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3536 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3539 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3541 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3542 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3543 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3544 return pointer >= earlierPointer && pointer < laterPointer;
\r
3547 // A structure to hold various information related to the DirectSound
\r
3548 // API implementation.
\r
3550 unsigned int drainCounter; // Tracks callback counts when draining
\r
3551 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3555 UINT bufferPointer[2];
\r
3556 DWORD dsBufferSize[2];
\r
3557 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3561 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3564 // Declarations for utility functions, callbacks, and structures
\r
3565 // specific to the DirectSound implementation.
\r
3566 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3567 LPCTSTR description,
\r
3569 LPVOID lpContext );
\r
3571 static const char* getErrorString( int code );
\r
3573 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3582 : found(false) { validId[0] = false; validId[1] = false; }
\r
3585 std::vector< DsDevice > dsDevices;
\r
3587 RtApiDs :: RtApiDs()
\r
3589 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3590 // accept whatever the mainline chose for a threading model.
\r
3591 coInitialized_ = false;
\r
3592 HRESULT hr = CoInitialize( NULL );
\r
3593 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3596 RtApiDs :: ~RtApiDs()
\r
3598 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3599 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3602 // The DirectSound default output is always the first device.
\r
3603 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3608 // The DirectSound default input is always the first input device,
\r
3609 // which is the first capture device enumerated.
\r
3610 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3615 unsigned int RtApiDs :: getDeviceCount( void )
\r
3617 // Set query flag for previously found devices to false, so that we
\r
3618 // can check for any devices that have disappeared.
\r
3619 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3620 dsDevices[i].found = false;
\r
3622 // Query DirectSound devices.
\r
3623 bool isInput = false;
\r
3624 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3625 if ( FAILED( result ) ) {
\r
3626 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3627 errorText_ = errorStream_.str();
\r
3628 error( RtError::WARNING );
\r
3631 // Query DirectSoundCapture devices.
\r
3633 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3634 if ( FAILED( result ) ) {
\r
3635 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3636 errorText_ = errorStream_.str();
\r
3637 error( RtError::WARNING );
\r
3640 // Clean out any devices that may have disappeared.
\r
3641 std::vector< DsDevice > :: iterator it;
\r
3642 for ( it=dsDevices.begin(); it < dsDevices.end(); it++ )
\r
3643 if ( it->found == false ) dsDevices.erase( it );
\r
3645 return dsDevices.size();
\r
3648 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3650 RtAudio::DeviceInfo info;
\r
3651 info.probed = false;
\r
3653 if ( dsDevices.size() == 0 ) {
\r
3654 // Force a query of all devices
\r
3656 if ( dsDevices.size() == 0 ) {
\r
3657 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3658 error( RtError::INVALID_USE );
\r
3662 if ( device >= dsDevices.size() ) {
\r
3663 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3664 error( RtError::INVALID_USE );
\r
3668 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3670 LPDIRECTSOUND output;
\r
3672 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3673 if ( FAILED( result ) ) {
\r
3674 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3675 errorText_ = errorStream_.str();
\r
3676 error( RtError::WARNING );
\r
3680 outCaps.dwSize = sizeof( outCaps );
\r
3681 result = output->GetCaps( &outCaps );
\r
3682 if ( FAILED( result ) ) {
\r
3683 output->Release();
\r
3684 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3685 errorText_ = errorStream_.str();
\r
3686 error( RtError::WARNING );
\r
3690 // Get output channel information.
\r
3691 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3693 // Get sample rate information.
\r
3694 info.sampleRates.clear();
\r
3695 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3696 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3697 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3698 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3701 // Get format information.
\r
3702 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3703 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3705 output->Release();
\r
3707 if ( getDefaultOutputDevice() == device )
\r
3708 info.isDefaultOutput = true;
\r
3710 if ( dsDevices[ device ].validId[1] == false ) {
\r
3711 info.name = dsDevices[ device ].name;
\r
3712 info.probed = true;
\r
3718 LPDIRECTSOUNDCAPTURE input;
\r
3719 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3720 if ( FAILED( result ) ) {
\r
3721 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3722 errorText_ = errorStream_.str();
\r
3723 error( RtError::WARNING );
\r
3728 inCaps.dwSize = sizeof( inCaps );
\r
3729 result = input->GetCaps( &inCaps );
\r
3730 if ( FAILED( result ) ) {
\r
3732 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3733 errorText_ = errorStream_.str();
\r
3734 error( RtError::WARNING );
\r
3738 // Get input channel information.
\r
3739 info.inputChannels = inCaps.dwChannels;
\r
3741 // Get sample rate and format information.
\r
3742 std::vector<unsigned int> rates;
\r
3743 if ( inCaps.dwChannels >= 2 ) {
\r
3744 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3745 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3746 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3747 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3748 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3749 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3750 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3751 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3753 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3754 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3755 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3756 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3757 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3759 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3760 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3761 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3762 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3763 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3766 else if ( inCaps.dwChannels == 1 ) {
\r
3767 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3768 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3769 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3770 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3771 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3772 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3773 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3774 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3776 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3777 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3778 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3779 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3780 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3782 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3783 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3784 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3785 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3786 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3789 else info.inputChannels = 0; // technically, this would be an error
\r
3793 if ( info.inputChannels == 0 ) return info;
\r
3795 // Copy the supported rates to the info structure but avoid duplication.
\r
3797 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3799 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3800 if ( rates[i] == info.sampleRates[j] ) {
\r
3805 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3807 sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3809 // If device opens for both playback and capture, we determine the channels.
\r
3810 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3811 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3813 if ( device == 0 ) info.isDefaultInput = true;
\r
3815 // Copy name and return.
\r
3816 info.name = dsDevices[ device ].name;
\r
3817 info.probed = true;
\r
3821 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3822 unsigned int firstChannel, unsigned int sampleRate,
\r
3823 RtAudioFormat format, unsigned int *bufferSize,
\r
3824 RtAudio::StreamOptions *options )
\r
3826 if ( channels + firstChannel > 2 ) {
\r
3827 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3831 unsigned int nDevices = dsDevices.size();
\r
3832 if ( nDevices == 0 ) {
\r
3833 // This should not happen because a check is made before this function is called.
\r
3834 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3838 if ( device >= nDevices ) {
\r
3839 // This should not happen because a check is made before this function is called.
\r
3840 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3844 if ( mode == OUTPUT ) {
\r
3845 if ( dsDevices[ device ].validId[0] == false ) {
\r
3846 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3847 errorText_ = errorStream_.str();
\r
3851 else { // mode == INPUT
\r
3852 if ( dsDevices[ device ].validId[1] == false ) {
\r
3853 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3854 errorText_ = errorStream_.str();
\r
3859 // According to a note in PortAudio, using GetDesktopWindow()
\r
3860 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3861 // that occur when the application's window is not the foreground
\r
3862 // window. Also, if the application window closes before the
\r
3863 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3864 // problems when using GetDesktopWindow() but it seems fine now
\r
3865 // (January 2010). I'll leave it commented here.
\r
3866 // HWND hWnd = GetForegroundWindow();
\r
3867 HWND hWnd = GetDesktopWindow();
\r
3869 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3870 // two. This is a judgement call and a value of two is probably too
\r
3871 // low for capture, but it should work for playback.
\r
3873 if ( options ) nBuffers = options->numberOfBuffers;
\r
3874 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3875 if ( nBuffers < 2 ) nBuffers = 3;
\r
3877 // Check the lower range of the user-specified buffer size and set
\r
3878 // (arbitrarily) to a lower bound of 32.
\r
3879 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3881 // Create the wave format structure. The data format setting will
\r
3882 // be determined later.
\r
3883 WAVEFORMATEX waveFormat;
\r
3884 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3885 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3886 waveFormat.nChannels = channels + firstChannel;
\r
3887 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3889 // Determine the device buffer size. By default, we'll use the value
\r
3890 // defined above (32K), but we will grow it to make allowances for
\r
3891 // very large software buffer sizes.
\r
3892 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;
\r
3893 DWORD dsPointerLeadTime = 0;
\r
3895 void *ohandle = 0, *bhandle = 0;
\r
3897 if ( mode == OUTPUT ) {
\r
3899 LPDIRECTSOUND output;
\r
3900 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3901 if ( FAILED( result ) ) {
\r
3902 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3903 errorText_ = errorStream_.str();
\r
3908 outCaps.dwSize = sizeof( outCaps );
\r
3909 result = output->GetCaps( &outCaps );
\r
3910 if ( FAILED( result ) ) {
\r
3911 output->Release();
\r
3912 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3913 errorText_ = errorStream_.str();
\r
3917 // Check channel information.
\r
3918 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3919 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3920 errorText_ = errorStream_.str();
\r
3924 // Check format information. Use 16-bit format unless not
\r
3925 // supported or user requests 8-bit.
\r
3926 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3927 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3928 waveFormat.wBitsPerSample = 16;
\r
3929 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3932 waveFormat.wBitsPerSample = 8;
\r
3933 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3935 stream_.userFormat = format;
\r
3937 // Update wave format structure and buffer information.
\r
3938 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3939 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3940 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3942 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
3943 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
3944 dsBufferSize *= 2;
\r
3946 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
3947 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
3948 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
3949 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
3950 if ( FAILED( result ) ) {
\r
3951 output->Release();
\r
3952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
3953 errorText_ = errorStream_.str();
\r
3957 // Even though we will write to the secondary buffer, we need to
\r
3958 // access the primary buffer to set the correct output format
\r
3959 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
3960 // buffer description.
\r
3961 DSBUFFERDESC bufferDescription;
\r
3962 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3963 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3964 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
3966 // Obtain the primary buffer
\r
3967 LPDIRECTSOUNDBUFFER buffer;
\r
3968 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3969 if ( FAILED( result ) ) {
\r
3970 output->Release();
\r
3971 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
3972 errorText_ = errorStream_.str();
\r
3976 // Set the primary DS buffer sound format.
\r
3977 result = buffer->SetFormat( &waveFormat );
\r
3978 if ( FAILED( result ) ) {
\r
3979 output->Release();
\r
3980 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
3981 errorText_ = errorStream_.str();
\r
3985 // Setup the secondary DS buffer description.
\r
3986 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3987 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3988 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
3989 DSBCAPS_GLOBALFOCUS |
\r
3990 DSBCAPS_GETCURRENTPOSITION2 |
\r
3991 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
3992 bufferDescription.dwBufferBytes = dsBufferSize;
\r
3993 bufferDescription.lpwfxFormat = &waveFormat;
\r
3995 // Try to create the secondary DS buffer. If that doesn't work,
\r
3996 // try to use software mixing. Otherwise, there's a problem.
\r
3997 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3998 if ( FAILED( result ) ) {
\r
3999 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4000 DSBCAPS_GLOBALFOCUS |
\r
4001 DSBCAPS_GETCURRENTPOSITION2 |
\r
4002 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4003 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4004 if ( FAILED( result ) ) {
\r
4005 output->Release();
\r
4006 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4007 errorText_ = errorStream_.str();
\r
4012 // Get the buffer size ... might be different from what we specified.
\r
4014 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4015 result = buffer->GetCaps( &dsbcaps );
\r
4016 if ( FAILED( result ) ) {
\r
4017 output->Release();
\r
4018 buffer->Release();
\r
4019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4020 errorText_ = errorStream_.str();
\r
4024 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4026 // Lock the DS buffer
\r
4029 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4030 if ( FAILED( result ) ) {
\r
4031 output->Release();
\r
4032 buffer->Release();
\r
4033 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4034 errorText_ = errorStream_.str();
\r
4038 // Zero the DS buffer
\r
4039 ZeroMemory( audioPtr, dataLen );
\r
4041 // Unlock the DS buffer
\r
4042 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4043 if ( FAILED( result ) ) {
\r
4044 output->Release();
\r
4045 buffer->Release();
\r
4046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4047 errorText_ = errorStream_.str();
\r
4051 ohandle = (void *) output;
\r
4052 bhandle = (void *) buffer;
\r
4055 if ( mode == INPUT ) {
\r
4057 LPDIRECTSOUNDCAPTURE input;
\r
4058 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4059 if ( FAILED( result ) ) {
\r
4060 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4061 errorText_ = errorStream_.str();
\r
4066 inCaps.dwSize = sizeof( inCaps );
\r
4067 result = input->GetCaps( &inCaps );
\r
4068 if ( FAILED( result ) ) {
\r
4070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4071 errorText_ = errorStream_.str();
\r
4075 // Check channel information.
\r
4076 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4077 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4081 // Check format information. Use 16-bit format unless user
\r
4082 // requests 8-bit.
\r
4083 DWORD deviceFormats;
\r
4084 if ( channels + firstChannel == 2 ) {
\r
4085 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4086 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4087 waveFormat.wBitsPerSample = 8;
\r
4088 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4090 else { // assume 16-bit is supported
\r
4091 waveFormat.wBitsPerSample = 16;
\r
4092 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4095 else { // channel == 1
\r
4096 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4097 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4098 waveFormat.wBitsPerSample = 8;
\r
4099 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4101 else { // assume 16-bit is supported
\r
4102 waveFormat.wBitsPerSample = 16;
\r
4103 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4106 stream_.userFormat = format;
\r
4108 // Update wave format structure and buffer information.
\r
4109 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4110 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4111 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4113 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4114 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4115 dsBufferSize *= 2;
\r
4117 // Setup the secondary DS buffer description.
\r
4118 DSCBUFFERDESC bufferDescription;
\r
4119 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4120 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4121 bufferDescription.dwFlags = 0;
\r
4122 bufferDescription.dwReserved = 0;
\r
4123 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4124 bufferDescription.lpwfxFormat = &waveFormat;
\r
4126 // Create the capture buffer.
\r
4127 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4128 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4129 if ( FAILED( result ) ) {
\r
4131 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4132 errorText_ = errorStream_.str();
\r
4136 // Get the buffer size ... might be different from what we specified.
\r
4137 DSCBCAPS dscbcaps;
\r
4138 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4139 result = buffer->GetCaps( &dscbcaps );
\r
4140 if ( FAILED( result ) ) {
\r
4142 buffer->Release();
\r
4143 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4144 errorText_ = errorStream_.str();
\r
4148 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4150 // NOTE: We could have a problem here if this is a duplex stream
\r
4151 // and the play and capture hardware buffer sizes are different
\r
4152 // (I'm actually not sure if that is a problem or not).
\r
4153 // Currently, we are not verifying that.
\r
4155 // Lock the capture buffer
\r
4158 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4159 if ( FAILED( result ) ) {
\r
4161 buffer->Release();
\r
4162 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4163 errorText_ = errorStream_.str();
\r
4167 // Zero the buffer
\r
4168 ZeroMemory( audioPtr, dataLen );
\r
4170 // Unlock the buffer
\r
4171 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4172 if ( FAILED( result ) ) {
\r
4174 buffer->Release();
\r
4175 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4176 errorText_ = errorStream_.str();
\r
4180 ohandle = (void *) input;
\r
4181 bhandle = (void *) buffer;
\r
4184 // Set various stream parameters
\r
4185 DsHandle *handle = 0;
\r
4186 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4187 stream_.nUserChannels[mode] = channels;
\r
4188 stream_.bufferSize = *bufferSize;
\r
4189 stream_.channelOffset[mode] = firstChannel;
\r
4190 stream_.deviceInterleaved[mode] = true;
\r
4191 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4192 else stream_.userInterleaved = true;
\r
4194 // Set flag for buffer conversion
\r
4195 stream_.doConvertBuffer[mode] = false;
\r
4196 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4197 stream_.doConvertBuffer[mode] = true;
\r
4198 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4199 stream_.doConvertBuffer[mode] = true;
\r
4200 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4201 stream_.nUserChannels[mode] > 1 )
\r
4202 stream_.doConvertBuffer[mode] = true;
\r
4204 // Allocate necessary internal buffers
\r
4205 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4206 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4207 if ( stream_.userBuffer[mode] == NULL ) {
\r
4208 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4212 if ( stream_.doConvertBuffer[mode] ) {
\r
4214 bool makeBuffer = true;
\r
4215 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4216 if ( mode == INPUT ) {
\r
4217 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4218 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4219 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4223 if ( makeBuffer ) {
\r
4224 bufferBytes *= *bufferSize;
\r
4225 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4226 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4227 if ( stream_.deviceBuffer == NULL ) {
\r
4228 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4234 // Allocate our DsHandle structures for the stream.
\r
4235 if ( stream_.apiHandle == 0 ) {
\r
4237 handle = new DsHandle;
\r
4239 catch ( std::bad_alloc& ) {
\r
4240 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4244 // Create a manual-reset event.
\r
4245 handle->condition = CreateEvent( NULL, // no security
\r
4246 TRUE, // manual-reset
\r
4247 FALSE, // non-signaled initially
\r
4248 NULL ); // unnamed
\r
4249 stream_.apiHandle = (void *) handle;
\r
4252 handle = (DsHandle *) stream_.apiHandle;
\r
4253 handle->id[mode] = ohandle;
\r
4254 handle->buffer[mode] = bhandle;
\r
4255 handle->dsBufferSize[mode] = dsBufferSize;
\r
4256 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4258 stream_.device[mode] = device;
\r
4259 stream_.state = STREAM_STOPPED;
\r
4260 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4261 // We had already set up an output stream.
\r
4262 stream_.mode = DUPLEX;
\r
4264 stream_.mode = mode;
\r
4265 stream_.nBuffers = nBuffers;
\r
4266 stream_.sampleRate = sampleRate;
\r
4268 // Setup the buffer conversion information structure.
\r
4269 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4271 // Setup the callback thread.
\r
4272 if ( stream_.callbackInfo.isRunning == false ) {
\r
4273 unsigned threadId;
\r
4274 stream_.callbackInfo.isRunning = true;
\r
4275 stream_.callbackInfo.object = (void *) this;
\r
4276 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4277 &stream_.callbackInfo, 0, &threadId );
\r
4278 if ( stream_.callbackInfo.thread == 0 ) {
\r
4279 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4283 // Boost DS thread priority
\r
4284 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4290 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4291 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4292 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4293 if ( buffer ) buffer->Release();
\r
4294 object->Release();
\r
4296 if ( handle->buffer[1] ) {
\r
4297 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4298 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4299 if ( buffer ) buffer->Release();
\r
4300 object->Release();
\r
4302 CloseHandle( handle->condition );
\r
4304 stream_.apiHandle = 0;
\r
4307 for ( int i=0; i<2; i++ ) {
\r
4308 if ( stream_.userBuffer[i] ) {
\r
4309 free( stream_.userBuffer[i] );
\r
4310 stream_.userBuffer[i] = 0;
\r
4314 if ( stream_.deviceBuffer ) {
\r
4315 free( stream_.deviceBuffer );
\r
4316 stream_.deviceBuffer = 0;
\r
4322 void RtApiDs :: closeStream()
\r
4324 if ( stream_.state == STREAM_CLOSED ) {
\r
4325 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4326 error( RtError::WARNING );
\r
4330 // Stop the callback thread.
\r
4331 stream_.callbackInfo.isRunning = false;
\r
4332 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4333 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4335 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4337 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4338 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4339 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4342 buffer->Release();
\r
4344 object->Release();
\r
4346 if ( handle->buffer[1] ) {
\r
4347 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4348 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4351 buffer->Release();
\r
4353 object->Release();
\r
4355 CloseHandle( handle->condition );
\r
4357 stream_.apiHandle = 0;
\r
4360 for ( int i=0; i<2; i++ ) {
\r
4361 if ( stream_.userBuffer[i] ) {
\r
4362 free( stream_.userBuffer[i] );
\r
4363 stream_.userBuffer[i] = 0;
\r
4367 if ( stream_.deviceBuffer ) {
\r
4368 free( stream_.deviceBuffer );
\r
4369 stream_.deviceBuffer = 0;
\r
4372 stream_.mode = UNINITIALIZED;
\r
4373 stream_.state = STREAM_CLOSED;
\r
4376 void RtApiDs :: startStream()
\r
4379 if ( stream_.state == STREAM_RUNNING ) {
\r
4380 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4381 error( RtError::WARNING );
\r
4385 //MUTEX_LOCK( &stream_.mutex );
\r
4387 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4389 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4390 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4391 // this is already in effect.
\r
4392 timeBeginPeriod( 1 );
\r
4394 buffersRolling = false;
\r
4395 duplexPrerollBytes = 0;
\r
4397 if ( stream_.mode == DUPLEX ) {
\r
4398 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4399 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4402 HRESULT result = 0;
\r
4403 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4405 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4406 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4407 if ( FAILED( result ) ) {
\r
4408 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4409 errorText_ = errorStream_.str();
\r
4414 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4416 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4417 result = buffer->Start( DSCBSTART_LOOPING );
\r
4418 if ( FAILED( result ) ) {
\r
4419 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4420 errorText_ = errorStream_.str();
\r
4425 handle->drainCounter = 0;
\r
4426 handle->internalDrain = false;
\r
4427 ResetEvent( handle->condition );
\r
4428 stream_.state = STREAM_RUNNING;
\r
4431 // MUTEX_UNLOCK( &stream_.mutex );
\r
4433 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4436 void RtApiDs :: stopStream()
\r
4439 if ( stream_.state == STREAM_STOPPED ) {
\r
4440 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4441 error( RtError::WARNING );
\r
4446 MUTEX_LOCK( &stream_.mutex );
\r
4448 if ( stream_.state == STREAM_STOPPED ) {
\r
4449 MUTEX_UNLOCK( &stream_.mutex );
\r
4454 HRESULT result = 0;
\r
4457 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4458 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4459 if ( handle->drainCounter == 0 ) {
\r
4460 handle->drainCounter = 2;
\r
4461 // MUTEX_UNLOCK( &stream_.mutex );
\r
4462 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4463 //ResetEvent( handle->condition );
\r
4464 // MUTEX_LOCK( &stream_.mutex );
\r
4467 stream_.state = STREAM_STOPPED;
\r
4469 // Stop the buffer and clear memory
\r
4470 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4471 result = buffer->Stop();
\r
4472 if ( FAILED( result ) ) {
\r
4473 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4474 errorText_ = errorStream_.str();
\r
4478 // Lock the buffer and clear it so that if we start to play again,
\r
4479 // we won't have old data playing.
\r
4480 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4481 if ( FAILED( result ) ) {
\r
4482 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4483 errorText_ = errorStream_.str();
\r
4487 // Zero the DS buffer
\r
4488 ZeroMemory( audioPtr, dataLen );
\r
4490 // Unlock the DS buffer
\r
4491 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4492 if ( FAILED( result ) ) {
\r
4493 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4494 errorText_ = errorStream_.str();
\r
4498 // If we start playing again, we must begin at beginning of buffer.
\r
4499 handle->bufferPointer[0] = 0;
\r
4502 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4503 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4507 stream_.state = STREAM_STOPPED;
\r
4509 result = buffer->Stop();
\r
4510 if ( FAILED( result ) ) {
\r
4511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4512 errorText_ = errorStream_.str();
\r
4516 // Lock the buffer and clear it so that if we start to play again,
\r
4517 // we won't have old data playing.
\r
4518 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4519 if ( FAILED( result ) ) {
\r
4520 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4521 errorText_ = errorStream_.str();
\r
4525 // Zero the DS buffer
\r
4526 ZeroMemory( audioPtr, dataLen );
\r
4528 // Unlock the DS buffer
\r
4529 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4530 if ( FAILED( result ) ) {
\r
4531 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4532 errorText_ = errorStream_.str();
\r
4536 // If we start recording again, we must begin at beginning of buffer.
\r
4537 handle->bufferPointer[1] = 0;
\r
4541 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4542 // MUTEX_UNLOCK( &stream_.mutex );
\r
4544 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
// Aborts the stream without draining: setting drainCounter to 2 tells the
// callback thread to stop immediately (the actual stop is presumably
// delegated to stopStream() in lines lost from this chunk -- TODO confirm).
// NOTE(review): the function's opening brace, the 'return' after the
// warning, and the trailing statements/braces are not visible here.
4547 void RtApiDs :: abortStream()

4550 if ( stream_.state == STREAM_STOPPED ) {

4551 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

4552 error( RtError::WARNING );

4556 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4557 handle->drainCounter = 2;
\r
// Core per-buffer engine for the DirectSound backend.  Invoked repeatedly by
// the callback thread: fetches fresh audio from the user callback (unless
// draining), waits for the device cursors to reach a safe region, then copies
// the converted output into the DS playback ring buffer and/or reads the
// capture ring buffer back into the user buffer.  Also performs the initial
// "buffers rolling" synchronization of the playback and capture cursors.
// NOTE(review): this chunk has lost many lines in extraction (opening brace,
// early returns, 'HRESULT result', 'char *buffer' / 'long bufferBytes'
// declarations, loop headers and closing braces).  It also contains
// HTML-entity corruption: '&curren' in '&currentWritePointer' /
// '&currentReadPointer' was decoded to the character U+00A4, producing the
// garbage tokens seen below.  Restore against the canonical source.
4562 void RtApiDs :: callbackEvent()

4564 if ( stream_.state == STREAM_STOPPED ) {

4565 Sleep( 50 ); // sleep 50 milliseconds

4569 if ( stream_.state == STREAM_CLOSED ) {

4570 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";

4571 error( RtError::WARNING );

4575 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

4576 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4578 // Check if we were draining the stream and signal is finished.

// Drain is complete once the counter has been incremented past the number
// of internal buffers; wake any thread blocked in stopStream().
4579 if ( handle->drainCounter > stream_.nBuffers + 2 ) {

4580 if ( handle->internalDrain == false )

4581 SetEvent( handle->condition );

4588 MUTEX_LOCK( &stream_.mutex );

4590 // The state might change while waiting on a mutex.

4591 if ( stream_.state == STREAM_STOPPED ) {

4592 MUTEX_UNLOCK( &stream_.mutex );

4597 // Invoke user callback to get fresh output data UNLESS we are

4598 // draining stream.

4599 if ( handle->drainCounter == 0 ) {

4600 RtAudioCallback callback = (RtAudioCallback) info->callback;

4601 double streamTime = getStreamTime();

4602 RtAudioStreamStatus status = 0;

// Report (and clear) any under/overflow flags set by earlier iterations.
4603 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

4604 status |= RTAUDIO_OUTPUT_UNDERFLOW;

4605 handle->xrun[0] = false;

4607 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

4608 status |= RTAUDIO_INPUT_OVERFLOW;

4609 handle->xrun[1] = false;

// The callback's return value doubles as a drain request: 1 = drain then
// stop (internal), 2 = abort immediately.
4611 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],

4612 stream_.bufferSize, streamTime, status, info->userData );

4613 if ( handle->drainCounter == 2 ) {

4614 // MUTEX_UNLOCK( &stream_.mutex );

4618 else if ( handle->drainCounter == 1 )

4619 handle->internalDrain = true;

4623 DWORD currentWritePointer, safeWritePointer;

4624 DWORD currentReadPointer, safeReadPointer;

4625 UINT nextWritePointer;

4627 LPVOID buffer1 = NULL;

4628 LPVOID buffer2 = NULL;

4629 DWORD bufferSize1 = 0;

4630 DWORD bufferSize2 = 0;

// One-time startup synchronization of the device cursors.
4635 if ( buffersRolling == false ) {

4636 if ( stream_.mode == DUPLEX ) {

4637 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

4639 // It takes a while for the devices to get rolling. As a result,

4640 // there's no guarantee that the capture and write device pointers

4641 // will move in lockstep. Wait here for both devices to start

4642 // rolling, and then set our buffer pointers accordingly.

4643 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600

4644 // bytes later than the write buffer.

4646 // Stub: a serious risk of having a pre-emptive scheduling round

4647 // take place between the two GetCurrentPosition calls... but I'm

4648 // really not sure how to solve the problem. Temporarily boost to

4649 // Realtime priority, maybe; but I'm not sure what priority the

4650 // DirectSound service threads run at. We *should* be roughly

4651 // within a ms or so of correct.

4653 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4654 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4656 DWORD startSafeWritePointer, startSafeReadPointer;

4658 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );

4659 if ( FAILED( result ) ) {

4660 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4661 errorText_ = errorStream_.str();

4662 error( RtError::SYSTEM_ERROR );

4664 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );

4665 if ( FAILED( result ) ) {

4666 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4667 errorText_ = errorStream_.str();

4668 error( RtError::SYSTEM_ERROR );

// Poll (inside a loop whose header is not visible in this chunk) until both
// cursors have moved from their startup positions.
4671 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );

4672 if ( FAILED( result ) ) {

4673 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4674 errorText_ = errorStream_.str();

4675 error( RtError::SYSTEM_ERROR );

4677 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );

4678 if ( FAILED( result ) ) {

4679 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4680 errorText_ = errorStream_.str();

4681 error( RtError::SYSTEM_ERROR );

4683 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;

4687 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

4689 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4690 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4691 handle->bufferPointer[1] = safeReadPointer;

4693 else if ( stream_.mode == OUTPUT ) {

4695 // Set the proper nextWritePosition after initial startup.

4696 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// NOTE(review): '¤tWritePointer' below is HTML-entity corruption of
// '&currentWritePointer' ('&curren' -> U+00A4) -- restore before compiling.
4697 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4698 if ( FAILED( result ) ) {

4699 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4700 errorText_ = errorStream_.str();

4701 error( RtError::SYSTEM_ERROR );

4703 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4704 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4707 buffersRolling = true;

// ---- Playback side: copy the (converted) user buffer into the DS ring. ----
4710 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4712 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4714 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

// NOTE(review): 'buffer' and 'bufferBytes' are used without visible
// declarations -- presumably lost in extraction.  TODO confirm.
4715 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4716 bufferBytes *= formatBytes( stream_.userFormat );

4717 memset( stream_.userBuffer[0], 0, bufferBytes );

4720 // Setup parameters and do buffer conversion if necessary.

4721 if ( stream_.doConvertBuffer[0] ) {

4722 buffer = stream_.deviceBuffer;

4723 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

4724 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];

4725 bufferBytes *= formatBytes( stream_.deviceFormat[0] );

4728 buffer = stream_.userBuffer[0];

4729 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4730 bufferBytes *= formatBytes( stream_.userFormat );

4733 // No byte swapping necessary in DirectSound implementation.

4735 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is

4736 // unsigned. So, we need to convert our signed 8-bit data here to

4738 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )

4739 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );

4741 DWORD dsBufferSize = handle->dsBufferSize[0];

4742 nextWritePointer = handle->bufferPointer[0];

4744 DWORD endWrite, leadPointer;

4746 // Find out where the read and "safe write" pointers are.

// NOTE(review): same '&curren' -> U+00A4 corruption as above.
4747 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4748 if ( FAILED( result ) ) {

4749 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4750 errorText_ = errorStream_.str();

4751 error( RtError::SYSTEM_ERROR );

4754 // We will copy our output buffer into the region between

4755 // safeWritePointer and leadPointer. If leadPointer is not

4756 // beyond the next endWrite position, wait until it is.

4757 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];

4758 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;

4759 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;

4760 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset

4761 endWrite = nextWritePointer + bufferBytes;

4763 // Check whether the entire write region is behind the play pointer.

4764 if ( leadPointer >= endWrite ) break;

4766 // If we are here, then we must wait until the leadPointer advances

4767 // beyond the end of our next write region. We use the

4768 // Sleep() function to suspend operation until that happens.

4769 double millis = ( endWrite - leadPointer ) * 1000.0;

4770 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);

4771 if ( millis < 1.0 ) millis = 1.0;

4772 Sleep( (DWORD) millis );

// If our write region overlaps the hardware's play/write cursor span, we
// have underrun: flag the xrun and resynchronize the write pointer.
4775 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )

4776 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {

4777 // We've strayed into the forbidden zone ... resync the read pointer.

4778 handle->xrun[0] = true;

4779 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;

4780 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;

4781 handle->bufferPointer[0] = nextWritePointer;

4782 endWrite = nextWritePointer + bufferBytes;

4785 // Lock free space in the buffer

// Lock may return two regions (buffer1/buffer2) when the span wraps the
// ring boundary.
4786 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,

4787 &bufferSize1, &buffer2, &bufferSize2, 0 );

4788 if ( FAILED( result ) ) {

4789 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";

4790 errorText_ = errorStream_.str();

4791 error( RtError::SYSTEM_ERROR );

4794 // Copy our buffer into the DS buffer

4795 CopyMemory( buffer1, buffer, bufferSize1 );

4796 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );

4798 // Update our buffer offset and unlock sound buffer

// NOTE(review): 'result' is not assigned from Unlock() here, so the
// FAILED(result) test below re-checks the Lock() result -- looks like a
// pre-existing bug; confirm against upstream before changing.
4799 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4800 if ( FAILED( result ) ) {

4801 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";

4802 errorText_ = errorStream_.str();

4803 error( RtError::SYSTEM_ERROR );

4805 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4806 handle->bufferPointer[0] = nextWritePointer;

4808 if ( handle->drainCounter ) {

4809 handle->drainCounter++;

// ---- Capture side: read from the DS capture ring into the user buffer. ----
4814 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4816 // Setup parameters.

4817 if ( stream_.doConvertBuffer[1] ) {

4818 buffer = stream_.deviceBuffer;

4819 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];

4820 bufferBytes *= formatBytes( stream_.deviceFormat[1] );

4823 buffer = stream_.userBuffer[1];

4824 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];

4825 bufferBytes *= formatBytes( stream_.userFormat );

4828 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4829 long nextReadPointer = handle->bufferPointer[1];

4830 DWORD dsBufferSize = handle->dsBufferSize[1];

4832 // Find out where the write and "safe read" pointers are.

// NOTE(review): '¤tReadPointer' is the same '&curren' -> U+00A4
// corruption of '&currentReadPointer'.
4833 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4834 if ( FAILED( result ) ) {

4835 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4836 errorText_ = errorStream_.str();

4837 error( RtError::SYSTEM_ERROR );

4840 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4841 DWORD endRead = nextReadPointer + bufferBytes;

4843 // Handling depends on whether we are INPUT or DUPLEX.

4844 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,

4845 // then a wait here will drag the write pointers into the forbidden zone.

4847 // In DUPLEX mode, rather than wait, we will back off the read pointer until

4848 // it's in a safe position. This causes dropouts, but it seems to be the only

4849 // practical way to sync up the read and write pointers reliably, given the

4850 // the very complex relationship between phase and increment of the read and write

4853 // In order to minimize audible dropouts in DUPLEX mode, we will

4854 // provide a pre-roll period of 0.5 seconds in which we return

4855 // zeros from the read buffer while the pointers sync up.

4857 if ( stream_.mode == DUPLEX ) {

4858 if ( safeReadPointer < endRead ) {

4859 if ( duplexPrerollBytes <= 0 ) {

4860 // Pre-roll time over. Be more agressive.

4861 int adjustment = endRead-safeReadPointer;

4863 handle->xrun[1] = true;

4865 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,

4866 // and perform fine adjustments later.

4867 // - small adjustments: back off by twice as much.

4868 if ( adjustment >= 2*bufferBytes )

4869 nextReadPointer = safeReadPointer-2*bufferBytes;

4871 nextReadPointer = safeReadPointer-bufferBytes-adjustment;

4873 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4877 // In pre=roll time. Just do it.

4878 nextReadPointer = safeReadPointer - bufferBytes;

4879 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4881 endRead = nextReadPointer + bufferBytes;

4884 else { // mode == INPUT

4885 while ( safeReadPointer < endRead ) {

4886 // See comments for playback.

4887 double millis = (endRead - safeReadPointer) * 1000.0;

4888 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);

4889 if ( millis < 1.0 ) millis = 1.0;

4890 Sleep( (DWORD) millis );

4892 // Wake up and find out where we are now.

// NOTE(review): same '&currentReadPointer' corruption as above.
4893 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4894 if ( FAILED( result ) ) {

4895 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4896 errorText_ = errorStream_.str();

4897 error( RtError::SYSTEM_ERROR );

4900 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4904 // Lock free space in the buffer

4905 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,

4906 &bufferSize1, &buffer2, &bufferSize2, 0 );

4907 if ( FAILED( result ) ) {

4908 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";

4909 errorText_ = errorStream_.str();

4910 error( RtError::SYSTEM_ERROR );

4913 if ( duplexPrerollBytes <= 0 ) {

4914 // Copy our buffer into the DS buffer

4915 CopyMemory( buffer, buffer1, bufferSize1 );

4916 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );

// Still in the duplex pre-roll window: hand the user zeros instead of the
// not-yet-synchronized capture data.
4919 memset( buffer, 0, bufferSize1 );

4920 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );

4921 duplexPrerollBytes -= bufferSize1 + bufferSize2;

4924 // Update our buffer offset and unlock sound buffer

4925 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4926 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4927 if ( FAILED( result ) ) {

4928 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";

4929 errorText_ = errorStream_.str();

4930 error( RtError::SYSTEM_ERROR );

4932 handle->bufferPointer[1] = nextReadPointer;

4934 // No byte swapping necessary in DirectSound implementation.

4936 // If necessary, convert 8-bit data from unsigned to signed.

4937 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )

4938 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );

4940 // Do buffer conversion if necessary.

4941 if ( stream_.doConvertBuffer[1] )

4942 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

4946 // MUTEX_UNLOCK( &stream_.mutex );

4948 RtApi::tickStreamTime();
\r
4951 // Definitions for utility functions and callbacks
\r
4952 // specific to the DirectSound implementation.
\r
// Thread entry point for the DirectSound callback thread: loops calling
// callbackEvent() until CallbackInfo::isRunning is cleared, then exits the
// thread via _endthreadex.
// NOTE(review): braces and the trailing 'return' appear to have been lost in
// extraction.
4954 extern "C" unsigned __stdcall callbackHandler( void *ptr )

4956 CallbackInfo *info = (CallbackInfo *) ptr;

4957 RtApiDs *object = (RtApiDs *) info->object;

4958 bool* isRunning = &info->isRunning;

4960 while ( *isRunning == true ) {

4961 object->callbackEvent();

4964 _endthreadex( 0 );
\r
4968 #include "tchar.h"
\r
// Converts a Windows LPCTSTR device name to a narrow std::string.  Under
// UNICODE builds each wide character is truncated into a one-byte char (as
// the original comment notes, lossy for non-ASCII names); otherwise the
// narrow string is appended directly.
// NOTE(review): the declaration of the local 's' and the 'return s;' are not
// visible in this chunk -- presumably lost in extraction.
4970 std::string convertTChar( LPCTSTR name )

4974 #if defined( UNICODE ) || defined( _UNICODE )

4975 // Yes, this conversion doesn't make sense for two-byte characters

4976 // but RtAudio is currently written to return an std::string of

4977 // one-byte chars for the device name.

4978 for ( unsigned int i=0; i<wcslen( name ); i++ )

4979 s.push_back( name[i] );

4981 s.append( std::string( name ) );
\r
// DirectSound device-enumeration callback.  Probes each enumerated device
// (capture or playback, selected via the lpContext flag), and if it is
// usable, records its name and GUID in the global dsDevices list -- either
// updating an existing entry of the same name or appending a new one.
// Returning TRUE continues enumeration.
// NOTE(review): lines lost in extraction include the opening brace, the
// declarations of 'hr', 'caps' (DSCCAPS / DSCAPS) and 'device', several
// else-branches and closing braces, and the final 'return TRUE;'.
4987 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

4988 LPCTSTR description,

4990 LPVOID lpContext )

4992 bool *isInput = (bool *) lpContext;

4995 bool validDevice = false;

4996 if ( *isInput == true ) {

4998 LPDIRECTSOUNDCAPTURE object;

5000 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );

5001 if ( hr != DS_OK ) return TRUE;

5003 caps.dwSize = sizeof(caps);

5004 hr = object->GetCaps( &caps );

5005 if ( hr == DS_OK ) {

// A capture device is usable only if it reports channels and formats.
5006 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )

5007 validDevice = true;

5009 object->Release();

5013 LPDIRECTSOUND object;

5014 hr = DirectSoundCreate( lpguid, &object, NULL );

5015 if ( hr != DS_OK ) return TRUE;

5017 caps.dwSize = sizeof(caps);

5018 hr = object->GetCaps( &caps );

5019 if ( hr == DS_OK ) {

// A playback device is usable if it supports a primary mono/stereo buffer.
5020 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )

5021 validDevice = true;

5023 object->Release();

5026 // If good device, then save its name and guid.

5027 std::string name = convertTChar( description );

5028 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )

5029 name = "Default Device";

5030 if ( validDevice ) {

// First look for an existing entry with the same name and attach this
// GUID to its capture (id[1]) or playback (id[0]) slot.
5031 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {

5032 if ( dsDevices[i].name == name ) {

5033 dsDevices[i].found = true;

5035 dsDevices[i].id[1] = lpguid;

5036 dsDevices[i].validId[1] = true;

5039 dsDevices[i].id[0] = lpguid;

5040 dsDevices[i].validId[0] = true;

// No existing entry: build a fresh record and append it.
5047 device.name = name;

5048 device.found = true;

5050 device.id[1] = lpguid;

5051 device.validId[1] = true;

5054 device.id[0] = lpguid;

5055 device.validId[0] = true;

5057 dsDevices.push_back( device );
\r
// Maps a DirectSound HRESULT error code to a short human-readable string
// used when composing errorStream_ messages.
// NOTE(review): the opening brace and the 'switch ( code ) {' header (and
// its closing brace) are not visible in this chunk.
5063 static const char* getErrorString( int code )

5067 case DSERR_ALLOCATED:

5068 return "Already allocated";

5070 case DSERR_CONTROLUNAVAIL:

5071 return "Control unavailable";

5073 case DSERR_INVALIDPARAM:

5074 return "Invalid parameter";

5076 case DSERR_INVALIDCALL:

5077 return "Invalid call";

5079 case DSERR_GENERIC:

5080 return "Generic error";

5082 case DSERR_PRIOLEVELNEEDED:

5083 return "Priority level needed";

5085 case DSERR_OUTOFMEMORY:

5086 return "Out of memory";

5088 case DSERR_BADFORMAT:

5089 return "The sample rate or the channel format is not supported";

5091 case DSERR_UNSUPPORTED:

5092 return "Not supported";

5094 case DSERR_NODRIVER:

5095 return "No driver";

5097 case DSERR_ALREADYINITIALIZED:

5098 return "Already initialized";

5100 case DSERR_NOAGGREGATION:

5101 return "No aggregation";

5103 case DSERR_BUFFERLOST:

5104 return "Buffer lost";

5106 case DSERR_OTHERAPPHASPRIO:

5107 return "Another application already has priority";

5109 case DSERR_UNINITIALIZED:

5110 return "Uninitialized";

// Fallback for any code not listed above.
5113 return "DirectSound unknown error";
\r
5116 //******************** End of __WINDOWS_DS__ *********************//
\r
5120 #if defined(__LINUX_ALSA__)
\r
5122 #include <alsa/asoundlib.h>
\r
5123 #include <unistd.h>
\r
5125 // A structure to hold various information related to the ALSA API
\r
5126 // implementation.
\r
// Per-stream state for the ALSA backend: one PCM handle per direction
// (playback = [0], capture = [1]), a flag for hardware-linked (synchronized)
// duplex operation, and a condition variable used to make the callback
// thread runnable.
// NOTE(review): some members (e.g. the xrun[2] flags initialized in the
// constructor below) and the closing '};' are not visible in this chunk.
5127 struct AlsaHandle {

5128 snd_pcm_t *handles[2];

5129 bool synchronized;

5131 pthread_cond_t runnable;

5134 :synchronized(false) { xrun[0] = false; xrun[1] = false; }
\r
5137 extern "C" void *alsaCallbackHandler( void * ptr );
\r
// Default constructor -- all real initialization happens in probeDeviceOpen.
5139 RtApiAlsa :: RtApiAlsa()

5141 // Nothing to do here.
\r
// Destructor: closes the stream if one is still open so ALSA resources are
// released.
5144 RtApiAlsa :: ~RtApiAlsa()

5146 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Counts ALSA PCM devices by walking every sound card ("hw:N") and every
// subdevice reported by its control interface.  Cards/devices that fail to
// open are reported as warnings and skipped.
// NOTE(review): lines lost in extraction include the opening brace, the
// 'char name[64]' buffer used by sprintf, the inner subdevice loop header,
// the nDevices++ accumulation, and the final 'return nDevices;'.
5149 unsigned int RtApiAlsa :: getDeviceCount( void )

5151 unsigned nDevices = 0;

5152 int result, subdevice, card;

5154 snd_ctl_t *handle;

5156 // Count cards and devices

// card is seeded by snd_card_next from -1; the loop ends when it returns -1.
5158 snd_card_next( &card );

5159 while ( card >= 0 ) {

5160 sprintf( name, "hw:%d", card );

5161 result = snd_ctl_open( &handle, name, 0 );

5162 if ( result < 0 ) {

5163 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";

5164 errorText_ = errorStream_.str();

5165 error( RtError::WARNING );

5170 result = snd_ctl_pcm_next_device( handle, &subdevice );

5171 if ( result < 0 ) {

5172 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

5173 errorText_ = errorStream_.str();

5174 error( RtError::WARNING );

// subdevice < 0 signals no more PCM devices on this card.
5177 if ( subdevice < 0 )

5182 snd_ctl_close( handle );

5183 snd_card_next( &card );
\r
// Builds an RtAudio::DeviceInfo for ALSA device index 'device': locates the
// matching "hw:card,subdevice", probes playback then capture channel counts,
// then (via the probeParameters path) opens the richer direction to collect
// supported sample rates and native data formats, and finally resolves a
// readable card name.  Emits warnings and returns early (info.probed stays
// false) on probe failures.
// NOTE(review): this chunk has lost many lines in extraction (opening brace,
// 'char name[64]', loop headers, goto labels 'captureProbe:' /
// 'probeParameters:', 'return info;' statements, and closing braces).  It
// also contains HTML-entity corruption: '¶ms' below is '&params'
// ('&para' -> U+00B6).  Restore against the canonical source before editing.
5189 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )

5191 RtAudio::DeviceInfo info;

5192 info.probed = false;

5194 unsigned nDevices = 0;

5195 int result, subdevice, card;

5197 snd_ctl_t *chandle;

5199 // Count cards and devices

// Same card/subdevice walk as getDeviceCount, but stops when the running
// count reaches the requested device index and captures its hw name.
5201 snd_card_next( &card );

5202 while ( card >= 0 ) {

5203 sprintf( name, "hw:%d", card );

5204 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );

5205 if ( result < 0 ) {

5206 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";

5207 errorText_ = errorStream_.str();

5208 error( RtError::WARNING );

5213 result = snd_ctl_pcm_next_device( chandle, &subdevice );

5214 if ( result < 0 ) {

5215 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

5216 errorText_ = errorStream_.str();

5217 error( RtError::WARNING );

5220 if ( subdevice < 0 ) break;

5221 if ( nDevices == device ) {

5222 sprintf( name, "hw:%d,%d", card, subdevice );

5228 snd_ctl_close( chandle );

5229 snd_card_next( &card );

5232 if ( nDevices == 0 ) {

5233 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";

5234 error( RtError::INVALID_USE );

5237 if ( device >= nDevices ) {

5238 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";

5239 error( RtError::INVALID_USE );

5244 // If a stream is already open, we cannot probe the stream devices.

5245 // Thus, use the saved results.

5246 if ( stream_.state != STREAM_CLOSED &&

5247 ( stream_.device[0] == device || stream_.device[1] == device ) ) {

5248 if ( device >= devices_.size() ) {

5249 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";

5250 error( RtError::WARNING );

5253 return devices_[ device ];

5256 int openMode = SND_PCM_ASYNC;

5257 snd_pcm_stream_t stream;

5258 snd_pcm_info_t *pcminfo;

5259 snd_pcm_info_alloca( &pcminfo );

5260 snd_pcm_t *phandle;

5261 snd_pcm_hw_params_t *params;

// NOTE(review): '¶ms' is HTML-entity corruption of '&params'
// ('&para' -> U+00B6) -- restore before compiling.
5262 snd_pcm_hw_params_alloca( ¶ms );

5264 // First try for playback

5265 stream = SND_PCM_STREAM_PLAYBACK;

5266 snd_pcm_info_set_device( pcminfo, subdevice );

5267 snd_pcm_info_set_subdevice( pcminfo, 0 );

5268 snd_pcm_info_set_stream( pcminfo, stream );

5270 result = snd_ctl_pcm_info( chandle, pcminfo );

5271 if ( result < 0 ) {

5272 // Device probably doesn't support playback.

5273 goto captureProbe;

5276 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );

5277 if ( result < 0 ) {

5278 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5279 errorText_ = errorStream_.str();

5280 error( RtError::WARNING );

5281 goto captureProbe;

5284 // The device is open ... fill the parameter structure.

5285 result = snd_pcm_hw_params_any( phandle, params );

5286 if ( result < 0 ) {

5287 snd_pcm_close( phandle );

5288 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5289 errorText_ = errorStream_.str();

5290 error( RtError::WARNING );

5291 goto captureProbe;

5294 // Get output channel information.

5295 unsigned int value;

5296 result = snd_pcm_hw_params_get_channels_max( params, &value );

5297 if ( result < 0 ) {

5298 snd_pcm_close( phandle );

5299 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";

5300 errorText_ = errorStream_.str();

5301 error( RtError::WARNING );

5302 goto captureProbe;

5304 info.outputChannels = value;

5305 snd_pcm_close( phandle );

// captureProbe: label (not visible in this chunk) -- repeat the probe for
// the capture direction.
5308 // Now try for capture

5309 stream = SND_PCM_STREAM_CAPTURE;

5310 snd_pcm_info_set_stream( pcminfo, stream );

5312 result = snd_ctl_pcm_info( chandle, pcminfo );

5313 snd_ctl_close( chandle );

5314 if ( result < 0 ) {

5315 // Device probably doesn't support capture.

5316 if ( info.outputChannels == 0 ) return info;

5317 goto probeParameters;

5320 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

5321 if ( result < 0 ) {

5322 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5323 errorText_ = errorStream_.str();

5324 error( RtError::WARNING );

5325 if ( info.outputChannels == 0 ) return info;

5326 goto probeParameters;

5329 // The device is open ... fill the parameter structure.

5330 result = snd_pcm_hw_params_any( phandle, params );

5331 if ( result < 0 ) {

5332 snd_pcm_close( phandle );

5333 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5334 errorText_ = errorStream_.str();

5335 error( RtError::WARNING );

5336 if ( info.outputChannels == 0 ) return info;

5337 goto probeParameters;

5340 result = snd_pcm_hw_params_get_channels_max( params, &value );

5341 if ( result < 0 ) {

5342 snd_pcm_close( phandle );

5343 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";

5344 errorText_ = errorStream_.str();

5345 error( RtError::WARNING );

5346 if ( info.outputChannels == 0 ) return info;

5347 goto probeParameters;

5349 info.inputChannels = value;

5350 snd_pcm_close( phandle );

5352 // If device opens for both playback and capture, we determine the channels.

5353 if ( info.outputChannels > 0 && info.inputChannels > 0 )

5354 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

5356 // ALSA doesn't provide default devices so we'll use the first available one.

5357 if ( device == 0 && info.outputChannels > 0 )

5358 info.isDefaultOutput = true;

5359 if ( device == 0 && info.inputChannels > 0 )

5360 info.isDefaultInput = true;

// probeParameters: label (not visible in this chunk) -- probe rates/formats.
5363 // At this point, we just need to figure out the supported data

5364 // formats and sample rates. We'll proceed by opening the device in

5365 // the direction with the maximum number of channels, or playback if

5366 // they are equal. This might limit our sample rate options, but so

5369 if ( info.outputChannels >= info.inputChannels )

5370 stream = SND_PCM_STREAM_PLAYBACK;

5372 stream = SND_PCM_STREAM_CAPTURE;

5373 snd_pcm_info_set_stream( pcminfo, stream );

5375 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

5376 if ( result < 0 ) {

5377 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5378 errorText_ = errorStream_.str();

5379 error( RtError::WARNING );

5383 // The device is open ... fill the parameter structure.

5384 result = snd_pcm_hw_params_any( phandle, params );

5385 if ( result < 0 ) {

5386 snd_pcm_close( phandle );

5387 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5388 errorText_ = errorStream_.str();

5389 error( RtError::WARNING );

5393 // Test our discrete set of sample rate values.

5394 info.sampleRates.clear();

5395 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {

5396 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )

5397 info.sampleRates.push_back( SAMPLE_RATES[i] );

5399 if ( info.sampleRates.size() == 0 ) {

5400 snd_pcm_close( phandle );

5401 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";

5402 errorText_ = errorStream_.str();

5403 error( RtError::WARNING );

5407 // Probe the supported data formats ... we don't care about endian-ness just yet

5408 snd_pcm_format_t format;

5409 info.nativeFormats = 0;

5410 format = SND_PCM_FORMAT_S8;

5411 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5412 info.nativeFormats |= RTAUDIO_SINT8;

5413 format = SND_PCM_FORMAT_S16;

5414 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5415 info.nativeFormats |= RTAUDIO_SINT16;

5416 format = SND_PCM_FORMAT_S24;

5417 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5418 info.nativeFormats |= RTAUDIO_SINT24;

5419 format = SND_PCM_FORMAT_S32;

5420 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5421 info.nativeFormats |= RTAUDIO_SINT32;

5422 format = SND_PCM_FORMAT_FLOAT;

5423 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5424 info.nativeFormats |= RTAUDIO_FLOAT32;

5425 format = SND_PCM_FORMAT_FLOAT64;

5426 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5427 info.nativeFormats |= RTAUDIO_FLOAT64;

5429 // Check that we have at least one supported format

5430 if ( info.nativeFormats == 0 ) {

5431 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";

5432 errorText_ = errorStream_.str();

5433 error( RtError::WARNING );

5437 // Get the device name

// NOTE(review): the 'char *cardname;' declaration and the assignment of
// info.name are not visible here -- presumably lost in extraction.
5439 result = snd_card_get_name( card, &cardname );

5440 if ( result >= 0 )

5441 sprintf( name, "hw:%s,%d", cardname, subdevice );

5444 // That's all ... close the device and return

5445 snd_pcm_close( phandle );

5446 info.probed = true;
\r
5450 void RtApiAlsa :: saveDeviceInfo( void )
\r
5454 unsigned int nDevices = getDeviceCount();
\r
5455 devices_.resize( nDevices );
\r
5456 for ( unsigned int i=0; i<nDevices; i++ )
\r
5457 devices_[i] = getDeviceInfo( i );
\r
5460 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5461 unsigned int firstChannel, unsigned int sampleRate,
\r
5462 RtAudioFormat format, unsigned int *bufferSize,
\r
5463 RtAudio::StreamOptions *options )
\r
5466 #if defined(__RTAUDIO_DEBUG__)
\r
5467 snd_output_t *out;
\r
5468 snd_output_stdio_attach(&out, stderr, 0);
\r
5471 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5473 unsigned nDevices = 0;
\r
5474 int result, subdevice, card;
\r
5476 snd_ctl_t *chandle;
\r
5478 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5479 snprintf(name, sizeof(name), "%s", "default");
\r
5481 // Count cards and devices
\r
5483 snd_card_next( &card );
\r
5484 while ( card >= 0 ) {
\r
5485 sprintf( name, "hw:%d", card );
\r
5486 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5487 if ( result < 0 ) {
\r
5488 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5489 errorText_ = errorStream_.str();
\r
5494 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5495 if ( result < 0 ) break;
\r
5496 if ( subdevice < 0 ) break;
\r
5497 if ( nDevices == device ) {
\r
5498 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5499 snd_ctl_close( chandle );
\r
5504 snd_ctl_close( chandle );
\r
5505 snd_card_next( &card );
\r
5508 if ( nDevices == 0 ) {
\r
5509 // This should not happen because a check is made before this function is called.
\r
5510 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5514 if ( device >= nDevices ) {
\r
5515 // This should not happen because a check is made before this function is called.
\r
5516 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5523 // The getDeviceInfo() function will not work for a device that is
\r
5524 // already open. Thus, we'll probe the system before opening a
\r
5525 // stream and save the results for use by getDeviceInfo().
\r
5526 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5527 this->saveDeviceInfo();
\r
5529 snd_pcm_stream_t stream;
\r
5530 if ( mode == OUTPUT )
\r
5531 stream = SND_PCM_STREAM_PLAYBACK;
\r
5533 stream = SND_PCM_STREAM_CAPTURE;
\r
5535 snd_pcm_t *phandle;
\r
5536 int openMode = SND_PCM_ASYNC;
\r
5537 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5538 if ( result < 0 ) {
\r
5539 if ( mode == OUTPUT )
\r
5540 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5542 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5543 errorText_ = errorStream_.str();
\r
5547 // Fill the parameter structure.
\r
5548 snd_pcm_hw_params_t *hw_params;
\r
5549 snd_pcm_hw_params_alloca( &hw_params );
\r
5550 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5551 if ( result < 0 ) {
\r
5552 snd_pcm_close( phandle );
\r
5553 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5554 errorText_ = errorStream_.str();
\r
5558 #if defined(__RTAUDIO_DEBUG__)
\r
5559 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5560 snd_pcm_hw_params_dump( hw_params, out );
\r
5563 // Set access ... check user preference.
\r
5564 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5565 stream_.userInterleaved = false;
\r
5566 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5567 if ( result < 0 ) {
\r
5568 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5569 stream_.deviceInterleaved[mode] = true;
\r
5572 stream_.deviceInterleaved[mode] = false;
\r
5575 stream_.userInterleaved = true;
\r
5576 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5577 if ( result < 0 ) {
\r
5578 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5579 stream_.deviceInterleaved[mode] = false;
\r
5582 stream_.deviceInterleaved[mode] = true;
\r
5585 if ( result < 0 ) {
\r
5586 snd_pcm_close( phandle );
\r
5587 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5588 errorText_ = errorStream_.str();
\r
5592 // Determine how to set the device format.
\r
5593 stream_.userFormat = format;
\r
5594 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5596 if ( format == RTAUDIO_SINT8 )
\r
5597 deviceFormat = SND_PCM_FORMAT_S8;
\r
5598 else if ( format == RTAUDIO_SINT16 )
\r
5599 deviceFormat = SND_PCM_FORMAT_S16;
\r
5600 else if ( format == RTAUDIO_SINT24 )
\r
5601 deviceFormat = SND_PCM_FORMAT_S24;
\r
5602 else if ( format == RTAUDIO_SINT32 )
\r
5603 deviceFormat = SND_PCM_FORMAT_S32;
\r
5604 else if ( format == RTAUDIO_FLOAT32 )
\r
5605 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5606 else if ( format == RTAUDIO_FLOAT64 )
\r
5607 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5609 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5610 stream_.deviceFormat[mode] = format;
\r
5614 // The user requested format is not natively supported by the device.
\r
5615 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5616 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5617 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5621 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5622 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5623 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5627 deviceFormat = SND_PCM_FORMAT_S32;
\r
5628 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5629 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5633 deviceFormat = SND_PCM_FORMAT_S24;
\r
5634 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5635 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5639 deviceFormat = SND_PCM_FORMAT_S16;
\r
5640 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5641 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5645 deviceFormat = SND_PCM_FORMAT_S8;
\r
5646 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5647 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5651 // If we get here, no supported format was found.
\r
5652 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5653 errorText_ = errorStream_.str();
\r
5657 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5658 if ( result < 0 ) {
\r
5659 snd_pcm_close( phandle );
\r
5660 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5661 errorText_ = errorStream_.str();
\r
5665 // Determine whether byte-swaping is necessary.
\r
5666 stream_.doByteSwap[mode] = false;
\r
5667 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5668 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5669 if ( result == 0 )
\r
5670 stream_.doByteSwap[mode] = true;
\r
5671 else if (result < 0) {
\r
5672 snd_pcm_close( phandle );
\r
5673 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5674 errorText_ = errorStream_.str();
\r
5679 // Set the sample rate.
\r
5680 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5681 if ( result < 0 ) {
\r
5682 snd_pcm_close( phandle );
\r
5683 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5684 errorText_ = errorStream_.str();
\r
5688 // Determine the number of channels for this device. We support a possible
\r
5689 // minimum device channel number > than the value requested by the user.
\r
5690 stream_.nUserChannels[mode] = channels;
\r
5691 unsigned int value;
\r
5692 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5693 unsigned int deviceChannels = value;
\r
5694 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5695 snd_pcm_close( phandle );
\r
5696 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5697 errorText_ = errorStream_.str();
\r
5701 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5702 if ( result < 0 ) {
\r
5703 snd_pcm_close( phandle );
\r
5704 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5705 errorText_ = errorStream_.str();
\r
5708 deviceChannels = value;
\r
5709 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5710 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5712 // Set the device channels.
\r
5713 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5714 if ( result < 0 ) {
\r
5715 snd_pcm_close( phandle );
\r
5716 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5717 errorText_ = errorStream_.str();
\r
5721 // Set the buffer (or period) size.
\r
5723 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5724 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5725 if ( result < 0 ) {
\r
5726 snd_pcm_close( phandle );
\r
5727 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5728 errorText_ = errorStream_.str();
\r
5731 *bufferSize = periodSize;
\r
5733 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5734 unsigned int periods = 0;
\r
5735 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5736 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5737 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5738 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5739 if ( result < 0 ) {
\r
5740 snd_pcm_close( phandle );
\r
5741 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5742 errorText_ = errorStream_.str();
\r
5746 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5747 // MUST be the same in both directions!
\r
5748 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5749 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5750 errorText_ = errorStream_.str();
\r
5754 stream_.bufferSize = *bufferSize;
\r
5756 // Install the hardware configuration
\r
5757 result = snd_pcm_hw_params( phandle, hw_params );
\r
5758 if ( result < 0 ) {
\r
5759 snd_pcm_close( phandle );
\r
5760 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5761 errorText_ = errorStream_.str();
\r
5765 #if defined(__RTAUDIO_DEBUG__)
\r
5766 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5767 snd_pcm_hw_params_dump( hw_params, out );
\r
5770 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5771 snd_pcm_sw_params_t *sw_params = NULL;
\r
5772 snd_pcm_sw_params_alloca( &sw_params );
\r
5773 snd_pcm_sw_params_current( phandle, sw_params );
\r
5774 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5775 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5776 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5778 // The following two settings were suggested by Theo Veenker
\r
5779 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5780 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5782 // here are two options for a fix
\r
5783 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5784 snd_pcm_uframes_t val;
\r
5785 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5786 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5788 result = snd_pcm_sw_params( phandle, sw_params );
\r
5789 if ( result < 0 ) {
\r
5790 snd_pcm_close( phandle );
\r
5791 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5792 errorText_ = errorStream_.str();
\r
5796 #if defined(__RTAUDIO_DEBUG__)
\r
5797 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5798 snd_pcm_sw_params_dump( sw_params, out );
\r
5801 // Set flags for buffer conversion
\r
5802 stream_.doConvertBuffer[mode] = false;
\r
5803 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5804 stream_.doConvertBuffer[mode] = true;
\r
5805 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5806 stream_.doConvertBuffer[mode] = true;
\r
5807 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5808 stream_.nUserChannels[mode] > 1 )
\r
5809 stream_.doConvertBuffer[mode] = true;
\r
5811 // Allocate the ApiHandle if necessary and then save.
\r
5812 AlsaHandle *apiInfo = 0;
\r
5813 if ( stream_.apiHandle == 0 ) {
\r
5815 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5817 catch ( std::bad_alloc& ) {
\r
5818 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5822 if ( pthread_cond_init( &apiInfo->runnable, NULL ) ) {
\r
5823 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5827 stream_.apiHandle = (void *) apiInfo;
\r
5828 apiInfo->handles[0] = 0;
\r
5829 apiInfo->handles[1] = 0;
\r
5832 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5834 apiInfo->handles[mode] = phandle;
\r
5836 // Allocate necessary internal buffers.
\r
5837 unsigned long bufferBytes;
\r
5838 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5839 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5840 if ( stream_.userBuffer[mode] == NULL ) {
\r
5841 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5845 if ( stream_.doConvertBuffer[mode] ) {
\r
5847 bool makeBuffer = true;
\r
5848 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5849 if ( mode == INPUT ) {
\r
5850 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5851 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5852 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5856 if ( makeBuffer ) {
\r
5857 bufferBytes *= *bufferSize;
\r
5858 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5859 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5860 if ( stream_.deviceBuffer == NULL ) {
\r
5861 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5867 stream_.sampleRate = sampleRate;
\r
5868 stream_.nBuffers = periods;
\r
5869 stream_.device[mode] = device;
\r
5870 stream_.state = STREAM_STOPPED;
\r
5872 // Setup the buffer conversion information structure.
\r
5873 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5875 // Setup thread if necessary.
\r
5876 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5877 // We had already set up an output stream.
\r
5878 stream_.mode = DUPLEX;
\r
5879 // Link the streams if possible.
\r
5880 apiInfo->synchronized = false;
\r
5881 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5882 apiInfo->synchronized = true;
\r
5884 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5885 error( RtError::WARNING );
\r
5889 stream_.mode = mode;
\r
5891 // Setup callback thread.
\r
5892 stream_.callbackInfo.object = (void *) this;
\r
5894 // Set the thread attributes for joinable and realtime scheduling
\r
5895 // priority (optional). The higher priority will only take affect
\r
5896 // if the program is run as root or suid. Note, under Linux
\r
5897 // processes with CAP_SYS_NICE privilege, a user can change
\r
5898 // scheduling policy and priority (thus need not be root). See
\r
5899 // POSIX "capabilities".
\r
5900 pthread_attr_t attr;
\r
5901 pthread_attr_init( &attr );
\r
5902 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5903 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5904 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5905 struct sched_param param;
\r
5906 int priority = options->priority;
\r
5907 int min = sched_get_priority_min( SCHED_RR );
\r
5908 int max = sched_get_priority_max( SCHED_RR );
\r
5909 if ( priority < min ) priority = min;
\r
5910 else if ( priority > max ) priority = max;
\r
5911 param.sched_priority = priority;
\r
5912 pthread_attr_setschedparam( &attr, ¶m );
\r
5913 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5916 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5918 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5921 stream_.callbackInfo.isRunning = true;
\r
5922 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5923 pthread_attr_destroy( &attr );
\r
5925 stream_.callbackInfo.isRunning = false;
\r
5926 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5935 pthread_cond_destroy( &apiInfo->runnable );
\r
5936 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5937 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5939 stream_.apiHandle = 0;
\r
5942 for ( int i=0; i<2; i++ ) {
\r
5943 if ( stream_.userBuffer[i] ) {
\r
5944 free( stream_.userBuffer[i] );
\r
5945 stream_.userBuffer[i] = 0;
\r
5949 if ( stream_.deviceBuffer ) {
\r
5950 free( stream_.deviceBuffer );
\r
5951 stream_.deviceBuffer = 0;
\r
5957 void RtApiAlsa :: closeStream()
\r
5959 if ( stream_.state == STREAM_CLOSED ) {
\r
5960 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5961 error( RtError::WARNING );
\r
5965 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5966 stream_.callbackInfo.isRunning = false;
\r
5967 MUTEX_LOCK( &stream_.mutex );
\r
5968 if ( stream_.state == STREAM_STOPPED )
\r
5969 pthread_cond_signal( &apiInfo->runnable );
\r
5970 MUTEX_UNLOCK( &stream_.mutex );
\r
5971 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5973 if ( stream_.state == STREAM_RUNNING ) {
\r
5974 stream_.state = STREAM_STOPPED;
\r
5975 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5976 snd_pcm_drop( apiInfo->handles[0] );
\r
5977 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5978 snd_pcm_drop( apiInfo->handles[1] );
\r
5982 pthread_cond_destroy( &apiInfo->runnable );
\r
5983 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5984 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5986 stream_.apiHandle = 0;
\r
5989 for ( int i=0; i<2; i++ ) {
\r
5990 if ( stream_.userBuffer[i] ) {
\r
5991 free( stream_.userBuffer[i] );
\r
5992 stream_.userBuffer[i] = 0;
\r
5996 if ( stream_.deviceBuffer ) {
\r
5997 free( stream_.deviceBuffer );
\r
5998 stream_.deviceBuffer = 0;
\r
6001 stream_.mode = UNINITIALIZED;
\r
6002 stream_.state = STREAM_CLOSED;
\r
6005 void RtApiAlsa :: startStream()
\r
6007 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6010 if ( stream_.state == STREAM_RUNNING ) {
\r
6011 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6012 error( RtError::WARNING );
\r
6016 MUTEX_LOCK( &stream_.mutex );
\r
6019 snd_pcm_state_t state;
\r
6020 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6021 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6022 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6023 state = snd_pcm_state( handle[0] );
\r
6024 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6025 result = snd_pcm_prepare( handle[0] );
\r
6026 if ( result < 0 ) {
\r
6027 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6028 errorText_ = errorStream_.str();
\r
6034 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6035 state = snd_pcm_state( handle[1] );
\r
6036 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6037 result = snd_pcm_prepare( handle[1] );
\r
6038 if ( result < 0 ) {
\r
6039 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6040 errorText_ = errorStream_.str();
\r
6046 stream_.state = STREAM_RUNNING;
\r
6049 MUTEX_UNLOCK( &stream_.mutex );
\r
6051 pthread_cond_signal( &apiInfo->runnable );
\r
6053 if ( result >= 0 ) return;
\r
6054 error( RtError::SYSTEM_ERROR );
\r
6057 void RtApiAlsa :: stopStream()
\r
6060 if ( stream_.state == STREAM_STOPPED ) {
\r
6061 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6062 error( RtError::WARNING );
\r
6066 stream_.state = STREAM_STOPPED;
\r
6067 MUTEX_LOCK( &stream_.mutex );
\r
6069 //if ( stream_.state == STREAM_STOPPED ) {
\r
6070 // MUTEX_UNLOCK( &stream_.mutex );
\r
6075 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6076 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6077 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6078 if ( apiInfo->synchronized )
\r
6079 result = snd_pcm_drop( handle[0] );
\r
6081 result = snd_pcm_drain( handle[0] );
\r
6082 if ( result < 0 ) {
\r
6083 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6084 errorText_ = errorStream_.str();
\r
6089 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6090 result = snd_pcm_drop( handle[1] );
\r
6091 if ( result < 0 ) {
\r
6092 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6093 errorText_ = errorStream_.str();
\r
6099 stream_.state = STREAM_STOPPED;
\r
6100 MUTEX_UNLOCK( &stream_.mutex );
\r
6102 if ( result >= 0 ) return;
\r
6103 error( RtError::SYSTEM_ERROR );
\r
6106 void RtApiAlsa :: abortStream()
\r
6109 if ( stream_.state == STREAM_STOPPED ) {
\r
6110 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6111 error( RtError::WARNING );
\r
6115 stream_.state = STREAM_STOPPED;
\r
6116 MUTEX_LOCK( &stream_.mutex );
\r
6118 //if ( stream_.state == STREAM_STOPPED ) {
\r
6119 // MUTEX_UNLOCK( &stream_.mutex );
\r
6124 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6125 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6126 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6127 result = snd_pcm_drop( handle[0] );
\r
6128 if ( result < 0 ) {
\r
6129 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6130 errorText_ = errorStream_.str();
\r
6135 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6136 result = snd_pcm_drop( handle[1] );
\r
6137 if ( result < 0 ) {
\r
6138 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6139 errorText_ = errorStream_.str();
\r
6145 stream_.state = STREAM_STOPPED;
\r
6146 MUTEX_UNLOCK( &stream_.mutex );
\r
6148 if ( result >= 0 ) return;
\r
6149 error( RtError::SYSTEM_ERROR );
\r
6152 void RtApiAlsa :: callbackEvent()
\r
6154 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6155 if ( stream_.state == STREAM_STOPPED ) {
\r
6156 MUTEX_LOCK( &stream_.mutex );
\r
6157 pthread_cond_wait( &apiInfo->runnable, &stream_.mutex );
\r
6158 if ( stream_.state != STREAM_RUNNING ) {
\r
6159 MUTEX_UNLOCK( &stream_.mutex );
\r
6162 MUTEX_UNLOCK( &stream_.mutex );
\r
6165 if ( stream_.state == STREAM_CLOSED ) {
\r
6166 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6167 error( RtError::WARNING );
\r
6171 int doStopStream = 0;
\r
6172 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6173 double streamTime = getStreamTime();
\r
6174 RtAudioStreamStatus status = 0;
\r
6175 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6176 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6177 apiInfo->xrun[0] = false;
\r
6179 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6180 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6181 apiInfo->xrun[1] = false;
\r
6183 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6184 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6186 if ( doStopStream == 2 ) {
\r
6191 MUTEX_LOCK( &stream_.mutex );
\r
6193 // The state might change while waiting on a mutex.
\r
6194 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6199 snd_pcm_t **handle;
\r
6200 snd_pcm_sframes_t frames;
\r
6201 RtAudioFormat format;
\r
6202 handle = (snd_pcm_t **) apiInfo->handles;
\r
6204 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6206 // Setup parameters.
\r
6207 if ( stream_.doConvertBuffer[1] ) {
\r
6208 buffer = stream_.deviceBuffer;
\r
6209 channels = stream_.nDeviceChannels[1];
\r
6210 format = stream_.deviceFormat[1];
\r
6213 buffer = stream_.userBuffer[1];
\r
6214 channels = stream_.nUserChannels[1];
\r
6215 format = stream_.userFormat;
\r
6218 // Read samples from device in interleaved/non-interleaved format.
\r
6219 if ( stream_.deviceInterleaved[1] )
\r
6220 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6222 void *bufs[channels];
\r
6223 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6224 for ( int i=0; i<channels; i++ )
\r
6225 bufs[i] = (void *) (buffer + (i * offset));
\r
6226 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6229 if ( result < (int) stream_.bufferSize ) {
\r
6230 // Either an error or overrun occured.
\r
6231 if ( result == -EPIPE ) {
\r
6232 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6233 if ( state == SND_PCM_STATE_XRUN ) {
\r
6234 apiInfo->xrun[1] = true;
\r
6235 result = snd_pcm_prepare( handle[1] );
\r
6236 if ( result < 0 ) {
\r
6237 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6238 errorText_ = errorStream_.str();
\r
6242 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6243 errorText_ = errorStream_.str();
\r
6247 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6248 errorText_ = errorStream_.str();
\r
6250 error( RtError::WARNING );
\r
6254 // Do byte swapping if necessary.
\r
6255 if ( stream_.doByteSwap[1] )
\r
6256 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6258 // Do buffer conversion if necessary.
\r
6259 if ( stream_.doConvertBuffer[1] )
\r
6260 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6262 // Check stream latency
\r
6263 result = snd_pcm_delay( handle[1], &frames );
\r
6264 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6269 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6271 // Setup parameters and do buffer conversion if necessary.
\r
6272 if ( stream_.doConvertBuffer[0] ) {
\r
6273 buffer = stream_.deviceBuffer;
\r
6274 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6275 channels = stream_.nDeviceChannels[0];
\r
6276 format = stream_.deviceFormat[0];
\r
6279 buffer = stream_.userBuffer[0];
\r
6280 channels = stream_.nUserChannels[0];
\r
6281 format = stream_.userFormat;
\r
6284 // Do byte swapping if necessary.
\r
6285 if ( stream_.doByteSwap[0] )
\r
6286 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6288 // Write samples to device in interleaved/non-interleaved format.
\r
6289 if ( stream_.deviceInterleaved[0] )
\r
6290 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6292 void *bufs[channels];
\r
6293 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6294 for ( int i=0; i<channels; i++ )
\r
6295 bufs[i] = (void *) (buffer + (i * offset));
\r
6296 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6299 if ( result < (int) stream_.bufferSize ) {
\r
6300 // Either an error or underrun occured.
\r
6301 if ( result == -EPIPE ) {
\r
6302 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6303 if ( state == SND_PCM_STATE_XRUN ) {
\r
6304 apiInfo->xrun[0] = true;
\r
6305 result = snd_pcm_prepare( handle[0] );
\r
6306 if ( result < 0 ) {
\r
6307 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6308 errorText_ = errorStream_.str();
\r
6312 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6313 errorText_ = errorStream_.str();
\r
6317 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6318 errorText_ = errorStream_.str();
\r
6320 error( RtError::WARNING );
\r
6324 // Check stream latency
\r
6325 result = snd_pcm_delay( handle[0], &frames );
\r
6326 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6330 MUTEX_UNLOCK( &stream_.mutex );
\r
6332 RtApi::tickStreamTime();
\r
6333 if ( doStopStream == 1 ) this->stopStream();
\r
6336 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6338 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6339 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6340 bool *isRunning = &info->isRunning;
\r
6342 while ( *isRunning == true ) {
\r
6343 pthread_testcancel();
\r
6344 object->callbackEvent();
\r
6347 pthread_exit( NULL );
\r
6350 //******************** End of __LINUX_ALSA__ *********************//
\r
6354 #if defined(__LINUX_OSS__)
\r
6356 #include <unistd.h>
\r
6357 #include <sys/ioctl.h>
\r
6358 #include <unistd.h>
\r
6359 #include <fcntl.h>
\r
6360 #include "soundcard.h"
\r
6361 #include <errno.h>
\r
6364 extern "C" void *ossCallbackHandler(void * ptr);
\r
6366 // A structure to hold various information related to the OSS API
\r
6367 // implementation.
\r
6368 struct OssHandle {
\r
6369 int id[2]; // device ids
\r
6372 pthread_cond_t runnable;
\r
6375 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
6378 RtApiOss :: RtApiOss()
\r
6380 // Nothing to do here.
\r
6383 RtApiOss :: ~RtApiOss()
\r
6385 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6388 unsigned int RtApiOss :: getDeviceCount( void )
\r
6390 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6391 if ( mixerfd == -1 ) {
\r
6392 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6393 error( RtError::WARNING );
\r
6397 oss_sysinfo sysinfo;
\r
6398 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6400 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6401 error( RtError::WARNING );
\r
6406 return sysinfo.numaudios;
\r
6409 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6411 RtAudio::DeviceInfo info;
\r
6412 info.probed = false;
\r
6414 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6415 if ( mixerfd == -1 ) {
\r
6416 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6417 error( RtError::WARNING );
\r
6421 oss_sysinfo sysinfo;
\r
6422 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6423 if ( result == -1 ) {
\r
6425 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6426 error( RtError::WARNING );
\r
6430 unsigned nDevices = sysinfo.numaudios;
\r
6431 if ( nDevices == 0 ) {
\r
6433 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6434 error( RtError::INVALID_USE );
\r
6437 if ( device >= nDevices ) {
\r
6439 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6440 error( RtError::INVALID_USE );
\r
6443 oss_audioinfo ainfo;
\r
6444 ainfo.dev = device;
\r
6445 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6447 if ( result == -1 ) {
\r
6448 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6449 errorText_ = errorStream_.str();
\r
6450 error( RtError::WARNING );
\r
6455 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6456 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6457 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6458 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6459 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6462 // Probe data formats ... do for input
\r
6463 unsigned long mask = ainfo.iformats;
\r
6464 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6465 info.nativeFormats |= RTAUDIO_SINT16;
\r
6466 if ( mask & AFMT_S8 )
\r
6467 info.nativeFormats |= RTAUDIO_SINT8;
\r
6468 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6469 info.nativeFormats |= RTAUDIO_SINT32;
\r
6470 if ( mask & AFMT_FLOAT )
\r
6471 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6472 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6473 info.nativeFormats |= RTAUDIO_SINT24;
\r
6475 // Check that we have at least one supported format
\r
6476 if ( info.nativeFormats == 0 ) {
\r
6477 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6478 errorText_ = errorStream_.str();
\r
6479 error( RtError::WARNING );
\r
6483 // Probe the supported sample rates.
\r
6484 info.sampleRates.clear();
\r
6485 if ( ainfo.nrates ) {
\r
6486 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6487 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6488 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6489 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6496 // Check min and max rate values;
\r
6497 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6498 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6499 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6503 if ( info.sampleRates.size() == 0 ) {
\r
6504 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6505 errorText_ = errorStream_.str();
\r
6506 error( RtError::WARNING );
\r
6509 info.probed = true;
\r
6510 info.name = ainfo.name;
\r
6517 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6518 unsigned int firstChannel, unsigned int sampleRate,
\r
6519 RtAudioFormat format, unsigned int *bufferSize,
\r
6520 RtAudio::StreamOptions *options )
\r
6522 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6523 if ( mixerfd == -1 ) {
\r
6524 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6528 oss_sysinfo sysinfo;
\r
6529 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6530 if ( result == -1 ) {
\r
6532 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6536 unsigned nDevices = sysinfo.numaudios;
\r
6537 if ( nDevices == 0 ) {
\r
6538 // This should not happen because a check is made before this function is called.
\r
6540 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6544 if ( device >= nDevices ) {
\r
6545 // This should not happen because a check is made before this function is called.
\r
6547 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6551 oss_audioinfo ainfo;
\r
6552 ainfo.dev = device;
\r
6553 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6555 if ( result == -1 ) {
\r
6556 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6557 errorText_ = errorStream_.str();
\r
6561 // Check if device supports input or output
\r
6562 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6563 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6564 if ( mode == OUTPUT )
\r
6565 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6567 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6568 errorText_ = errorStream_.str();
\r
6573 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6574 if ( mode == OUTPUT )
\r
6575 flags |= O_WRONLY;
\r
6576 else { // mode == INPUT
\r
6577 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6578 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6579 close( handle->id[0] );
\r
6580 handle->id[0] = 0;
\r
6581 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6582 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6583 errorText_ = errorStream_.str();
\r
6586 // Check that the number previously set channels is the same.
\r
6587 if ( stream_.nUserChannels[0] != channels ) {
\r
6588 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6589 errorText_ = errorStream_.str();
\r
6595 flags |= O_RDONLY;
\r
6598 // Set exclusive access if specified.
\r
6599 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6601 // Try to open the device.
\r
6603 fd = open( ainfo.devnode, flags, 0 );
\r
6605 if ( errno == EBUSY )
\r
6606 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6608 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6609 errorText_ = errorStream_.str();
\r
6613 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6615 if ( flags | O_RDWR ) {
\r
6616 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6617 if ( result == -1) {
\r
6618 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6619 errorText_ = errorStream_.str();
\r
6625 // Check the device channel support.
\r
6626 stream_.nUserChannels[mode] = channels;
\r
6627 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6629 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6630 errorText_ = errorStream_.str();
\r
6634 // Set the number of channels.
\r
6635 int deviceChannels = channels + firstChannel;
\r
6636 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6637 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6639 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6640 errorText_ = errorStream_.str();
\r
6643 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6645 // Get the data format mask
\r
6647 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6648 if ( result == -1 ) {
\r
6650 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6651 errorText_ = errorStream_.str();
\r
6655 // Determine how to set the device format.
\r
6656 stream_.userFormat = format;
\r
6657 int deviceFormat = -1;
\r
6658 stream_.doByteSwap[mode] = false;
\r
6659 if ( format == RTAUDIO_SINT8 ) {
\r
6660 if ( mask & AFMT_S8 ) {
\r
6661 deviceFormat = AFMT_S8;
\r
6662 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6665 else if ( format == RTAUDIO_SINT16 ) {
\r
6666 if ( mask & AFMT_S16_NE ) {
\r
6667 deviceFormat = AFMT_S16_NE;
\r
6668 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6670 else if ( mask & AFMT_S16_OE ) {
\r
6671 deviceFormat = AFMT_S16_OE;
\r
6672 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6673 stream_.doByteSwap[mode] = true;
\r
6676 else if ( format == RTAUDIO_SINT24 ) {
\r
6677 if ( mask & AFMT_S24_NE ) {
\r
6678 deviceFormat = AFMT_S24_NE;
\r
6679 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6681 else if ( mask & AFMT_S24_OE ) {
\r
6682 deviceFormat = AFMT_S24_OE;
\r
6683 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6684 stream_.doByteSwap[mode] = true;
\r
6687 else if ( format == RTAUDIO_SINT32 ) {
\r
6688 if ( mask & AFMT_S32_NE ) {
\r
6689 deviceFormat = AFMT_S32_NE;
\r
6690 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6692 else if ( mask & AFMT_S32_OE ) {
\r
6693 deviceFormat = AFMT_S32_OE;
\r
6694 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6695 stream_.doByteSwap[mode] = true;
\r
6699 if ( deviceFormat == -1 ) {
\r
6700 // The user requested format is not natively supported by the device.
\r
6701 if ( mask & AFMT_S16_NE ) {
\r
6702 deviceFormat = AFMT_S16_NE;
\r
6703 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6705 else if ( mask & AFMT_S32_NE ) {
\r
6706 deviceFormat = AFMT_S32_NE;
\r
6707 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6709 else if ( mask & AFMT_S24_NE ) {
\r
6710 deviceFormat = AFMT_S24_NE;
\r
6711 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6713 else if ( mask & AFMT_S16_OE ) {
\r
6714 deviceFormat = AFMT_S16_OE;
\r
6715 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6716 stream_.doByteSwap[mode] = true;
\r
6718 else if ( mask & AFMT_S32_OE ) {
\r
6719 deviceFormat = AFMT_S32_OE;
\r
6720 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6721 stream_.doByteSwap[mode] = true;
\r
6723 else if ( mask & AFMT_S24_OE ) {
\r
6724 deviceFormat = AFMT_S24_OE;
\r
6725 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6726 stream_.doByteSwap[mode] = true;
\r
6728 else if ( mask & AFMT_S8) {
\r
6729 deviceFormat = AFMT_S8;
\r
6730 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6734 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6735 // This really shouldn't happen ...
\r
6737 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6738 errorText_ = errorStream_.str();
\r
6742 // Set the data format.
\r
6743 int temp = deviceFormat;
\r
6744 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6745 if ( result == -1 || deviceFormat != temp ) {
\r
6747 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6748 errorText_ = errorStream_.str();
\r
6752 // Attempt to set the buffer size. According to OSS, the minimum
\r
6753 // number of buffers is two. The supposed minimum buffer size is 16
\r
6754 // bytes, so that will be our lower bound. The argument to this
\r
6755 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6756 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6757 // We'll check the actual value used near the end of the setup
\r
6759 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6760 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6762 if ( options ) buffers = options->numberOfBuffers;
\r
6763 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6764 if ( buffers < 2 ) buffers = 3;
\r
6765 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6766 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6767 if ( result == -1 ) {
\r
6769 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6770 errorText_ = errorStream_.str();
\r
6773 stream_.nBuffers = buffers;
\r
6775 // Save buffer size (in sample frames).
\r
6776 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6777 stream_.bufferSize = *bufferSize;
\r
6779 // Set the sample rate.
\r
6780 int srate = sampleRate;
\r
6781 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6782 if ( result == -1 ) {
\r
6784 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6785 errorText_ = errorStream_.str();
\r
6789 // Verify the sample rate setup worked.
\r
6790 if ( abs( srate - sampleRate ) > 100 ) {
\r
6792 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6793 errorText_ = errorStream_.str();
\r
6796 stream_.sampleRate = sampleRate;
\r
6798 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6799 // We're doing duplex setup here.
\r
6800 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6801 stream_.nDeviceChannels[0] = deviceChannels;
\r
6804 // Set interleaving parameters.
\r
6805 stream_.userInterleaved = true;
\r
6806 stream_.deviceInterleaved[mode] = true;
\r
6807 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6808 stream_.userInterleaved = false;
\r
6810 // Set flags for buffer conversion
\r
6811 stream_.doConvertBuffer[mode] = false;
\r
6812 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6813 stream_.doConvertBuffer[mode] = true;
\r
6814 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6815 stream_.doConvertBuffer[mode] = true;
\r
6816 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6817 stream_.nUserChannels[mode] > 1 )
\r
6818 stream_.doConvertBuffer[mode] = true;
\r
6820 // Allocate the stream handles if necessary and then save.
\r
6821 if ( stream_.apiHandle == 0 ) {
\r
6823 handle = new OssHandle;
\r
6825 catch ( std::bad_alloc& ) {
\r
6826 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6830 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6831 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6835 stream_.apiHandle = (void *) handle;
\r
6838 handle = (OssHandle *) stream_.apiHandle;
\r
6840 handle->id[mode] = fd;
\r
6842 // Allocate necessary internal buffers.
\r
6843 unsigned long bufferBytes;
\r
6844 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6845 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6846 if ( stream_.userBuffer[mode] == NULL ) {
\r
6847 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6851 if ( stream_.doConvertBuffer[mode] ) {
\r
6853 bool makeBuffer = true;
\r
6854 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6855 if ( mode == INPUT ) {
\r
6856 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6857 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6858 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6862 if ( makeBuffer ) {
\r
6863 bufferBytes *= *bufferSize;
\r
6864 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6865 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6866 if ( stream_.deviceBuffer == NULL ) {
\r
6867 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6873 stream_.device[mode] = device;
\r
6874 stream_.state = STREAM_STOPPED;
\r
6876 // Setup the buffer conversion information structure.
\r
6877 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6879 // Setup thread if necessary.
\r
6880 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6881 // We had already set up an output stream.
\r
6882 stream_.mode = DUPLEX;
\r
6883 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6886 stream_.mode = mode;
\r
6888 // Setup callback thread.
\r
6889 stream_.callbackInfo.object = (void *) this;
\r
6891 // Set the thread attributes for joinable and realtime scheduling
\r
6892 // priority. The higher priority will only take affect if the
\r
6893 // program is run as root or suid.
\r
6894 pthread_attr_t attr;
\r
6895 pthread_attr_init( &attr );
\r
6896 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6897 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6898 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6899 struct sched_param param;
\r
6900 int priority = options->priority;
\r
6901 int min = sched_get_priority_min( SCHED_RR );
\r
6902 int max = sched_get_priority_max( SCHED_RR );
\r
6903 if ( priority < min ) priority = min;
\r
6904 else if ( priority > max ) priority = max;
\r
6905 param.sched_priority = priority;
\r
6906 pthread_attr_setschedparam( &attr, ¶m );
\r
6907 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6910 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6912 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6915 stream_.callbackInfo.isRunning = true;
\r
6916 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6917 pthread_attr_destroy( &attr );
\r
6919 stream_.callbackInfo.isRunning = false;
\r
6920 errorText_ = "RtApiOss::error creating callback thread!";
\r
6929 pthread_cond_destroy( &handle->runnable );
\r
6930 if ( handle->id[0] ) close( handle->id[0] );
\r
6931 if ( handle->id[1] ) close( handle->id[1] );
\r
6933 stream_.apiHandle = 0;
\r
6936 for ( int i=0; i<2; i++ ) {
\r
6937 if ( stream_.userBuffer[i] ) {
\r
6938 free( stream_.userBuffer[i] );
\r
6939 stream_.userBuffer[i] = 0;
\r
6943 if ( stream_.deviceBuffer ) {
\r
6944 free( stream_.deviceBuffer );
\r
6945 stream_.deviceBuffer = 0;
\r
6951 void RtApiOss :: closeStream()
\r
6953 if ( stream_.state == STREAM_CLOSED ) {
\r
6954 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
6955 error( RtError::WARNING );
\r
6959 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6960 stream_.callbackInfo.isRunning = false;
\r
6961 MUTEX_LOCK( &stream_.mutex );
\r
6962 if ( stream_.state == STREAM_STOPPED )
\r
6963 pthread_cond_signal( &handle->runnable );
\r
6964 MUTEX_UNLOCK( &stream_.mutex );
\r
6965 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6967 if ( stream_.state == STREAM_RUNNING ) {
\r
6968 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6969 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
6971 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
6972 stream_.state = STREAM_STOPPED;
\r
6976 pthread_cond_destroy( &handle->runnable );
\r
6977 if ( handle->id[0] ) close( handle->id[0] );
\r
6978 if ( handle->id[1] ) close( handle->id[1] );
\r
6980 stream_.apiHandle = 0;
\r
6983 for ( int i=0; i<2; i++ ) {
\r
6984 if ( stream_.userBuffer[i] ) {
\r
6985 free( stream_.userBuffer[i] );
\r
6986 stream_.userBuffer[i] = 0;
\r
6990 if ( stream_.deviceBuffer ) {
\r
6991 free( stream_.deviceBuffer );
\r
6992 stream_.deviceBuffer = 0;
\r
6995 stream_.mode = UNINITIALIZED;
\r
6996 stream_.state = STREAM_CLOSED;
\r
6999 void RtApiOss :: startStream()
\r
7002 if ( stream_.state == STREAM_RUNNING ) {
\r
7003 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7004 error( RtError::WARNING );
\r
7008 MUTEX_LOCK( &stream_.mutex );
\r
7010 stream_.state = STREAM_RUNNING;
\r
7012 // No need to do anything else here ... OSS automatically starts
\r
7013 // when fed samples.
\r
7015 MUTEX_UNLOCK( &stream_.mutex );
\r
7017 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7018 pthread_cond_signal( &handle->runnable );
\r
7021 void RtApiOss :: stopStream()
\r
7024 if ( stream_.state == STREAM_STOPPED ) {
\r
7025 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7026 error( RtError::WARNING );
\r
7030 MUTEX_LOCK( &stream_.mutex );
\r
7032 // The state might change while waiting on a mutex.
\r
7033 if ( stream_.state == STREAM_STOPPED ) {
\r
7034 MUTEX_UNLOCK( &stream_.mutex );
\r
7039 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7040 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7042 // Flush the output with zeros a few times.
\r
7045 RtAudioFormat format;
\r
7047 if ( stream_.doConvertBuffer[0] ) {
\r
7048 buffer = stream_.deviceBuffer;
\r
7049 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7050 format = stream_.deviceFormat[0];
\r
7053 buffer = stream_.userBuffer[0];
\r
7054 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7055 format = stream_.userFormat;
\r
7058 memset( buffer, 0, samples * formatBytes(format) );
\r
7059 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7060 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7061 if ( result == -1 ) {
\r
7062 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7063 error( RtError::WARNING );
\r
7067 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7068 if ( result == -1 ) {
\r
7069 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7070 errorText_ = errorStream_.str();
\r
7073 handle->triggered = false;
\r
7076 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7077 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7078 if ( result == -1 ) {
\r
7079 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7080 errorText_ = errorStream_.str();
\r
7086 stream_.state = STREAM_STOPPED;
\r
7087 MUTEX_UNLOCK( &stream_.mutex );
\r
7089 if ( result != -1 ) return;
\r
7090 error( RtError::SYSTEM_ERROR );
\r
7093 void RtApiOss :: abortStream()
\r
7096 if ( stream_.state == STREAM_STOPPED ) {
\r
7097 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7098 error( RtError::WARNING );
\r
7102 MUTEX_LOCK( &stream_.mutex );
\r
7104 // The state might change while waiting on a mutex.
\r
7105 if ( stream_.state == STREAM_STOPPED ) {
\r
7106 MUTEX_UNLOCK( &stream_.mutex );
\r
7111 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7112 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7113 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7114 if ( result == -1 ) {
\r
7115 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7116 errorText_ = errorStream_.str();
\r
7119 handle->triggered = false;
\r
7122 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7123 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7124 if ( result == -1 ) {
\r
7125 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7126 errorText_ = errorStream_.str();
\r
7132 stream_.state = STREAM_STOPPED;
\r
7133 MUTEX_UNLOCK( &stream_.mutex );
\r
7135 if ( result != -1 ) return;
\r
7136 error( RtError::SYSTEM_ERROR );
\r
7139 void RtApiOss :: callbackEvent()
\r
7141 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7142 if ( stream_.state == STREAM_STOPPED ) {
\r
7143 MUTEX_LOCK( &stream_.mutex );
\r
7144 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7145 if ( stream_.state != STREAM_RUNNING ) {
\r
7146 MUTEX_UNLOCK( &stream_.mutex );
\r
7149 MUTEX_UNLOCK( &stream_.mutex );
\r
7152 if ( stream_.state == STREAM_CLOSED ) {
\r
7153 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7154 error( RtError::WARNING );
\r
7158 // Invoke user callback to get fresh output data.
\r
7159 int doStopStream = 0;
\r
7160 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7161 double streamTime = getStreamTime();
\r
7162 RtAudioStreamStatus status = 0;
\r
7163 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7164 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7165 handle->xrun[0] = false;
\r
7167 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7168 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7169 handle->xrun[1] = false;
\r
7171 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7172 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7173 if ( doStopStream == 2 ) {
\r
7174 this->abortStream();
\r
7178 MUTEX_LOCK( &stream_.mutex );
\r
7180 // The state might change while waiting on a mutex.
\r
7181 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7186 RtAudioFormat format;
\r
7188 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7190 // Setup parameters and do buffer conversion if necessary.
\r
7191 if ( stream_.doConvertBuffer[0] ) {
\r
7192 buffer = stream_.deviceBuffer;
\r
7193 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7194 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7195 format = stream_.deviceFormat[0];
\r
7198 buffer = stream_.userBuffer[0];
\r
7199 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7200 format = stream_.userFormat;
\r
7203 // Do byte swapping if necessary.
\r
7204 if ( stream_.doByteSwap[0] )
\r
7205 byteSwapBuffer( buffer, samples, format );
\r
7207 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7209 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7210 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7211 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7212 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7213 handle->triggered = true;
\r
7216 // Write samples to device.
\r
7217 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7219 if ( result == -1 ) {
\r
7220 // We'll assume this is an underrun, though there isn't a
\r
7221 // specific means for determining that.
\r
7222 handle->xrun[0] = true;
\r
7223 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7224 error( RtError::WARNING );
\r
7225 // Continue on to input section.
\r
7229 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7231 // Setup parameters.
\r
7232 if ( stream_.doConvertBuffer[1] ) {
\r
7233 buffer = stream_.deviceBuffer;
\r
7234 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7235 format = stream_.deviceFormat[1];
\r
7238 buffer = stream_.userBuffer[1];
\r
7239 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7240 format = stream_.userFormat;
\r
7243 // Read samples from device.
\r
7244 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7246 if ( result == -1 ) {
\r
7247 // We'll assume this is an overrun, though there isn't a
\r
7248 // specific means for determining that.
\r
7249 handle->xrun[1] = true;
\r
7250 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7251 error( RtError::WARNING );
\r
7255 // Do byte swapping if necessary.
\r
7256 if ( stream_.doByteSwap[1] )
\r
7257 byteSwapBuffer( buffer, samples, format );
\r
7259 // Do buffer conversion if necessary.
\r
7260 if ( stream_.doConvertBuffer[1] )
\r
7261 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7265 MUTEX_UNLOCK( &stream_.mutex );
\r
7267 RtApi::tickStreamTime();
\r
7268 if ( doStopStream == 1 ) this->stopStream();
\r
7271 extern "C" void *ossCallbackHandler( void *ptr )
\r
7273 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7274 RtApiOss *object = (RtApiOss *) info->object;
\r
7275 bool *isRunning = &info->isRunning;
\r
7277 while ( *isRunning == true ) {
\r
7278 pthread_testcancel();
\r
7279 object->callbackEvent();
\r
7282 pthread_exit( NULL );
\r
7285 //******************** End of __LINUX_OSS__ *********************//
\r
7289 // *************************************************** //
\r
7291 // Protected common (OS-independent) RtAudio methods.
\r
7293 // *************************************************** //
\r
7295 // This method can be modified to control the behavior of error
\r
7296 // message printing.
\r
7297 void RtApi :: error( RtError::Type type )
\r
7299 errorStream_.str(""); // clear the ostringstream
\r
7300 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7301 std::cerr << '\n' << errorText_ << "\n\n";
\r
7302 else if ( type != RtError::WARNING )
\r
7303 throw( RtError( errorText_, type ) );
\r
7306 void RtApi :: verifyStream()
\r
7308 if ( stream_.state == STREAM_CLOSED ) {
\r
7309 errorText_ = "RtApi:: a stream is not open!";
\r
7310 error( RtError::INVALID_USE );
\r
7314 void RtApi :: clearStreamInfo()
\r
7316 stream_.mode = UNINITIALIZED;
\r
7317 stream_.state = STREAM_CLOSED;
\r
7318 stream_.sampleRate = 0;
\r
7319 stream_.bufferSize = 0;
\r
7320 stream_.nBuffers = 0;
\r
7321 stream_.userFormat = 0;
\r
7322 stream_.userInterleaved = true;
\r
7323 stream_.streamTime = 0.0;
\r
7324 stream_.apiHandle = 0;
\r
7325 stream_.deviceBuffer = 0;
\r
7326 stream_.callbackInfo.callback = 0;
\r
7327 stream_.callbackInfo.userData = 0;
\r
7328 stream_.callbackInfo.isRunning = false;
\r
7329 for ( int i=0; i<2; i++ ) {
\r
7330 stream_.device[i] = 11111;
\r
7331 stream_.doConvertBuffer[i] = false;
\r
7332 stream_.deviceInterleaved[i] = true;
\r
7333 stream_.doByteSwap[i] = false;
\r
7334 stream_.nUserChannels[i] = 0;
\r
7335 stream_.nDeviceChannels[i] = 0;
\r
7336 stream_.channelOffset[i] = 0;
\r
7337 stream_.deviceFormat[i] = 0;
\r
7338 stream_.latency[i] = 0;
\r
7339 stream_.userBuffer[i] = 0;
\r
7340 stream_.convertInfo[i].channels = 0;
\r
7341 stream_.convertInfo[i].inJump = 0;
\r
7342 stream_.convertInfo[i].outJump = 0;
\r
7343 stream_.convertInfo[i].inFormat = 0;
\r
7344 stream_.convertInfo[i].outFormat = 0;
\r
7345 stream_.convertInfo[i].inOffset.clear();
\r
7346 stream_.convertInfo[i].outOffset.clear();
\r
7350 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7352 if ( format == RTAUDIO_SINT16 )
\r
7354 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7355 format == RTAUDIO_FLOAT32 )
\r
7357 else if ( format == RTAUDIO_FLOAT64 )
\r
7359 else if ( format == RTAUDIO_SINT8 )
\r
7362 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7363 error( RtError::WARNING );
\r
7368 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7370 if ( mode == INPUT ) { // convert device to user buffer
\r
7371 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7372 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7373 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7374 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7376 else { // convert user to device buffer
\r
7377 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7378 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7379 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7380 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7383 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7384 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7386 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7388 // Set up the interleave/deinterleave offsets.
\r
7389 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7390 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7391 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7392 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7393 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7394 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7395 stream_.convertInfo[mode].inJump = 1;
\r
7399 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7400 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7401 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7402 stream_.convertInfo[mode].outJump = 1;
\r
7406 else { // no (de)interleaving
\r
7407 if ( stream_.userInterleaved ) {
\r
7408 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7409 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7410 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7414 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7415 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7416 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7417 stream_.convertInfo[mode].inJump = 1;
\r
7418 stream_.convertInfo[mode].outJump = 1;
\r
7423 // Add channel offset.
\r
7424 if ( firstChannel > 0 ) {
\r
7425 if ( stream_.deviceInterleaved[mode] ) {
\r
7426 if ( mode == OUTPUT ) {
\r
7427 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7428 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7431 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7432 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7436 if ( mode == OUTPUT ) {
\r
7437 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7438 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7441 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7442 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7448 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7450 // This function does format conversion, input/output channel compensation, and
\r
7451 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7452 // the lower three bytes of a 32-bit integer.
\r
7454 // Clear our device buffer when in/out duplex device channels are different
\r
7455 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7456 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7457 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7460 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7462 Float64 *out = (Float64 *)outBuffer;
\r
7464 if (info.inFormat == RTAUDIO_SINT8) {
\r
7465 signed char *in = (signed char *)inBuffer;
\r
7466 scale = 1.0 / 127.5;
\r
7467 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7468 for (j=0; j<info.channels; j++) {
\r
7469 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7470 out[info.outOffset[j]] += 0.5;
\r
7471 out[info.outOffset[j]] *= scale;
\r
7473 in += info.inJump;
\r
7474 out += info.outJump;
\r
7477 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7478 Int16 *in = (Int16 *)inBuffer;
\r
7479 scale = 1.0 / 32767.5;
\r
7480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7481 for (j=0; j<info.channels; j++) {
\r
7482 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7483 out[info.outOffset[j]] += 0.5;
\r
7484 out[info.outOffset[j]] *= scale;
\r
7486 in += info.inJump;
\r
7487 out += info.outJump;
\r
7490 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7491 Int32 *in = (Int32 *)inBuffer;
\r
7492 scale = 1.0 / 8388607.5;
\r
7493 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7494 for (j=0; j<info.channels; j++) {
\r
7495 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7496 out[info.outOffset[j]] += 0.5;
\r
7497 out[info.outOffset[j]] *= scale;
\r
7499 in += info.inJump;
\r
7500 out += info.outJump;
\r
7503 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7504 Int32 *in = (Int32 *)inBuffer;
\r
7505 scale = 1.0 / 2147483647.5;
\r
7506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7507 for (j=0; j<info.channels; j++) {
\r
7508 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7509 out[info.outOffset[j]] += 0.5;
\r
7510 out[info.outOffset[j]] *= scale;
\r
7512 in += info.inJump;
\r
7513 out += info.outJump;
\r
7516 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7517 Float32 *in = (Float32 *)inBuffer;
\r
7518 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7519 for (j=0; j<info.channels; j++) {
\r
7520 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7522 in += info.inJump;
\r
7523 out += info.outJump;
\r
7526 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7527 // Channel compensation and/or (de)interleaving only.
\r
7528 Float64 *in = (Float64 *)inBuffer;
\r
7529 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7530 for (j=0; j<info.channels; j++) {
\r
7531 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7533 in += info.inJump;
\r
7534 out += info.outJump;
\r
7538 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7540 Float32 *out = (Float32 *)outBuffer;
\r
7542 if (info.inFormat == RTAUDIO_SINT8) {
\r
7543 signed char *in = (signed char *)inBuffer;
\r
7544 scale = (Float32) ( 1.0 / 127.5 );
\r
7545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7546 for (j=0; j<info.channels; j++) {
\r
7547 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7548 out[info.outOffset[j]] += 0.5;
\r
7549 out[info.outOffset[j]] *= scale;
\r
7551 in += info.inJump;
\r
7552 out += info.outJump;
\r
7555 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7556 Int16 *in = (Int16 *)inBuffer;
\r
7557 scale = (Float32) ( 1.0 / 32767.5 );
\r
7558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7559 for (j=0; j<info.channels; j++) {
\r
7560 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7561 out[info.outOffset[j]] += 0.5;
\r
7562 out[info.outOffset[j]] *= scale;
\r
7564 in += info.inJump;
\r
7565 out += info.outJump;
\r
7568 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7569 Int32 *in = (Int32 *)inBuffer;
\r
7570 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7571 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7572 for (j=0; j<info.channels; j++) {
\r
7573 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7574 out[info.outOffset[j]] += 0.5;
\r
7575 out[info.outOffset[j]] *= scale;
\r
7577 in += info.inJump;
\r
7578 out += info.outJump;
\r
7581 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7582 Int32 *in = (Int32 *)inBuffer;
\r
7583 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7584 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7585 for (j=0; j<info.channels; j++) {
\r
7586 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7587 out[info.outOffset[j]] += 0.5;
\r
7588 out[info.outOffset[j]] *= scale;
\r
7590 in += info.inJump;
\r
7591 out += info.outJump;
\r
7594 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7595 // Channel compensation and/or (de)interleaving only.
\r
7596 Float32 *in = (Float32 *)inBuffer;
\r
7597 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7598 for (j=0; j<info.channels; j++) {
\r
7599 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7601 in += info.inJump;
\r
7602 out += info.outJump;
\r
7605 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7606 Float64 *in = (Float64 *)inBuffer;
\r
7607 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7608 for (j=0; j<info.channels; j++) {
\r
7609 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7611 in += info.inJump;
\r
7612 out += info.outJump;
\r
7616 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7617 Int32 *out = (Int32 *)outBuffer;
\r
7618 if (info.inFormat == RTAUDIO_SINT8) {
\r
7619 signed char *in = (signed char *)inBuffer;
\r
7620 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7621 for (j=0; j<info.channels; j++) {
\r
7622 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7623 out[info.outOffset[j]] <<= 24;
\r
7625 in += info.inJump;
\r
7626 out += info.outJump;
\r
7629 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7630 Int16 *in = (Int16 *)inBuffer;
\r
7631 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7632 for (j=0; j<info.channels; j++) {
\r
7633 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7634 out[info.outOffset[j]] <<= 16;
\r
7636 in += info.inJump;
\r
7637 out += info.outJump;
\r
7640 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7641 Int32 *in = (Int32 *)inBuffer;
\r
7642 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7643 for (j=0; j<info.channels; j++) {
\r
7644 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7645 out[info.outOffset[j]] <<= 8;
\r
7647 in += info.inJump;
\r
7648 out += info.outJump;
\r
7651 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7652 // Channel compensation and/or (de)interleaving only.
\r
7653 Int32 *in = (Int32 *)inBuffer;
\r
7654 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7655 for (j=0; j<info.channels; j++) {
\r
7656 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7658 in += info.inJump;
\r
7659 out += info.outJump;
\r
7662 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7663 Float32 *in = (Float32 *)inBuffer;
\r
7664 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7665 for (j=0; j<info.channels; j++) {
\r
7666 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7668 in += info.inJump;
\r
7669 out += info.outJump;
\r
7672 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7673 Float64 *in = (Float64 *)inBuffer;
\r
7674 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7675 for (j=0; j<info.channels; j++) {
\r
7676 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7678 in += info.inJump;
\r
7679 out += info.outJump;
\r
7683 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7684 Int32 *out = (Int32 *)outBuffer;
\r
7685 if (info.inFormat == RTAUDIO_SINT8) {
\r
7686 signed char *in = (signed char *)inBuffer;
\r
7687 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7688 for (j=0; j<info.channels; j++) {
\r
7689 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7690 out[info.outOffset[j]] <<= 16;
\r
7692 in += info.inJump;
\r
7693 out += info.outJump;
\r
7696 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7697 Int16 *in = (Int16 *)inBuffer;
\r
7698 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7699 for (j=0; j<info.channels; j++) {
\r
7700 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7701 out[info.outOffset[j]] <<= 8;
\r
7703 in += info.inJump;
\r
7704 out += info.outJump;
\r
7707 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7708 // Channel compensation and/or (de)interleaving only.
\r
7709 Int32 *in = (Int32 *)inBuffer;
\r
7710 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7711 for (j=0; j<info.channels; j++) {
\r
7712 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7714 in += info.inJump;
\r
7715 out += info.outJump;
\r
7718 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7719 Int32 *in = (Int32 *)inBuffer;
\r
7720 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7721 for (j=0; j<info.channels; j++) {
\r
7722 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7723 out[info.outOffset[j]] >>= 8;
\r
7725 in += info.inJump;
\r
7726 out += info.outJump;
\r
7729 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7730 Float32 *in = (Float32 *)inBuffer;
\r
7731 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7732 for (j=0; j<info.channels; j++) {
\r
7733 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7735 in += info.inJump;
\r
7736 out += info.outJump;
\r
7739 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7740 Float64 *in = (Float64 *)inBuffer;
\r
7741 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7742 for (j=0; j<info.channels; j++) {
\r
7743 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7745 in += info.inJump;
\r
7746 out += info.outJump;
\r
7750 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7751 Int16 *out = (Int16 *)outBuffer;
\r
7752 if (info.inFormat == RTAUDIO_SINT8) {
\r
7753 signed char *in = (signed char *)inBuffer;
\r
7754 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7755 for (j=0; j<info.channels; j++) {
\r
7756 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7757 out[info.outOffset[j]] <<= 8;
\r
7759 in += info.inJump;
\r
7760 out += info.outJump;
\r
7763 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7764 // Channel compensation and/or (de)interleaving only.
\r
7765 Int16 *in = (Int16 *)inBuffer;
\r
7766 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7767 for (j=0; j<info.channels; j++) {
\r
7768 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7770 in += info.inJump;
\r
7771 out += info.outJump;
\r
7774 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7775 Int32 *in = (Int32 *)inBuffer;
\r
7776 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7777 for (j=0; j<info.channels; j++) {
\r
7778 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7780 in += info.inJump;
\r
7781 out += info.outJump;
\r
7784 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7785 Int32 *in = (Int32 *)inBuffer;
\r
7786 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7787 for (j=0; j<info.channels; j++) {
\r
7788 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7790 in += info.inJump;
\r
7791 out += info.outJump;
\r
7794 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7795 Float32 *in = (Float32 *)inBuffer;
\r
7796 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7797 for (j=0; j<info.channels; j++) {
\r
7798 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7800 in += info.inJump;
\r
7801 out += info.outJump;
\r
7804 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7805 Float64 *in = (Float64 *)inBuffer;
\r
7806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7807 for (j=0; j<info.channels; j++) {
\r
7808 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7810 in += info.inJump;
\r
7811 out += info.outJump;
\r
7815 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7816 signed char *out = (signed char *)outBuffer;
\r
7817 if (info.inFormat == RTAUDIO_SINT8) {
\r
7818 // Channel compensation and/or (de)interleaving only.
\r
7819 signed char *in = (signed char *)inBuffer;
\r
7820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7821 for (j=0; j<info.channels; j++) {
\r
7822 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7824 in += info.inJump;
\r
7825 out += info.outJump;
\r
7828 if (info.inFormat == RTAUDIO_SINT16) {
\r
7829 Int16 *in = (Int16 *)inBuffer;
\r
7830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7831 for (j=0; j<info.channels; j++) {
\r
7832 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7834 in += info.inJump;
\r
7835 out += info.outJump;
\r
7838 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7839 Int32 *in = (Int32 *)inBuffer;
\r
7840 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7841 for (j=0; j<info.channels; j++) {
\r
7842 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7844 in += info.inJump;
\r
7845 out += info.outJump;
\r
7848 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7849 Int32 *in = (Int32 *)inBuffer;
\r
7850 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7851 for (j=0; j<info.channels; j++) {
\r
7852 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7854 in += info.inJump;
\r
7855 out += info.outJump;
\r
7858 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7859 Float32 *in = (Float32 *)inBuffer;
\r
7860 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7861 for (j=0; j<info.channels; j++) {
\r
7862 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7864 in += info.inJump;
\r
7865 out += info.outJump;
\r
7868 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7869 Float64 *in = (Float64 *)inBuffer;
\r
7870 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7871 for (j=0; j<info.channels; j++) {
\r
7872 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7874 in += info.inJump;
\r
7875 out += info.outJump;
\r
7881 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
7882 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
7883 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7885 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7887 register char val;
\r
7888 register char *ptr;
\r
7891 if ( format == RTAUDIO_SINT16 ) {
\r
7892 for ( unsigned int i=0; i<samples; i++ ) {
\r
7893 // Swap 1st and 2nd bytes.
\r
7895 *(ptr) = *(ptr+1);
\r
7898 // Increment 2 bytes.
\r
7902 else if ( format == RTAUDIO_SINT24 ||
\r
7903 format == RTAUDIO_SINT32 ||
\r
7904 format == RTAUDIO_FLOAT32 ) {
\r
7905 for ( unsigned int i=0; i<samples; i++ ) {
\r
7906 // Swap 1st and 4th bytes.
\r
7908 *(ptr) = *(ptr+3);
\r
7911 // Swap 2nd and 3rd bytes.
\r
7914 *(ptr) = *(ptr+1);
\r
7917 // Increment 3 more bytes.
\r
7921 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7922 for ( unsigned int i=0; i<samples; i++ ) {
\r
7923 // Swap 1st and 8th bytes
\r
7925 *(ptr) = *(ptr+7);
\r
7928 // Swap 2nd and 7th bytes
\r
7931 *(ptr) = *(ptr+5);
\r
7934 // Swap 3rd and 6th bytes
\r
7937 *(ptr) = *(ptr+3);
\r
7940 // Swap 4th and 5th bytes
\r
7943 *(ptr) = *(ptr+1);
\r
7946 // Increment 5 more bytes.
\r
7952 // Indentation settings for Vim and Emacs
\r
7954 // Local Variables:
\r
7955 // c-basic-offset: 2
\r
7956 // indent-tabs-mode: nil
\r
7959 // vim: et sts=2 sw=2
\r