1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2011 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.9
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
109 #if defined(__UNIX_JACK__)
\r
110 if ( api == UNIX_JACK )
\r
111 rtapi_ = new RtApiJack();
\r
113 #if defined(__LINUX_ALSA__)
\r
114 if ( api == LINUX_ALSA )
\r
115 rtapi_ = new RtApiAlsa();
\r
117 #if defined(__LINUX_OSS__)
\r
118 if ( api == LINUX_OSS )
\r
119 rtapi_ = new RtApiOss();
\r
121 #if defined(__WINDOWS_ASIO__)
\r
122 if ( api == WINDOWS_ASIO )
\r
123 rtapi_ = new RtApiAsio();
\r
125 #if defined(__WINDOWS_DS__)
\r
126 if ( api == WINDOWS_DS )
\r
127 rtapi_ = new RtApiDs();
\r
129 #if defined(__MACOSX_CORE__)
\r
130 if ( api == MACOSX_CORE )
\r
131 rtapi_ = new RtApiCore();
\r
133 #if defined(__RTAUDIO_DUMMY__)
\r
134 if ( api == RTAUDIO_DUMMY )
\r
135 rtapi_ = new RtApiDummy();
\r
139 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
143 if ( api != UNSPECIFIED ) {
\r
144 // Attempt to open the specified API.
\r
146 if ( rtapi_ ) return;
\r
148 // No compiled support for specified API value. Issue a debug
\r
149 // warning and continue as if no API was specified.
\r
150 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
153 // Iterate through the compiled APIs and return as soon as we find
\r
154 // one with at least one device or we reach the end of the list.
\r
155 std::vector< RtAudio::Api > apis;
\r
156 getCompiledApi( apis );
\r
157 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
158 openRtApi( apis[i] );
\r
159 if ( rtapi_->getDeviceCount() ) break;
\r
162 if ( rtapi_ ) return;
\r
164 // It should not be possible to get here because the preprocessor
\r
165 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
166 // API-specific definitions are passed to the compiler. But just in
\r
167 // case something weird happens, we'll print out an error message.
\r
168 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
171 RtAudio :: ~RtAudio() throw()
\r
176 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
177 RtAudio::StreamParameters *inputParameters,
\r
178 RtAudioFormat format, unsigned int sampleRate,
\r
179 unsigned int *bufferFrames,
\r
180 RtAudioCallback callback, void *userData,
\r
181 RtAudio::StreamOptions *options )
\r
183 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
184 sampleRate, bufferFrames, callback,
\r
185 userData, options );
\r
188 // *************************************************** //
\r
190 // Public RtApi definitions (see end of file for
\r
191 // private or protected utility functions).
\r
193 // *************************************************** //
\r
197 stream_.state = STREAM_CLOSED;
\r
198 stream_.mode = UNINITIALIZED;
\r
199 stream_.apiHandle = 0;
\r
200 stream_.userBuffer[0] = 0;
\r
201 stream_.userBuffer[1] = 0;
\r
202 MUTEX_INITIALIZE( &stream_.mutex );
\r
203 showWarnings_ = true;
\r
208 MUTEX_DESTROY( &stream_.mutex );
\r
211 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
212 RtAudio::StreamParameters *iParams,
\r
213 RtAudioFormat format, unsigned int sampleRate,
\r
214 unsigned int *bufferFrames,
\r
215 RtAudioCallback callback, void *userData,
\r
216 RtAudio::StreamOptions *options )
\r
218 if ( stream_.state != STREAM_CLOSED ) {
\r
219 errorText_ = "RtApi::openStream: a stream is already open!";
\r
220 error( RtError::INVALID_USE );
\r
223 if ( oParams && oParams->nChannels < 1 ) {
\r
224 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
225 error( RtError::INVALID_USE );
\r
228 if ( iParams && iParams->nChannels < 1 ) {
\r
229 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
230 error( RtError::INVALID_USE );
\r
233 if ( oParams == NULL && iParams == NULL ) {
\r
234 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
235 error( RtError::INVALID_USE );
\r
238 if ( formatBytes(format) == 0 ) {
\r
239 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
240 error( RtError::INVALID_USE );
\r
243 unsigned int nDevices = getDeviceCount();
\r
244 unsigned int oChannels = 0;
\r
246 oChannels = oParams->nChannels;
\r
247 if ( oParams->deviceId >= nDevices ) {
\r
248 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
249 error( RtError::INVALID_USE );
\r
253 unsigned int iChannels = 0;
\r
255 iChannels = iParams->nChannels;
\r
256 if ( iParams->deviceId >= nDevices ) {
\r
257 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
258 error( RtError::INVALID_USE );
\r
265 if ( oChannels > 0 ) {
\r
267 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
268 sampleRate, format, bufferFrames, options );
\r
269 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
272 if ( iChannels > 0 ) {
\r
274 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
275 sampleRate, format, bufferFrames, options );
\r
276 if ( result == false ) {
\r
277 if ( oChannels > 0 ) closeStream();
\r
278 error( RtError::SYSTEM_ERROR );
\r
282 stream_.callbackInfo.callback = (void *) callback;
\r
283 stream_.callbackInfo.userData = userData;
\r
285 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
286 stream_.state = STREAM_STOPPED;
\r
289 unsigned int RtApi :: getDefaultInputDevice( void )
\r
291 // Should be implemented in subclasses if possible.
\r
295 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
297 // Should be implemented in subclasses if possible.
\r
301 void RtApi :: closeStream( void )
\r
303 // MUST be implemented in subclasses!
\r
307 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
308 unsigned int firstChannel, unsigned int sampleRate,
\r
309 RtAudioFormat format, unsigned int *bufferSize,
\r
310 RtAudio::StreamOptions *options )
\r
312 // MUST be implemented in subclasses!
\r
316 void RtApi :: tickStreamTime( void )
\r
318 // Subclasses that do not provide their own implementation of
\r
319 // getStreamTime should call this function once per buffer I/O to
\r
320 // provide basic stream time support.
\r
322 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
324 #if defined( HAVE_GETTIMEOFDAY )
\r
325 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
329 long RtApi :: getStreamLatency( void )
\r
333 long totalLatency = 0;
\r
334 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
335 totalLatency = stream_.latency[0];
\r
336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
337 totalLatency += stream_.latency[1];
\r
339 return totalLatency;
\r
342 double RtApi :: getStreamTime( void )
\r
346 #if defined( HAVE_GETTIMEOFDAY )
\r
347 // Return a very accurate estimate of the stream time by
\r
348 // adding in the elapsed time since the last tick.
\r
349 struct timeval then;
\r
350 struct timeval now;
\r
352 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
353 return stream_.streamTime;
\r
355 gettimeofday( &now, NULL );
\r
356 then = stream_.lastTickTimestamp;
\r
357 return stream_.streamTime +
\r
358 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
359 (then.tv_sec + 0.000001 * then.tv_usec));
\r
361 return stream_.streamTime;
\r
365 unsigned int RtApi :: getStreamSampleRate( void )
\r
369 return stream_.sampleRate;
\r
373 // *************************************************** //
\r
375 // OS/API-specific methods.
\r
377 // *************************************************** //
\r
379 #if defined(__MACOSX_CORE__)
\r
381 // The OS X CoreAudio API is designed to use a separate callback
\r
382 // procedure for each of its audio devices. A single RtAudio duplex
\r
383 // stream using two different devices is supported here, though it
\r
384 // cannot be guaranteed to always behave correctly because we cannot
\r
385 // synchronize these two callbacks.
\r
387 // A property listener is installed for over/underrun information.
\r
388 // However, no functionality is currently provided to allow property
\r
389 // listeners to trigger user handlers because it is unclear what could
\r
390 // be done if a critical stream parameter (buffer size, sample rate,
\r
391 // device disconnect) notification arrived. The listeners entail
\r
392 // quite a bit of extra code and most likely, a user program wouldn't
\r
393 // be prepared for the result anyway. However, we do provide a flag
\r
394 // to the client callback function to inform of an over/underrun.
\r
396 // A structure to hold various information related to the CoreAudio API
\r
398 struct CoreHandle {
\r
399 AudioDeviceID id[2]; // device ids
\r
400 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
401 AudioDeviceIOProcID procId[2];
\r
403 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
404 UInt32 nStreams[2]; // number of streams to use
\r
406 char *deviceBuffer;
\r
407 pthread_cond_t condition;
\r
408 int drainCounter; // Tracks callback counts when draining
\r
409 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
412 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
415 RtApiCore:: RtApiCore()
\r
417 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
418 // This is a largely undocumented but absolutely necessary
\r
419 // requirement starting with OS-X 10.6. If not called, queries and
\r
420 // updates to various audio device properties are not handled
\r
422 CFRunLoopRef theRunLoop = NULL;
\r
423 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
424 kAudioObjectPropertyScopeGlobal,
\r
425 kAudioObjectPropertyElementMaster };
\r
426 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
427 if ( result != noErr ) {
\r
428 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
429 error( RtError::WARNING );
\r
434 RtApiCore :: ~RtApiCore()
\r
436 // The subclass destructor gets called before the base class
\r
437 // destructor, so close an existing stream before deallocating
\r
438 // apiDeviceId memory.
\r
439 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
442 unsigned int RtApiCore :: getDeviceCount( void )
\r
444 // Find out how many audio devices there are, if any.
\r
446 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
447 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
448 if ( result != noErr ) {
\r
449 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
450 error( RtError::WARNING );
\r
454 return dataSize / sizeof( AudioDeviceID );
\r
457 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
459 unsigned int nDevices = getDeviceCount();
\r
460 if ( nDevices <= 1 ) return 0;
\r
463 UInt32 dataSize = sizeof( AudioDeviceID );
\r
464 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
465 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
466 if ( result != noErr ) {
\r
467 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
468 error( RtError::WARNING );
\r
472 dataSize *= nDevices;
\r
473 AudioDeviceID deviceList[ nDevices ];
\r
474 property.mSelector = kAudioHardwarePropertyDevices;
\r
475 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
476 if ( result != noErr ) {
\r
477 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
478 error( RtError::WARNING );
\r
482 for ( unsigned int i=0; i<nDevices; i++ )
\r
483 if ( id == deviceList[i] ) return i;
\r
485 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
486 error( RtError::WARNING );
\r
490 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
492 unsigned int nDevices = getDeviceCount();
\r
493 if ( nDevices <= 1 ) return 0;
\r
496 UInt32 dataSize = sizeof( AudioDeviceID );
\r
497 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
498 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
499 if ( result != noErr ) {
\r
500 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
501 error( RtError::WARNING );
\r
505 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
506 AudioDeviceID deviceList[ nDevices ];
\r
507 property.mSelector = kAudioHardwarePropertyDevices;
\r
508 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
509 if ( result != noErr ) {
\r
510 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
511 error( RtError::WARNING );
\r
515 for ( unsigned int i=0; i<nDevices; i++ )
\r
516 if ( id == deviceList[i] ) return i;
\r
518 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
519 error( RtError::WARNING );
\r
523 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
525 RtAudio::DeviceInfo info;
\r
526 info.probed = false;
\r
529 unsigned int nDevices = getDeviceCount();
\r
530 if ( nDevices == 0 ) {
\r
531 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
532 error( RtError::INVALID_USE );
\r
535 if ( device >= nDevices ) {
\r
536 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
537 error( RtError::INVALID_USE );
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
542 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
543 kAudioObjectPropertyScopeGlobal,
\r
544 kAudioObjectPropertyElementMaster };
\r
545 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
546 0, NULL, &dataSize, (void *) &deviceList );
\r
547 if ( result != noErr ) {
\r
548 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
549 error( RtError::WARNING );
\r
553 AudioDeviceID id = deviceList[ device ];
\r
555 // Get the device name.
\r
557 CFStringRef cfname;
\r
558 dataSize = sizeof( CFStringRef );
\r
559 property.mSelector = kAudioObjectPropertyManufacturer;
\r
560 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
561 if ( result != noErr ) {
\r
562 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
563 errorText_ = errorStream_.str();
\r
564 error( RtError::WARNING );
\r
568 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
569 int length = CFStringGetLength(cfname);
\r
570 char *mname = (char *)malloc(length * 3 + 1);
\r
571 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
572 info.name.append( (const char *)mname, strlen(mname) );
\r
573 info.name.append( ": " );
\r
574 CFRelease( cfname );
\r
577 property.mSelector = kAudioObjectPropertyName;
\r
578 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
579 if ( result != noErr ) {
\r
580 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
581 errorText_ = errorStream_.str();
\r
582 error( RtError::WARNING );
\r
586 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
587 length = CFStringGetLength(cfname);
\r
588 char *name = (char *)malloc(length * 3 + 1);
\r
589 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
590 info.name.append( (const char *)name, strlen(name) );
\r
591 CFRelease( cfname );
\r
594 // Get the output stream "configuration".
\r
595 AudioBufferList *bufferList = nil;
\r
596 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
597 property.mScope = kAudioDevicePropertyScopeOutput;
\r
598 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
600 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
601 if ( result != noErr || dataSize == 0 ) {
\r
602 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
603 errorText_ = errorStream_.str();
\r
604 error( RtError::WARNING );
\r
608 // Allocate the AudioBufferList.
\r
609 bufferList = (AudioBufferList *) malloc( dataSize );
\r
610 if ( bufferList == NULL ) {
\r
611 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
612 error( RtError::WARNING );
\r
616 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
617 if ( result != noErr || dataSize == 0 ) {
\r
618 free( bufferList );
\r
619 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
620 errorText_ = errorStream_.str();
\r
621 error( RtError::WARNING );
\r
625 // Get output channel information.
\r
626 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
627 for ( i=0; i<nStreams; i++ )
\r
628 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
629 free( bufferList );
\r
631 // Get the input stream "configuration".
\r
632 property.mScope = kAudioDevicePropertyScopeInput;
\r
633 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
634 if ( result != noErr || dataSize == 0 ) {
\r
635 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
636 errorText_ = errorStream_.str();
\r
637 error( RtError::WARNING );
\r
641 // Allocate the AudioBufferList.
\r
642 bufferList = (AudioBufferList *) malloc( dataSize );
\r
643 if ( bufferList == NULL ) {
\r
644 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
645 error( RtError::WARNING );
\r
649 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
650 if (result != noErr || dataSize == 0) {
\r
651 free( bufferList );
\r
652 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtError::WARNING );
\r
658 // Get input channel information.
\r
659 nStreams = bufferList->mNumberBuffers;
\r
660 for ( i=0; i<nStreams; i++ )
\r
661 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
662 free( bufferList );
\r
664 // If device opens for both playback and capture, we determine the channels.
\r
665 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
666 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
668 // Probe the device sample rates.
\r
669 bool isInput = false;
\r
670 if ( info.outputChannels == 0 ) isInput = true;
\r
672 // Determine the supported sample rates.
\r
673 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
674 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
676 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
677 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
678 errorText_ = errorStream_.str();
\r
679 error( RtError::WARNING );
\r
683 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
684 AudioValueRange rangeList[ nRanges ];
\r
685 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
686 if ( result != kAudioHardwareNoError ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtError::WARNING );
\r
693 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
694 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
695 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
696 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
699 info.sampleRates.clear();
\r
700 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
701 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
702 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
705 if ( info.sampleRates.size() == 0 ) {
\r
706 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
707 errorText_ = errorStream_.str();
\r
708 error( RtError::WARNING );
\r
712 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
713 // Thus, any other "physical" formats supported by the device are of
\r
714 // no interest to the client.
\r
715 info.nativeFormats = RTAUDIO_FLOAT32;
\r
717 if ( info.outputChannels > 0 )
\r
718 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
719 if ( info.inputChannels > 0 )
\r
720 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
722 info.probed = true;
\r
726 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
727 const AudioTimeStamp* inNow,
\r
728 const AudioBufferList* inInputData,
\r
729 const AudioTimeStamp* inInputTime,
\r
730 AudioBufferList* outOutputData,
\r
731 const AudioTimeStamp* inOutputTime,
\r
732 void* infoPointer )
\r
734 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
736 RtApiCore *object = (RtApiCore *) info->object;
\r
737 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
738 return kAudioHardwareUnspecifiedError;
\r
740 return kAudioHardwareNoError;
\r
743 OSStatus xrunListener( AudioObjectID inDevice,
\r
745 const AudioObjectPropertyAddress properties[],
\r
746 void* handlePointer )
\r
748 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
749 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
750 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
751 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
752 handle->xrun[1] = true;
\r
754 handle->xrun[0] = true;
\r
758 return kAudioHardwareNoError;
\r
761 OSStatus rateListener( AudioObjectID inDevice,
\r
763 const AudioObjectPropertyAddress properties[],
\r
764 void* ratePointer )
\r
767 Float64 *rate = (Float64 *) ratePointer;
\r
768 UInt32 dataSize = sizeof( Float64 );
\r
769 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
770 kAudioObjectPropertyScopeGlobal,
\r
771 kAudioObjectPropertyElementMaster };
\r
772 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
773 return kAudioHardwareNoError;
\r
776 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
777 unsigned int firstChannel, unsigned int sampleRate,
\r
778 RtAudioFormat format, unsigned int *bufferSize,
\r
779 RtAudio::StreamOptions *options )
\r
782 unsigned int nDevices = getDeviceCount();
\r
783 if ( nDevices == 0 ) {
\r
784 // This should not happen because a check is made before this function is called.
\r
785 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
789 if ( device >= nDevices ) {
\r
790 // This should not happen because a check is made before this function is called.
\r
791 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
795 AudioDeviceID deviceList[ nDevices ];
\r
796 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
797 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
798 kAudioObjectPropertyScopeGlobal,
\r
799 kAudioObjectPropertyElementMaster };
\r
800 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
801 0, NULL, &dataSize, (void *) &deviceList );
\r
802 if ( result != noErr ) {
\r
803 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
807 AudioDeviceID id = deviceList[ device ];
\r
809 // Setup for stream mode.
\r
810 bool isInput = false;
\r
811 if ( mode == INPUT ) {
\r
813 property.mScope = kAudioDevicePropertyScopeInput;
\r
816 property.mScope = kAudioDevicePropertyScopeOutput;
\r
818 // Get the stream "configuration".
\r
819 AudioBufferList *bufferList = nil;
\r
821 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
822 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
823 if ( result != noErr || dataSize == 0 ) {
\r
824 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
825 errorText_ = errorStream_.str();
\r
829 // Allocate the AudioBufferList.
\r
830 bufferList = (AudioBufferList *) malloc( dataSize );
\r
831 if ( bufferList == NULL ) {
\r
832 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
836 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
837 if (result != noErr || dataSize == 0) {
\r
838 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
839 errorText_ = errorStream_.str();
\r
843 // Search for one or more streams that contain the desired number of
\r
844 // channels. CoreAudio devices can have an arbitrary number of
\r
845 // streams and each stream can have an arbitrary number of channels.
\r
846 // For each stream, a single buffer of interleaved samples is
\r
847 // provided. RtAudio prefers the use of one stream of interleaved
\r
848 // data or multiple consecutive single-channel streams. However, we
\r
849 // now support multiple consecutive multi-channel streams of
\r
850 // interleaved data as well.
\r
851 UInt32 iStream, offsetCounter = firstChannel;
\r
852 UInt32 nStreams = bufferList->mNumberBuffers;
\r
853 bool monoMode = false;
\r
854 bool foundStream = false;
\r
856 // First check that the device supports the requested number of
\r
858 UInt32 deviceChannels = 0;
\r
859 for ( iStream=0; iStream<nStreams; iStream++ )
\r
860 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
862 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
863 free( bufferList );
\r
864 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
865 errorText_ = errorStream_.str();
\r
869 // Look for a single stream meeting our needs.
\r
870 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
871 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
872 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
873 if ( streamChannels >= channels + offsetCounter ) {
\r
874 firstStream = iStream;
\r
875 channelOffset = offsetCounter;
\r
876 foundStream = true;
\r
879 if ( streamChannels > offsetCounter ) break;
\r
880 offsetCounter -= streamChannels;
\r
883 // If we didn't find a single stream above, then we should be able
\r
884 // to meet the channel specification with multiple streams.
\r
885 if ( foundStream == false ) {
\r
887 offsetCounter = firstChannel;
\r
888 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
889 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
890 if ( streamChannels > offsetCounter ) break;
\r
891 offsetCounter -= streamChannels;
\r
894 firstStream = iStream;
\r
895 channelOffset = offsetCounter;
\r
896 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
898 if ( streamChannels > 1 ) monoMode = false;
\r
899 while ( channelCounter > 0 ) {
\r
900 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
901 if ( streamChannels > 1 ) monoMode = false;
\r
902 channelCounter -= streamChannels;
\r
907 free( bufferList );
\r
909 // Determine the buffer size.
\r
910 AudioValueRange bufferRange;
\r
911 dataSize = sizeof( AudioValueRange );
\r
912 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
913 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
915 if ( result != noErr ) {
\r
916 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
917 errorText_ = errorStream_.str();
\r
921 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
922 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
923 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
925 // Set the buffer size. For multiple streams, I'm assuming we only
\r
926 // need to make this setting for the master channel.
\r
927 UInt32 theSize = (UInt32) *bufferSize;
\r
928 dataSize = sizeof( UInt32 );
\r
929 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
930 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
932 if ( result != noErr ) {
\r
933 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
934 errorText_ = errorStream_.str();
\r
938 // If attempting to setup a duplex stream, the bufferSize parameter
\r
939 // MUST be the same in both directions!
\r
940 *bufferSize = theSize;
\r
941 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
942 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
943 errorText_ = errorStream_.str();
\r
947 stream_.bufferSize = *bufferSize;
\r
948 stream_.nBuffers = 1;
\r
950 // Try to set "hog" mode ... it's not clear to me this is working.
\r
951 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
953 dataSize = sizeof( hog_pid );
\r
954 property.mSelector = kAudioDevicePropertyHogMode;
\r
955 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
956 if ( result != noErr ) {
\r
957 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
958 errorText_ = errorStream_.str();
\r
962 if ( hog_pid != getpid() ) {
\r
963 hog_pid = getpid();
\r
964 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
965 if ( result != noErr ) {
\r
966 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
967 errorText_ = errorStream_.str();
\r
973 // Check and if necessary, change the sample rate for the device.
\r
974 Float64 nominalRate;
\r
975 dataSize = sizeof( Float64 );
\r
976 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
977 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
979 if ( result != noErr ) {
\r
980 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
981 errorText_ = errorStream_.str();
\r
985 // Only change the sample rate if off by more than 1 Hz.
\r
986 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
988 // Set a property listener for the sample rate change
\r
989 Float64 reportedRate = 0.0;
\r
990 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
991 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
994 errorText_ = errorStream_.str();
\r
998 nominalRate = (Float64) sampleRate;
\r
999 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1001 if ( result != noErr ) {
\r
1002 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1003 errorText_ = errorStream_.str();
\r
1007 // Now wait until the reported nominal rate is what we just set.
\r
1008 UInt32 microCounter = 0;
\r
1009 while ( reportedRate != nominalRate ) {
\r
1010 microCounter += 5000;
\r
1011 if ( microCounter > 5000000 ) break;
\r
1015 // Remove the property listener.
\r
1016 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1018 if ( microCounter > 5000000 ) {
\r
1019 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1020 errorText_ = errorStream_.str();
\r
1025 // Now set the stream format for all streams. Also, check the
\r
1026 // physical format of the device and change that if necessary.
\r
1027 AudioStreamBasicDescription description;
\r
1028 dataSize = sizeof( AudioStreamBasicDescription );
\r
1029 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1030 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1031 if ( result != noErr ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1037 // Set the sample rate and data format id. However, only make the
\r
1038 // change if the sample rate is not within 1.0 of the desired
\r
1039 // rate and the format is not linear pcm.
\r
1040 bool updateFormat = false;
\r
1041 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1042 description.mSampleRate = (Float64) sampleRate;
\r
1043 updateFormat = true;
\r
1046 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1047 description.mFormatID = kAudioFormatLinearPCM;
\r
1048 updateFormat = true;
\r
1051 if ( updateFormat ) {
\r
1052 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1053 if ( result != noErr ) {
\r
1054 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1055 errorText_ = errorStream_.str();
\r
1060 // Now check the physical format.
\r
1061 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1062 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1063 if ( result != noErr ) {
\r
1064 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1065 errorText_ = errorStream_.str();
\r
1069 //std::cout << "Current physical stream format:" << std::endl;
\r
1070 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1071 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1072 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1073 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1075 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1076 description.mFormatID = kAudioFormatLinearPCM;
\r
1077 //description.mSampleRate = (Float64) sampleRate;
\r
1078 AudioStreamBasicDescription testDescription = description;
\r
1079 UInt32 formatFlags;
\r
1081 // We'll try higher bit rates first and then work our way down.
\r
1082 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1083 formatFlags = description.mFormatFlags | kLinearPCMFormatFlagIsFloat & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1084 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1085 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1086 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1087 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1088 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1089 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1090 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1091 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1092 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1093 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1094 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1096 bool setPhysicalFormat = false;
\r
1097 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1098 testDescription = description;
\r
1099 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1100 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1101 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1102 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1104 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1105 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1106 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1107 if ( result == noErr ) {
\r
1108 setPhysicalFormat = true;
\r
1109 //std::cout << "Updated physical stream format:" << std::endl;
\r
1110 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1111 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1112 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1113 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1118 if ( !setPhysicalFormat ) {
\r
1119 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1120 errorText_ = errorStream_.str();
\r
1123 } // done setting virtual/physical formats.
\r
1125 // Get the stream / device latency.
\r
1127 dataSize = sizeof( UInt32 );
\r
1128 property.mSelector = kAudioDevicePropertyLatency;
\r
1129 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1130 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1131 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1133 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1134 errorText_ = errorStream_.str();
\r
1135 error( RtError::WARNING );
\r
1139 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1140 // always be presented in native-endian format, so we should never
\r
1141 // need to byte swap.
\r
1142 stream_.doByteSwap[mode] = false;
\r
1144 // From the CoreAudio documentation, PCM data must be supplied as
\r
1146 stream_.userFormat = format;
\r
1147 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1149 if ( streamCount == 1 )
\r
1150 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1151 else // multiple streams
\r
1152 stream_.nDeviceChannels[mode] = channels;
\r
1153 stream_.nUserChannels[mode] = channels;
\r
1154 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1155 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1156 else stream_.userInterleaved = true;
\r
1157 stream_.deviceInterleaved[mode] = true;
\r
1158 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1160 // Set flags for buffer conversion.
\r
1161 stream_.doConvertBuffer[mode] = false;
\r
1162 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1163 stream_.doConvertBuffer[mode] = true;
\r
1164 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1165 stream_.doConvertBuffer[mode] = true;
\r
1166 if ( streamCount == 1 ) {
\r
1167 if ( stream_.nUserChannels[mode] > 1 &&
\r
1168 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1169 stream_.doConvertBuffer[mode] = true;
\r
1171 else if ( monoMode && stream_.userInterleaved )
\r
1172 stream_.doConvertBuffer[mode] = true;
\r
1174 // Allocate our CoreHandle structure for the stream.
\r
1175 CoreHandle *handle = 0;
\r
1176 if ( stream_.apiHandle == 0 ) {
\r
1178 handle = new CoreHandle;
\r
1180 catch ( std::bad_alloc& ) {
\r
1181 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1185 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1186 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1189 stream_.apiHandle = (void *) handle;
\r
1192 handle = (CoreHandle *) stream_.apiHandle;
\r
1193 handle->iStream[mode] = firstStream;
\r
1194 handle->nStreams[mode] = streamCount;
\r
1195 handle->id[mode] = id;
\r
1197 // Allocate necessary internal buffers.
\r
1198 unsigned long bufferBytes;
\r
1199 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1200 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1201 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1202 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1203 if ( stream_.userBuffer[mode] == NULL ) {
\r
1204 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1208 // If possible, we will make use of the CoreAudio stream buffers as
\r
1209 // "device buffers". However, we can't do this if using multiple
\r
1211 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1213 bool makeBuffer = true;
\r
1214 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1215 if ( mode == INPUT ) {
\r
1216 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1217 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1218 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1222 if ( makeBuffer ) {
\r
1223 bufferBytes *= *bufferSize;
\r
1224 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1225 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1226 if ( stream_.deviceBuffer == NULL ) {
\r
1227 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1233 stream_.sampleRate = sampleRate;
\r
1234 stream_.device[mode] = device;
\r
1235 stream_.state = STREAM_STOPPED;
\r
1236 stream_.callbackInfo.object = (void *) this;
\r
1238 // Setup the buffer conversion information structure.
\r
1239 if ( stream_.doConvertBuffer[mode] ) {
\r
1240 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1241 else setConvertInfo( mode, channelOffset );
\r
1244 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1245 // Only one callback procedure per device.
\r
1246 stream_.mode = DUPLEX;
\r
1248 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1249 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1251 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1252 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1254 if ( result != noErr ) {
\r
1255 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1256 errorText_ = errorStream_.str();
\r
1259 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1260 stream_.mode = DUPLEX;
\r
1262 stream_.mode = mode;
\r
1265 // Setup the device property listener for over/underload.
\r
1266 property.mSelector = kAudioDeviceProcessorOverload;
\r
1267 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1273 pthread_cond_destroy( &handle->condition );
\r
1275 stream_.apiHandle = 0;
\r
1278 for ( int i=0; i<2; i++ ) {
\r
1279 if ( stream_.userBuffer[i] ) {
\r
1280 free( stream_.userBuffer[i] );
\r
1281 stream_.userBuffer[i] = 0;
\r
1285 if ( stream_.deviceBuffer ) {
\r
1286 free( stream_.deviceBuffer );
\r
1287 stream_.deviceBuffer = 0;
\r
1293 void RtApiCore :: closeStream( void )
\r
1295 if ( stream_.state == STREAM_CLOSED ) {
\r
1296 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1297 error( RtError::WARNING );
\r
1301 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1302 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1303 if ( stream_.state == STREAM_RUNNING )
\r
1304 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1305 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1306 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1308 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1309 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1313 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1314 if ( stream_.state == STREAM_RUNNING )
\r
1315 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1316 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1317 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1319 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1320 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1324 for ( int i=0; i<2; i++ ) {
\r
1325 if ( stream_.userBuffer[i] ) {
\r
1326 free( stream_.userBuffer[i] );
\r
1327 stream_.userBuffer[i] = 0;
\r
1331 if ( stream_.deviceBuffer ) {
\r
1332 free( stream_.deviceBuffer );
\r
1333 stream_.deviceBuffer = 0;
\r
1336 // Destroy pthread condition variable.
\r
1337 pthread_cond_destroy( &handle->condition );
\r
1339 stream_.apiHandle = 0;
\r
1341 stream_.mode = UNINITIALIZED;
\r
1342 stream_.state = STREAM_CLOSED;
\r
1345 void RtApiCore :: startStream( void )
\r
1348 if ( stream_.state == STREAM_RUNNING ) {
\r
1349 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1350 error( RtError::WARNING );
\r
1354 MUTEX_LOCK( &stream_.mutex );
\r
1356 OSStatus result = noErr;
\r
1357 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1358 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1360 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1361 if ( result != noErr ) {
\r
1362 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1363 errorText_ = errorStream_.str();
\r
1368 if ( stream_.mode == INPUT ||
\r
1369 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1371 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1372 if ( result != noErr ) {
\r
1373 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1374 errorText_ = errorStream_.str();
\r
1379 handle->drainCounter = 0;
\r
1380 handle->internalDrain = false;
\r
1381 stream_.state = STREAM_RUNNING;
\r
1384 MUTEX_UNLOCK( &stream_.mutex );
\r
1386 if ( result == noErr ) return;
\r
1387 error( RtError::SYSTEM_ERROR );
\r
1390 void RtApiCore :: stopStream( void )
\r
1393 if ( stream_.state == STREAM_STOPPED ) {
\r
1394 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1395 error( RtError::WARNING );
\r
1399 MUTEX_LOCK( &stream_.mutex );
\r
1401 if ( stream_.state == STREAM_STOPPED ) {
\r
1402 MUTEX_UNLOCK( &stream_.mutex );
\r
1406 OSStatus result = noErr;
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1410 if ( handle->drainCounter == 0 ) {
\r
1411 handle->drainCounter = 2;
\r
1412 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1415 MUTEX_UNLOCK( &stream_.mutex );
\r
1416 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1417 MUTEX_LOCK( &stream_.mutex );
\r
1418 if ( result != noErr ) {
\r
1419 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1420 errorText_ = errorStream_.str();
\r
1425 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1427 MUTEX_UNLOCK( &stream_.mutex );
\r
1428 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1429 MUTEX_LOCK( &stream_.mutex );
\r
1430 if ( result != noErr ) {
\r
1431 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1432 errorText_ = errorStream_.str();
\r
1437 stream_.state = STREAM_STOPPED;
\r
1440 MUTEX_UNLOCK( &stream_.mutex );
\r
1442 if ( result == noErr ) return;
\r
1443 error( RtError::SYSTEM_ERROR );
\r
1446 void RtApiCore :: abortStream( void )
\r
1449 if ( stream_.state == STREAM_STOPPED ) {
\r
1450 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1451 error( RtError::WARNING );
\r
1455 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1456 handle->drainCounter = 2;
\r
1461 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1462 const AudioBufferList *inBufferList,
\r
1463 const AudioBufferList *outBufferList )
\r
1465 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1466 if ( stream_.state == STREAM_CLOSED ) {
\r
1467 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1468 error( RtError::WARNING );
\r
1472 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1473 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1475 // Check if we were draining the stream and signal is finished.
\r
1476 if ( handle->drainCounter > 3 ) {
\r
1477 if ( handle->internalDrain == true )
\r
1479 else // external call to stopStream()
\r
1480 pthread_cond_signal( &handle->condition );
\r
1484 MUTEX_LOCK( &stream_.mutex );
\r
1486 // The state might change while waiting on a mutex.
\r
1487 if ( stream_.state == STREAM_STOPPED ) {
\r
1488 MUTEX_UNLOCK( &stream_.mutex );
\r
1492 AudioDeviceID outputDevice = handle->id[0];
\r
1494 // Invoke user callback to get fresh output data UNLESS we are
\r
1495 // draining stream or duplex mode AND the input/output devices are
\r
1496 // different AND this function is called for the input device.
\r
1497 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1498 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1499 double streamTime = getStreamTime();
\r
1500 RtAudioStreamStatus status = 0;
\r
1501 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1502 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1503 handle->xrun[0] = false;
\r
1505 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1506 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1507 handle->xrun[1] = false;
\r
1510 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1511 stream_.bufferSize, streamTime, status, info->userData );
\r
1512 if ( handle->drainCounter == 2 ) {
\r
1513 MUTEX_UNLOCK( &stream_.mutex );
\r
1517 else if ( handle->drainCounter == 1 )
\r
1518 handle->internalDrain = true;
\r
1521 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1523 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1525 if ( handle->nStreams[0] == 1 ) {
\r
1526 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1528 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1530 else { // fill multiple streams with zeros
\r
1531 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1532 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1534 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1538 else if ( handle->nStreams[0] == 1 ) {
\r
1539 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1540 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1541 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1543 else { // copy from user buffer
\r
1544 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1545 stream_.userBuffer[0],
\r
1546 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1549 else { // fill multiple streams
\r
1550 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1551 if ( stream_.doConvertBuffer[0] ) {
\r
1552 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1553 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1556 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1557 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1558 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1559 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1560 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1563 else { // fill multiple multi-channel streams with interleaved data
\r
1564 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1565 Float32 *out, *in;
\r
1567 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1568 UInt32 inChannels = stream_.nUserChannels[0];
\r
1569 if ( stream_.doConvertBuffer[0] ) {
\r
1570 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1571 inChannels = stream_.nDeviceChannels[0];
\r
1574 if ( inInterleaved ) inOffset = 1;
\r
1575 else inOffset = stream_.bufferSize;
\r
1577 channelsLeft = inChannels;
\r
1578 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1580 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1581 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1584 // Account for possible channel offset in first stream
\r
1585 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1586 streamChannels -= stream_.channelOffset[0];
\r
1587 outJump = stream_.channelOffset[0];
\r
1591 // Account for possible unfilled channels at end of the last stream
\r
1592 if ( streamChannels > channelsLeft ) {
\r
1593 outJump = streamChannels - channelsLeft;
\r
1594 streamChannels = channelsLeft;
\r
1597 // Determine input buffer offsets and skips
\r
1598 if ( inInterleaved ) {
\r
1599 inJump = inChannels;
\r
1600 in += inChannels - channelsLeft;
\r
1604 in += (inChannels - channelsLeft) * inOffset;
\r
1607 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1608 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1609 *out++ = in[j*inOffset];
\r
1614 channelsLeft -= streamChannels;
\r
1619 if ( handle->drainCounter ) {
\r
1620 handle->drainCounter++;
\r
1625 AudioDeviceID inputDevice;
\r
1626 inputDevice = handle->id[1];
\r
1627 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1629 if ( handle->nStreams[1] == 1 ) {
\r
1630 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1631 convertBuffer( stream_.userBuffer[1],
\r
1632 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1633 stream_.convertInfo[1] );
\r
1635 else { // copy to user buffer
\r
1636 memcpy( stream_.userBuffer[1],
\r
1637 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1638 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1641 else { // read from multiple streams
\r
1642 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1643 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1645 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1646 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1647 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1648 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1649 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1652 else { // read from multiple multi-channel streams
\r
1653 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1654 Float32 *out, *in;
\r
1656 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1657 UInt32 outChannels = stream_.nUserChannels[1];
\r
1658 if ( stream_.doConvertBuffer[1] ) {
\r
1659 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1660 outChannels = stream_.nDeviceChannels[1];
\r
1663 if ( outInterleaved ) outOffset = 1;
\r
1664 else outOffset = stream_.bufferSize;
\r
1666 channelsLeft = outChannels;
\r
1667 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1669 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1670 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1673 // Account for possible channel offset in first stream
\r
1674 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1675 streamChannels -= stream_.channelOffset[1];
\r
1676 inJump = stream_.channelOffset[1];
\r
1680 // Account for possible unread channels at end of the last stream
\r
1681 if ( streamChannels > channelsLeft ) {
\r
1682 inJump = streamChannels - channelsLeft;
\r
1683 streamChannels = channelsLeft;
\r
1686 // Determine output buffer offsets and skips
\r
1687 if ( outInterleaved ) {
\r
1688 outJump = outChannels;
\r
1689 out += outChannels - channelsLeft;
\r
1693 out += (outChannels - channelsLeft) * outOffset;
\r
1696 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1697 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1698 out[j*outOffset] = *in++;
\r
1703 channelsLeft -= streamChannels;
\r
1707 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1708 convertBuffer( stream_.userBuffer[1],
\r
1709 stream_.deviceBuffer,
\r
1710 stream_.convertInfo[1] );
\r
1716 MUTEX_UNLOCK( &stream_.mutex );
\r
1718 RtApi::tickStreamTime();
\r
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for error messages.
// Returns a pointer to a static string; unknown codes yield a generic text.
// NOTE(review): the switch header and closing braces (original lines
// 1723-1725 and after 1760) were dropped by the extraction of this chunk.
1722 const char* RtApiCore :: getErrorCode( OSStatus code )
1726 case kAudioHardwareNotRunningError:
1727 return "kAudioHardwareNotRunningError";
1729 case kAudioHardwareUnspecifiedError:
1730 return "kAudioHardwareUnspecifiedError";
1732 case kAudioHardwareUnknownPropertyError:
1733 return "kAudioHardwareUnknownPropertyError";
1735 case kAudioHardwareBadPropertySizeError:
1736 return "kAudioHardwareBadPropertySizeError";
1738 case kAudioHardwareIllegalOperationError:
1739 return "kAudioHardwareIllegalOperationError";
1741 case kAudioHardwareBadObjectError:
1742 return "kAudioHardwareBadObjectError";
1744 case kAudioHardwareBadDeviceError:
1745 return "kAudioHardwareBadDeviceError";
1747 case kAudioHardwareBadStreamError:
1748 return "kAudioHardwareBadStreamError";
1750 case kAudioHardwareUnsupportedOperationError:
1751 return "kAudioHardwareUnsupportedOperationError";
1753 case kAudioDeviceUnsupportedFormatError:
1754 return "kAudioDeviceUnsupportedFormatError";
1756 case kAudioDevicePermissionsError:
1757 return "kAudioDevicePermissionsError";
// Default: code not recognized.
1760 return "CoreAudio unknown error";
\r
1764 //******************** End of __MACOSX_CORE__ *********************//
\r
1767 #if defined(__UNIX_JACK__)
\r
1769 // JACK is a low-latency audio server, originally written for the
\r
1770 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1771 // connect a number of different applications to an audio device, as
\r
1772 // well as allowing them to share audio between themselves.
\r
1774 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1775 // have ports connected to the server. The JACK server is typically
\r
1776 // started in a terminal as follows:
\r
1778 // .jackd -d alsa -d hw:0
\r
1780 // or through an interface program such as qjackctl. Many of the
\r
1781 // parameters normally set for a stream are fixed by the JACK server
\r
1782 // and can be specified when the JACK server is started. In
\r
1785 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1787 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1788 // frames, and number of buffers = 4. Once the server is running, it
\r
1789 // is not possible to override these values. If the values are not
\r
1790 // specified in the command-line, the JACK server uses default values.
\r
1792 // The JACK server does not have to be running when an instance of
\r
1793 // RtApiJack is created, though the function getDeviceCount() will
\r
1794 // report 0 devices found until JACK has been started. When no
\r
1795 // devices are available (i.e., the JACK server is not running), a
\r
1796 // stream cannot be opened.
\r
1798 #include <jack/jack.h>
\r
1799 #include <unistd.h>
\r
1802 // A structure to hold various information related to the Jack API
\r
1803 // implementation.
\r
// Per-stream bookkeeping for the JACK backend: the JACK client handle, the
// registered ports for playback ([0]) and capture ([1]), the client
// (device) names, and the condition variable used to signal the end of a
// stream drain from the process callback to stopStream().
// NOTE(review): the constructor initializer references xrun[0]/xrun[1], so
// the struct also has a `bool xrun[2];` member (original line ~1808) that
// was dropped by the extraction of this chunk, along with the
// `JackHandle()` constructor header line.
1804 struct JackHandle {
1805 jack_client_t *client;
1806 jack_port_t **ports[2];
1807 std::string deviceName[2];
1809 pthread_cond_t condition;
1810 int drainCounter; // Tracks callback counts when draining
1811 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default-construct with no client, no ports, no drain in progress.
1814 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Thread handle used by jackShutdown()/callbackEvent() to spawn the
// helper threads (jackCloseStream / jackStopStream) that must outlive the
// JACK callback that triggers them.
1817 ThreadHandle threadId;
// No-op error callback installed via jack_set_error_function() to suppress
// JACK's internal error printing in non-debug builds.
1818 void jackSilentError( const char * ) {};
\r
// Constructor: no state to initialize; in release builds it silences
// JACK's internal error reporting by installing jackSilentError above.
1820 RtApiJack :: RtApiJack()
1822 // Nothing to do here.
1823 #if !defined(__RTAUDIO_DEBUG__)
1824 // Turn off Jack's internal error reporting.
1825 jack_set_error_function( &jackSilentError );
\r
// Destructor: ensure any still-open stream is torn down (deactivates and
// closes the JACK client, frees buffers) before the object goes away.
1829 RtApiJack :: ~RtApiJack()
1831 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count JACK "devices" by connecting as a temporary client and grouping
// all visible ports by their client-name prefix (the text before the first
// ':'). Returns 0 when the JACK server is not running (JackNoStartServer
// prevents auto-spawning a server).
1834 unsigned int RtApiJack :: getDeviceCount( void )
1836 // See if we can become a jack client.
1837 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1838 jack_status_t *status = NULL;
1839 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1840 if ( client == 0 ) return 0;
1842 const char **ports;
1843 std::string port, previousPort;
1844 unsigned int nChannels = 0, nDevices = 0;
// NULL/NULL/0 filters: enumerate every port of every client.
1845 ports = jack_get_ports( client, NULL, NULL, 0 );
1847 // Parse the port names up to the first colon (:).
1848 size_t iColon = 0;
1850 port = (char *) ports[ nChannels ];
1851 iColon = port.find(":");
1852 if ( iColon != std::string::npos ) {
// Keep the colon so "foo:" and "foobar:" compare as distinct prefixes.
1853 port = port.substr( 0, iColon + 1 );
// A new prefix means a new device/client; nDevices is presumably
// incremented on a line dropped from this view (original line 1855).
1854 if ( port != previousPort ) {
1856 previousPort = port;
1859 } while ( ports[++nChannels] );
// NOTE(review): the array returned by jack_get_ports() is never released
// here — the JACK API says the caller should free it (jack_free); confirm
// against the JACK headers in use. TODO confirm.
1863 jack_client_close( client );
\r
// Probe one JACK "device" (client) and fill an RtAudio::DeviceInfo:
// name (the port prefix before ':'), the single server sample rate,
// output channels (= JACK input ports of that client), input channels
// (= JACK output ports), duplex count, and native format (always float32).
// device 0 is reported as the default input/output since JACK has no
// notion of defaults. info.probed stays false on any failure path.
1867 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
1869 RtAudio::DeviceInfo info;
1870 info.probed = false;
// Temporary client just for probing; do not auto-start a server.
1872 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
1873 jack_status_t *status = NULL;
1874 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
1875 if ( client == 0 ) {
1876 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
1877 error( RtError::WARNING );
1881 const char **ports;
1882 std::string port, previousPort;
1883 unsigned int nPorts = 0, nDevices = 0;
1884 ports = jack_get_ports( client, NULL, NULL, 0 );
1886 // Parse the port names up to the first colon (:).
1887 size_t iColon = 0;
1889 port = (char *) ports[ nPorts ];
1890 iColon = port.find(":");
1891 if ( iColon != std::string::npos ) {
1892 port = port.substr( 0, iColon );
1893 if ( port != previousPort ) {
// The device index is the ordinal of the distinct client prefix.
1894 if ( nDevices == device ) info.name = port;
1896 previousPort = port;
1899 } while ( ports[++nPorts] );
// NOTE(review): on this error path the temporary client does not appear
// to be closed before error() is invoked with INVALID_USE — if that
// throws, `client` leaks. Confirm error() semantics. TODO confirm.
1903 if ( device >= nDevices ) {
1904 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
1905 error( RtError::INVALID_USE );
1908 // Get the current jack server sample rate.
1909 info.sampleRates.clear();
1910 info.sampleRates.push_back( jack_get_sample_rate( client ) );
1912 // Count the available ports containing the client name as device
1913 // channels. Jack "input ports" equal RtAudio output channels.
1914 unsigned int nChannels = 0;
1915 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
1917 while ( ports[ nChannels ] ) nChannels++;
1919 info.outputChannels = nChannels;
1922 // Jack "output ports" equal RtAudio input channels.
// nChannels is presumably reset to 0 on a line dropped from this view
// (original line 1923) before the input-side count below.
1924 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
1926 while ( ports[ nChannels ] ) nChannels++;
1928 info.inputChannels = nChannels;
1931 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
1932 jack_client_close(client);
1933 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
1934 error( RtError::WARNING );
1938 // If device opens for both playback and capture, we determine the channels.
1939 if ( info.outputChannels > 0 && info.inputChannels > 0 )
1940 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
1942 // Jack always uses 32-bit floats.
1943 info.nativeFormats = RTAUDIO_FLOAT32;
1945 // Jack doesn't provide default devices so we'll use the first available one.
1946 if ( device == 0 && info.outputChannels > 0 )
1947 info.isDefaultOutput = true;
1948 if ( device == 0 && info.inputChannels > 0 )
1949 info.isDefaultInput = true;
1951 jack_client_close(client);
1952 info.probed = true;
\r
// JACK process callback trampoline: recover the RtApiJack instance from
// the CallbackInfo we registered and forward to callbackEvent(). A false
// return from callbackEvent() maps to a non-zero (error) JACK return,
// which tells the server to stop calling us.
1956 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
1958 CallbackInfo *info = (CallbackInfo *) infoPointer;
1960 RtApiJack *object = (RtApiJack *) info->object;
1961 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1966 // This function will be called by a spawned thread when the Jack
1967 // server signals that it is shutting down. It is necessary to handle
1968 // it this way because the jackShutdown() function must return before
1969 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: close the stream on behalf of jackShutdown().
// `ptr` is the CallbackInfo registered with the JACK client.
1970 extern "C" void *jackCloseStream( void *ptr )
1972 CallbackInfo *info = (CallbackInfo *) ptr;
1973 RtApiJack *object = (RtApiJack *) info->object;
1975 object->closeStream();
1977 pthread_exit( NULL );
\r
// JACK on-shutdown callback: if the stream is still running, the server
// (not us) is going away, so spawn jackCloseStream on a separate thread
// to tear the stream down — closeStream() cannot be called directly from
// this callback (see comment on jackCloseStream above this function's
// registration site).
1979 void jackShutdown( void *infoPointer )
1981 CallbackInfo *info = (CallbackInfo *) infoPointer;
1982 RtApiJack *object = (RtApiJack *) info->object;
1984 // Check current stream state. If stopped, then we'll assume this
1985 // was called as a result of a call to RtApiJack::stopStream (the
1986 // deactivation of a client handle causes this function to be called).
1987 // If not, we'll assume the Jack server is shutting down or some
1988 // other problem occurred and we should close the stream.
1989 if ( object->isStreamRunning() == false ) return;
1991 pthread_create( &threadId, NULL, jackCloseStream, info );
1992 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// JACK xrun callback: flag an underflow/overflow on whichever directions
// have registered ports; callbackEvent() reports and clears these flags.
// Expects `infoPointer` to be the stream's JackHandle* (see the
// registration in probeDeviceOpen — the argument actually passed there
// does not match this expectation; flagged at that call site).
1995 int jackXrun( void *infoPointer )
1997 JackHandle *handle = (JackHandle *) infoPointer;
1999 if ( handle->ports[0] ) handle->xrun[0] = true;
2000 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect as a
// client (first pass only), resolve the device index to a client-name
// prefix, validate channel count and sample rate against the server,
// allocate the JackHandle / user / device buffers, register our ports,
// and install the process/xrun/shutdown callbacks. Called once per
// direction; a second call with the other mode upgrades stream_.mode to
// DUPLEX. Returns true on success; the cleanup code at the end (an
// `error:` exit path — label line dropped from this view) releases
// everything allocated so far and returns FAILURE.
2005 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2006 unsigned int firstChannel, unsigned int sampleRate,
2007 RtAudioFormat format, unsigned int *bufferSize,
2008 RtAudio::StreamOptions *options )
2010 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2012 // Look for jack server and try to become a client (only do once per stream).
2013 jack_client_t *client = 0;
2014 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2015 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2016 jack_status_t *status = NULL;
// Use the caller-supplied stream name when given; else a fixed default.
2017 if ( options && !options->streamName.empty() )
2018 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2020 client = jack_client_open( "RtApiJack", jackoptions, status );
2021 if ( client == 0 ) {
2022 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2023 error( RtError::WARNING );
2028 // The handle must have been created on an earlier pass.
2029 client = handle->client;
// Resolve the numeric device index to a JACK client-name prefix, using
// the same prefix-grouping scheme as getDeviceCount()/getDeviceInfo().
2032 const char **ports;
2033 std::string port, previousPort, deviceName;
2034 unsigned int nPorts = 0, nDevices = 0;
2035 ports = jack_get_ports( client, NULL, NULL, 0 );
2037 // Parse the port names up to the first colon (:).
2038 size_t iColon = 0;
2040 port = (char *) ports[ nPorts ];
2041 iColon = port.find(":");
2042 if ( iColon != std::string::npos ) {
2043 port = port.substr( 0, iColon );
2044 if ( port != previousPort ) {
2045 if ( nDevices == device ) deviceName = port;
2047 previousPort = port;
2050 } while ( ports[++nPorts] );
2054 if ( device >= nDevices ) {
2055 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2059 // Count the available ports containing the client name as device
2060 // channels. Jack "input ports" equal RtAudio output channels.
2061 unsigned int nChannels = 0;
2062 unsigned long flag = JackPortIsInput;
2063 if ( mode == INPUT ) flag = JackPortIsOutput;
2064 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2066 while ( ports[ nChannels ] ) nChannels++;
2070 // Compare the jack ports for specified client to the requested number of channels.
2071 if ( nChannels < (channels + firstChannel) ) {
2072 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2073 errorText_ = errorStream_.str();
2077 // Check the jack server sample rate.
// JACK fixes the rate server-wide; we cannot resample, so mismatch fails.
2078 unsigned int jackRate = jack_get_sample_rate( client );
2079 if ( sampleRate != jackRate ) {
2080 jack_client_close( client );
2081 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2082 errorText_ = errorStream_.str();
2085 stream_.sampleRate = jackRate;
2087 // Get the latency of the JACK port.
2088 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2089 if ( ports[ firstChannel ] )
2090 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2093 // The jack server always uses 32-bit floating-point data.
2094 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2095 stream_.userFormat = format;
2097 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2098 else stream_.userInterleaved = true;
2100 // Jack always uses non-interleaved buffers.
2101 stream_.deviceInterleaved[mode] = false;
2103 // Jack always provides host byte-ordered data.
2104 stream_.doByteSwap[mode] = false;
2106 // Get the buffer size. The buffer size and number of buffers
2107 // (periods) is set when the jack server is started.
2108 stream_.bufferSize = (int) jack_get_buffer_size( client );
2109 *bufferSize = stream_.bufferSize;
2111 stream_.nDeviceChannels[mode] = channels;
2112 stream_.nUserChannels[mode] = channels;
2114 // Set flags for buffer conversion.
// Conversion is needed when user format differs from float32, or when the
// user wants interleaved data (JACK is always non-interleaved) and has
// more than one channel.
2115 stream_.doConvertBuffer[mode] = false;
2116 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2117 stream_.doConvertBuffer[mode] = true;
2118 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2119 stream_.nUserChannels[mode] > 1 )
2120 stream_.doConvertBuffer[mode] = true;
2122 // Allocate our JackHandle structure for the stream.
2123 if ( handle == 0 ) {
2125 handle = new JackHandle;
2127 catch ( std::bad_alloc& ) {
2128 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2132 if ( pthread_cond_init(&handle->condition, NULL) ) {
2133 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2136 stream_.apiHandle = (void *) handle;
2137 handle->client = client;
2139 handle->deviceName[mode] = deviceName;
2141 // Allocate necessary internal buffers.
2142 unsigned long bufferBytes;
2143 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2144 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2145 if ( stream_.userBuffer[mode] == NULL ) {
2146 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2150 if ( stream_.doConvertBuffer[mode] ) {
// Size the shared device buffer for the larger of the two directions; in
// duplex mode an earlier OUTPUT pass may already have made a big-enough
// buffer, in which case we keep it (makeBuffer = false).
2152 bool makeBuffer = true;
2153 if ( mode == OUTPUT )
2154 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2155 else { // mode == INPUT
2156 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2157 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2158 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2159 if ( bufferBytes < bytesOut ) makeBuffer = false;
2163 if ( makeBuffer ) {
2164 bufferBytes *= *bufferSize;
2165 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2166 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2167 if ( stream_.deviceBuffer == NULL ) {
2168 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2174 // Allocate memory for the Jack ports (channels) identifiers.
2175 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2176 if ( handle->ports[mode] == NULL ) {
2177 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2181 stream_.device[mode] = device;
2182 stream_.channelOffset[mode] = firstChannel;
2183 stream_.state = STREAM_STOPPED;
2184 stream_.callbackInfo.object = (void *) this;
2186 if ( stream_.mode == OUTPUT && mode == INPUT )
2187 // We had already set up the stream for output.
2188 stream_.mode = DUPLEX;
2190 stream_.mode = mode;
2191 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
// BUG(review): `&handle` passes the address of this function's LOCAL
// pointer variable (a JackHandle**), but jackXrun() casts its argument
// straight to JackHandle*. The xrun callback will therefore read through
// a dangling/wrong pointer after this function returns. This should
// almost certainly be `(void *) handle` (fixed in later RtAudio
// releases) — confirm against upstream.
2192 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2193 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2196 // Register our ports.
// A `char label[64];` declaration (original line ~2197) was dropped from
// this view; snprintf below writes into it.
2198 if ( mode == OUTPUT ) {
2199 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2200 snprintf( label, 64, "outport %d", i );
2201 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2202 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2206 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2207 snprintf( label, 64, "inport %d", i );
2208 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2209 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2213 // Setup the buffer conversion information structure. We don't use
2214 // buffers to do channel offsets, so we override that parameter
2216 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// ---- error exit path: undo every allocation made above. ----
2222 pthread_cond_destroy( &handle->condition );
2223 jack_client_close( handle->client );
2225 if ( handle->ports[0] ) free( handle->ports[0] );
2226 if ( handle->ports[1] ) free( handle->ports[1] );
2229 stream_.apiHandle = 0;
2232 for ( int i=0; i<2; i++ ) {
2233 if ( stream_.userBuffer[i] ) {
2234 free( stream_.userBuffer[i] );
2235 stream_.userBuffer[i] = 0;
2239 if ( stream_.deviceBuffer ) {
2240 free( stream_.deviceBuffer );
2241 stream_.deviceBuffer = 0;
\r
// Close the open stream: deactivate the JACK client if running, close the
// client connection, destroy the drain condition variable, and free the
// port arrays plus user/device buffers. Leaves the stream in
// UNINITIALIZED/STREAM_CLOSED state. Warns (does not throw) if no stream
// is open.
2247 void RtApiJack :: closeStream( void )
2249 if ( stream_.state == STREAM_CLOSED ) {
2250 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2251 error( RtError::WARNING );
2255 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2258 if ( stream_.state == STREAM_RUNNING )
2259 jack_deactivate( handle->client );
2261 jack_client_close( handle->client );
2265 if ( handle->ports[0] ) free( handle->ports[0] );
2266 if ( handle->ports[1] ) free( handle->ports[1] );
2267 pthread_cond_destroy( &handle->condition );
// The JackHandle itself is presumably deleted on a line dropped from this
// view (between originals 2267 and 2269) before the pointer is cleared.
2269 stream_.apiHandle = 0;
2272 for ( int i=0; i<2; i++ ) {
2273 if ( stream_.userBuffer[i] ) {
2274 free( stream_.userBuffer[i] );
2275 stream_.userBuffer[i] = 0;
2279 if ( stream_.deviceBuffer ) {
2280 free( stream_.deviceBuffer );
2281 stream_.deviceBuffer = 0;
2284 stream_.mode = UNINITIALIZED;
2285 stream_.state = STREAM_CLOSED;
\r
// Start the stream: activate the JACK client (which begins process
// callbacks), then wire our registered ports to the device's ports —
// our output ports connect to the device's input ports and vice versa,
// starting at the configured channel offset. Resets the drain state and
// marks the stream RUNNING. On any failure, falls through to the unlock
// and raises SYSTEM_ERROR (result != 0).
2288 void RtApiJack :: startStream( void )
2291 if ( stream_.state == STREAM_RUNNING ) {
2292 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2293 error( RtError::WARNING );
2297 MUTEX_LOCK(&stream_.mutex);
2299 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2300 int result = jack_activate( handle->client );
2302 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2306 const char **ports;
2308 // Get the list of available ports.
2309 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2311 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2312 if ( ports == NULL) {
2313 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2317 // Now make the port connections. Since RtAudio wasn't designed to
2318 // allow the user to select particular channels of a device, we'll
2319 // just open the first "nChannels" ports with offset.
2320 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
// Guard: the device may expose fewer ports than offset + i.
2322 if ( ports[ stream_.channelOffset[0] + i ] )
2323 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2326 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2333 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2335 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2336 if ( ports == NULL) {
2337 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2341 // Now make the port connections. See note above.
2342 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2344 if ( ports[ stream_.channelOffset[1] + i ] )
// Capture direction: device port is the source, ours is the sink.
2345 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2348 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2355 handle->drainCounter = 0;
2356 handle->internalDrain = false;
2357 stream_.state = STREAM_RUNNING;
2360 MUTEX_UNLOCK(&stream_.mutex);
2362 if ( result == 0 ) return;
2363 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream gracefully. For output/duplex streams, if a drain is
// not already in progress, request one (drainCounter = 2) and block on
// the handle's condition variable until callbackEvent() signals that the
// final buffers have been played; then deactivate the JACK client.
// The state is re-checked after acquiring the mutex because the process
// callback may stop the stream concurrently.
2366 void RtApiJack :: stopStream( void )
2369 if ( stream_.state == STREAM_STOPPED ) {
2370 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2371 error( RtError::WARNING );
2375 MUTEX_LOCK( &stream_.mutex );
2377 if ( stream_.state == STREAM_STOPPED ) {
2378 MUTEX_UNLOCK( &stream_.mutex );
2382 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2383 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2385 if ( handle->drainCounter == 0 ) {
2386 handle->drainCounter = 2;
2387 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2391 jack_deactivate( handle->client );
2392 stream_.state = STREAM_STOPPED;
2394 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream immediately (no graceful drain): setting
// drainCounter = 2 makes callbackEvent() zero-fill the output from the
// next callback on; stopStream() is presumably invoked on a line dropped
// from this view (after original 2407) to complete the shutdown.
2397 void RtApiJack :: abortStream( void )
2400 if ( stream_.state == STREAM_STOPPED ) {
2401 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2402 error( RtError::WARNING );
2406 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407 handle->drainCounter = 2;
\r
2412 // This function will be called by a spawned thread when the user
2413 // callback function signals that the stream should be stopped or
2414 // aborted. It is necessary to handle it this way because the
2415 // callbackEvent() function must return before the jack_deactivate()
2416 // function will return.
// Thread entry point: perform stopStream() on behalf of callbackEvent().
// `ptr` is the stream's CallbackInfo.
2417 extern "C" void *jackStopStream( void *ptr )
2419 CallbackInfo *info = (CallbackInfo *) ptr;
2420 RtApiJack *object = (RtApiJack *) info->object;
2422 object->stopStream();
2424 pthread_exit( NULL );
\r
// JACK process-cycle body, invoked from jackCallbackHandler for each
// period of `nframes` frames. Sequence: handle drain completion; invoke
// the user callback (unless draining) with xrun status flags; copy/convert
// the user output buffer into the per-channel JACK port buffers (or write
// silence while draining); copy/convert the JACK capture buffers into the
// user input buffer; advance the stream time. Returns SUCCESS/FAILURE to
// the JACK trampoline.
// BUG(review): both error strings below say "RtApiCore::callbackEvent()"
// — copy-paste from the CoreAudio section; they should name RtApiJack.
// (Left untouched here since a doc-only pass cannot alter runtime
// strings.)
2427 bool RtApiJack :: callbackEvent( unsigned long nframes )
2429 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
2430 if ( stream_.state == STREAM_CLOSED ) {
2431 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2432 error( RtError::WARNING );
// JACK fixes the period size at server start; a change mid-stream is
// unrecoverable for our preallocated buffers.
2435 if ( stream_.bufferSize != nframes ) {
2436 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2437 error( RtError::WARNING );
2441 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2442 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2444 // Check if we were draining the stream and signal is finished.
2445 if ( handle->drainCounter > 3 ) {
// internalDrain: stop was requested from the callback's own return
// value, so spawn a thread to call stopStream(); otherwise wake the
// thread blocked in stopStream().
2446 if ( handle->internalDrain == true )
2447 pthread_create( &threadId, NULL, jackStopStream, info );
2449 pthread_cond_signal( &handle->condition );
2453 MUTEX_LOCK( &stream_.mutex );
2455 // The state might change while waiting on a mutex.
2456 if ( stream_.state == STREAM_STOPPED ) {
2457 MUTEX_UNLOCK( &stream_.mutex );
2461 // Invoke user callback first, to get fresh output data.
2462 if ( handle->drainCounter == 0 ) {
2463 RtAudioCallback callback = (RtAudioCallback) info->callback;
2464 double streamTime = getStreamTime();
2465 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by jackXrun().
2466 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2467 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2468 handle->xrun[0] = false;
2470 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2471 status |= RTAUDIO_INPUT_OVERFLOW;
2472 handle->xrun[1] = false;
// Callback return: 0 = continue, 1 = drain then stop, 2 = abort now.
2474 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2475 stream_.bufferSize, streamTime, status, info->userData );
2476 if ( handle->drainCounter == 2 ) {
2477 MUTEX_UNLOCK( &stream_.mutex );
// `id` is presumably a ThreadHandle declared on a line dropped from
// this view (original ~2478).
2479 pthread_create( &id, NULL, jackStopStream, info );
2482 else if ( handle->drainCounter == 1 )
2483 handle->internalDrain = true;
2486 jack_default_audio_sample_t *jackbuffer;
2487 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2490 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2492 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2493 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2494 memset( jackbuffer, 0, bufferBytes );
// User format/interleaving differs from JACK's: convert into the
// non-interleaved device buffer, then copy channel-by-channel.
2498 else if ( stream_.doConvertBuffer[0] ) {
2500 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2502 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2503 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2504 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2507 else { // no buffer conversion
2508 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2509 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2510 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
// Count drain periods; > 3 triggers the completion path next cycle.
2514 if ( handle->drainCounter ) {
2515 handle->drainCounter++;
2520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2522 if ( stream_.doConvertBuffer[1] ) {
2523 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2524 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2525 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2529 else { // no buffer conversion
2530 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2531 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2532 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2538 MUTEX_UNLOCK(&stream_.mutex);
2540 RtApi::tickStreamTime();
\r
2543 //******************** End of __UNIX_JACK__ *********************//
\r
2546 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2548 // The ASIO API is designed around a callback scheme, so this
\r
2549 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2550 // Jack. The primary constraint with ASIO is that it only allows
\r
2551 // access to a single driver at a time. Thus, it is not possible to
\r
2552 // have more than one simultaneous RtAudio stream.
\r
2554 // This implementation also requires a number of external ASIO files
\r
2555 // and a few global variables. The ASIO callback scheme does not
\r
2556 // allow for the passing of user data, so we must create a global
\r
2557 // pointer to our callbackInfo structure.
\r
2559 // On unix systems, we make use of a pthread condition variable.
\r
2560 // Since there is no equivalent in Windows, I hacked something based
\r
2561 // on information found in
\r
2562 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2564 #include "asiosys.h"
\r
2566 #include "iasiothiscallresolver.h"
\r
2567 #include "asiodrivers.h"
\r
// File-scope ASIO state. The ASIO callback scheme cannot carry user data,
// so the CallbackInfo pointer (asioCallbackInfo) must be a global; the
// driver list, callback table, and driver info are likewise single-instance
// because ASIO permits only one active driver at a time.
2570 AsioDrivers drivers;
2571 ASIOCallbacks asioCallbacks;
2572 ASIODriverInfo driverInfo;
2573 CallbackInfo *asioCallbackInfo;
\r
// Per-stream bookkeeping for the ASIO backend: drain tracking plus the
// ASIOBufferInfo array handed to ASIOCreateBuffers.
// NOTE(review): member lines between originals 2579 and 2583 (likely the
// Windows event/condition handle and the constructor header) were dropped
// by the extraction of this chunk.
2576 struct AsioHandle {
2577 int drainCounter; // Tracks callback counts when draining
2578 bool internalDrain; // Indicates if stop is initiated from callback or not.
2579 ASIOBufferInfo *bufferInfos;
2583 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2586 // Function declarations (definitions at end of section)
// getAsioErrorString: map an ASIOError to a printable message.
2587 static const char* getAsioErrorString( ASIOError result );
// sampleRateChanged / asioMessages: ASIO driver notification callbacks.
2588 void sampleRateChanged( ASIOSampleRate sRate );
2589 long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM in single-threaded apartment mode (required
// by ASIO), reset any loaded driver, and prepare the ASIODriverInfo used
// by later ASIOInit calls.
2591 RtApiAsio :: RtApiAsio()
2593 // ASIO cannot run on a multi-threaded apartment. You can call
2594 // CoInitialize beforehand, but it must be for apartment threading
2595 // (in which case, CoInitialize will return S_FALSE here).
2596 coInitialized_ = false;
2597 HRESULT hr = CoInitialize( NULL );
2598 if ( FAILED(hr) ) {
2599 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2600 error( RtError::WARNING );
// Remember that we own this CoInitialize so the destructor can balance it.
2602 coInitialized_ = true;
2604 drivers.removeCurrentDriver();
2605 driverInfo.asioVersion = 2;
2607 // See note in DirectSound implementation about GetDesktopWindow().
2608 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: close any open stream, then balance the constructor's
// CoInitialize if it succeeded.
2611 RtApiAsio :: ~RtApiAsio()
2613 if ( stream_.state != STREAM_CLOSED ) closeStream();
2614 if ( coInitialized_ ) CoUninitialize();
\r
// Return the number of installed ASIO drivers, as reported by the
// ASIO SDK driver list.
2617 unsigned int RtApiAsio :: getDeviceCount( void )
2619 return (unsigned int) drivers.asioGetNumDev();
\r
// Probe one ASIO driver and fill an RtAudio::DeviceInfo: name, channel
// counts, supported sample rates (tested against the SAMPLE_RATES table),
// and native format (derived from the first channel, assumed uniform).
// Because ASIO allows only one loaded driver at a time, an open stream
// forces us to answer from the cached devices_ snapshot instead of
// probing. info.probed stays false on failure paths.
2622 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2624 RtAudio::DeviceInfo info;
2625 info.probed = false;
2628 unsigned int nDevices = getDeviceCount();
2629 if ( nDevices == 0 ) {
2630 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2631 error( RtError::INVALID_USE );
2634 if ( device >= nDevices ) {
2635 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2636 error( RtError::INVALID_USE );
2639 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2640 if ( stream_.state != STREAM_CLOSED ) {
2641 if ( device >= devices_.size() ) {
2642 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2643 error( RtError::WARNING );
2646 return devices_[ device ];
2649 char driverName[32];
2650 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2651 if ( result != ASE_OK ) {
2652 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2653 errorText_ = errorStream_.str();
2654 error( RtError::WARNING );
2658 info.name = driverName;
2660 if ( !drivers.loadDriver( driverName ) ) {
2661 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2662 errorText_ = errorStream_.str();
2663 error( RtError::WARNING );
2667 result = ASIOInit( &driverInfo );
// NOTE(review): on this failure path the loaded driver does not appear
// to be unloaded (no drivers.removeCurrentDriver() before the visible
// error/return) — unlike the ASIOGetChannels/ASIOGetChannelInfo paths
// below. Confirm against the dropped lines / upstream. TODO confirm.
2668 if ( result != ASE_OK ) {
2669 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2670 errorText_ = errorStream_.str();
2671 error( RtError::WARNING );
2675 // Determine the device channel information.
2676 long inputChannels, outputChannels;
2677 result = ASIOGetChannels( &inputChannels, &outputChannels );
2678 if ( result != ASE_OK ) {
2679 drivers.removeCurrentDriver();
2680 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2681 errorText_ = errorStream_.str();
2682 error( RtError::WARNING );
2686 info.outputChannels = outputChannels;
2687 info.inputChannels = inputChannels;
2688 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2689 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2691 // Determine the supported sample rates.
2692 info.sampleRates.clear();
2693 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2694 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2695 if ( result == ASE_OK )
2696 info.sampleRates.push_back( SAMPLE_RATES[i] );
2699 // Determine supported data types ... just check first channel and assume rest are the same.
2700 ASIOChannelInfo channelInfo;
2701 channelInfo.channel = 0;
2702 channelInfo.isInput = true;
2703 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2704 result = ASIOGetChannelInfo( &channelInfo );
2705 if ( result != ASE_OK ) {
2706 drivers.removeCurrentDriver();
2707 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2708 errorText_ = errorStream_.str();
2709 error( RtError::WARNING );
// Map the ASIO sample type onto the RtAudio format mask; both byte
// orders of each width map to the same RtAudio format (byte swapping is
// handled elsewhere).
2713 info.nativeFormats = 0;
2714 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2715 info.nativeFormats |= RTAUDIO_SINT16;
2716 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2717 info.nativeFormats |= RTAUDIO_SINT32;
2718 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2719 info.nativeFormats |= RTAUDIO_FLOAT32;
2720 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2721 info.nativeFormats |= RTAUDIO_FLOAT64;
2723 if ( info.outputChannels > 0 )
2724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2725 if ( info.inputChannels > 0 )
2726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2728 info.probed = true;
2729 drivers.removeCurrentDriver();
\r
2733 void bufferSwitch( long index, ASIOBool processNow )
\r
2735 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2736 object->callbackEvent( index );
\r
2739 void RtApiAsio :: saveDeviceInfo( void )
\r
2743 unsigned int nDevices = getDeviceCount();
\r
2744 devices_.resize( nDevices );
\r
2745 for ( unsigned int i=0; i<nDevices; i++ )
\r
2746 devices_[i] = getDeviceInfo( i );
\r
2749 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2750 unsigned int firstChannel, unsigned int sampleRate,
\r
2751 RtAudioFormat format, unsigned int *bufferSize,
\r
2752 RtAudio::StreamOptions *options )
\r
2754 // For ASIO, a duplex stream MUST use the same driver.
\r
2755 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2756 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2760 char driverName[32];
\r
2761 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2762 if ( result != ASE_OK ) {
\r
2763 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2764 errorText_ = errorStream_.str();
\r
2768 // Only load the driver once for duplex stream.
\r
2769 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2770 // The getDeviceInfo() function will not work when a stream is open
\r
2771 // because ASIO does not allow multiple devices to run at the same
\r
2772 // time. Thus, we'll probe the system before opening a stream and
\r
2773 // save the results for use by getDeviceInfo().
\r
2774 this->saveDeviceInfo();
\r
2776 if ( !drivers.loadDriver( driverName ) ) {
\r
2777 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2782 result = ASIOInit( &driverInfo );
\r
2783 if ( result != ASE_OK ) {
\r
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2785 errorText_ = errorStream_.str();
\r
2790 // Check the device channel count.
\r
2791 long inputChannels, outputChannels;
\r
2792 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2793 if ( result != ASE_OK ) {
\r
2794 drivers.removeCurrentDriver();
\r
2795 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2800 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2801 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2802 drivers.removeCurrentDriver();
\r
2803 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2804 errorText_ = errorStream_.str();
\r
2807 stream_.nDeviceChannels[mode] = channels;
\r
2808 stream_.nUserChannels[mode] = channels;
\r
2809 stream_.channelOffset[mode] = firstChannel;
\r
2811 // Verify the sample rate is supported.
\r
2812 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2813 if ( result != ASE_OK ) {
\r
2814 drivers.removeCurrentDriver();
\r
2815 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2816 errorText_ = errorStream_.str();
\r
2820 // Get the current sample rate
\r
2821 ASIOSampleRate currentRate;
\r
2822 result = ASIOGetSampleRate( ¤tRate );
\r
2823 if ( result != ASE_OK ) {
\r
2824 drivers.removeCurrentDriver();
\r
2825 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2826 errorText_ = errorStream_.str();
\r
2830 // Set the sample rate only if necessary
\r
2831 if ( currentRate != sampleRate ) {
\r
2832 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2841 // Determine the driver data type.
\r
2842 ASIOChannelInfo channelInfo;
\r
2843 channelInfo.channel = 0;
\r
2844 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2845 else channelInfo.isInput = true;
\r
2846 result = ASIOGetChannelInfo( &channelInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 drivers.removeCurrentDriver();
\r
2849 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2850 errorText_ = errorStream_.str();
\r
2854 // Assuming WINDOWS host is always little-endian.
\r
2855 stream_.doByteSwap[mode] = false;
\r
2856 stream_.userFormat = format;
\r
2857 stream_.deviceFormat[mode] = 0;
\r
2858 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2860 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2862 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2863 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2864 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2866 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2867 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2868 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2870 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2871 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2872 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2875 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2876 drivers.removeCurrentDriver();
\r
2877 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2878 errorText_ = errorStream_.str();
\r
2882 // Set the buffer size. For a duplex stream, this will end up
\r
2883 // setting the buffer size based on the input constraints, which
\r
2885 long minSize, maxSize, preferSize, granularity;
\r
2886 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2895 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2896 else if ( granularity == -1 ) {
\r
2897 // Make sure bufferSize is a power of two.
\r
2898 int log2_of_min_size = 0;
\r
2899 int log2_of_max_size = 0;
\r
2901 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2902 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2903 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2906 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2907 int min_delta_num = log2_of_min_size;
\r
2909 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2910 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2911 if (current_delta < min_delta) {
\r
2912 min_delta = current_delta;
\r
2913 min_delta_num = i;
\r
2917 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2918 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2919 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2921 else if ( granularity != 0 ) {
\r
2922 // Set to an even multiple of granularity, rounding up.
\r
2923 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2926 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2927 drivers.removeCurrentDriver();
\r
2928 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2932 stream_.bufferSize = *bufferSize;
\r
2933 stream_.nBuffers = 2;
\r
2935 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2936 else stream_.userInterleaved = true;
\r
2938 // ASIO always uses non-interleaved buffers.
\r
2939 stream_.deviceInterleaved[mode] = false;
\r
2941 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2942 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2943 if ( handle == 0 ) {
\r
2945 handle = new AsioHandle;
\r
2947 catch ( std::bad_alloc& ) {
\r
2948 //if ( handle == NULL ) {
\r
2949 drivers.removeCurrentDriver();
\r
2950 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2953 handle->bufferInfos = 0;
\r
2955 // Create a manual-reset event.
\r
2956 handle->condition = CreateEvent( NULL, // no security
\r
2957 TRUE, // manual-reset
\r
2958 FALSE, // non-signaled initially
\r
2959 NULL ); // unnamed
\r
2960 stream_.apiHandle = (void *) handle;
\r
2963 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2964 // and output separately, we'll have to dispose of previously
\r
2965 // created output buffers for a duplex stream.
\r
2966 long inputLatency, outputLatency;
\r
2967 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2968 ASIODisposeBuffers();
\r
2969 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2972 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2973 bool buffersAllocated = false;
\r
2974 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2975 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2976 if ( handle->bufferInfos == NULL ) {
\r
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2978 errorText_ = errorStream_.str();
\r
2982 ASIOBufferInfo *infos;
\r
2983 infos = handle->bufferInfos;
\r
2984 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2985 infos->isInput = ASIOFalse;
\r
2986 infos->channelNum = i + stream_.channelOffset[0];
\r
2987 infos->buffers[0] = infos->buffers[1] = 0;
\r
2989 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2990 infos->isInput = ASIOTrue;
\r
2991 infos->channelNum = i + stream_.channelOffset[1];
\r
2992 infos->buffers[0] = infos->buffers[1] = 0;
\r
2995 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
2996 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
2997 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
2998 asioCallbacks.asioMessage = &asioMessages;
\r
2999 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3000 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3001 if ( result != ASE_OK ) {
\r
3002 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3003 errorText_ = errorStream_.str();
\r
3006 buffersAllocated = true;
\r
3008 // Set flags for buffer conversion.
\r
3009 stream_.doConvertBuffer[mode] = false;
\r
3010 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3011 stream_.doConvertBuffer[mode] = true;
\r
3012 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3013 stream_.nUserChannels[mode] > 1 )
\r
3014 stream_.doConvertBuffer[mode] = true;
\r
3016 // Allocate necessary internal buffers
\r
3017 unsigned long bufferBytes;
\r
3018 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3019 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3020 if ( stream_.userBuffer[mode] == NULL ) {
\r
3021 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3025 if ( stream_.doConvertBuffer[mode] ) {
\r
3027 bool makeBuffer = true;
\r
3028 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3029 if ( mode == INPUT ) {
\r
3030 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3031 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3032 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3036 if ( makeBuffer ) {
\r
3037 bufferBytes *= *bufferSize;
\r
3038 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3039 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3040 if ( stream_.deviceBuffer == NULL ) {
\r
3041 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3047 stream_.sampleRate = sampleRate;
\r
3048 stream_.device[mode] = device;
\r
3049 stream_.state = STREAM_STOPPED;
\r
3050 asioCallbackInfo = &stream_.callbackInfo;
\r
3051 stream_.callbackInfo.object = (void *) this;
\r
3052 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3053 // We had already set up an output stream.
\r
3054 stream_.mode = DUPLEX;
\r
3056 stream_.mode = mode;
\r
3058 // Determine device latencies
\r
3059 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3060 if ( result != ASE_OK ) {
\r
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3062 errorText_ = errorStream_.str();
\r
3063 error( RtError::WARNING); // warn but don't fail
\r
3066 stream_.latency[0] = outputLatency;
\r
3067 stream_.latency[1] = inputLatency;
\r
3070 // Setup the buffer conversion information structure. We don't use
\r
3071 // buffers to do channel offsets, so we override that parameter
\r
3073 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3078 if ( buffersAllocated )
\r
3079 ASIODisposeBuffers();
\r
3080 drivers.removeCurrentDriver();
\r
3083 CloseHandle( handle->condition );
\r
3084 if ( handle->bufferInfos )
\r
3085 free( handle->bufferInfos );
\r
3087 stream_.apiHandle = 0;
\r
3090 for ( int i=0; i<2; i++ ) {
\r
3091 if ( stream_.userBuffer[i] ) {
\r
3092 free( stream_.userBuffer[i] );
\r
3093 stream_.userBuffer[i] = 0;
\r
3097 if ( stream_.deviceBuffer ) {
\r
3098 free( stream_.deviceBuffer );
\r
3099 stream_.deviceBuffer = 0;
\r
3105 void RtApiAsio :: closeStream()
\r
3107 if ( stream_.state == STREAM_CLOSED ) {
\r
3108 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3109 error( RtError::WARNING );
\r
3113 if ( stream_.state == STREAM_RUNNING ) {
\r
3114 stream_.state = STREAM_STOPPED;
\r
3117 ASIODisposeBuffers();
\r
3118 drivers.removeCurrentDriver();
\r
3120 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3122 CloseHandle( handle->condition );
\r
3123 if ( handle->bufferInfos )
\r
3124 free( handle->bufferInfos );
\r
3126 stream_.apiHandle = 0;
\r
3129 for ( int i=0; i<2; i++ ) {
\r
3130 if ( stream_.userBuffer[i] ) {
\r
3131 free( stream_.userBuffer[i] );
\r
3132 stream_.userBuffer[i] = 0;
\r
3136 if ( stream_.deviceBuffer ) {
\r
3137 free( stream_.deviceBuffer );
\r
3138 stream_.deviceBuffer = 0;
\r
3141 stream_.mode = UNINITIALIZED;
\r
3142 stream_.state = STREAM_CLOSED;
\r
// Set to true when a stream-stop helper thread has been spawned from the
// audio callback (see callbackEvent()), so further buffer switches are
// ignored while the stop is pending.  Reset by startStream().
bool stopThreadCalled = false;
3147 void RtApiAsio :: startStream()
\r
3150 if ( stream_.state == STREAM_RUNNING ) {
\r
3151 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3152 error( RtError::WARNING );
\r
3156 //MUTEX_LOCK( &stream_.mutex );
\r
3158 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3159 ASIOError result = ASIOStart();
\r
3160 if ( result != ASE_OK ) {
\r
3161 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3162 errorText_ = errorStream_.str();
\r
3166 handle->drainCounter = 0;
\r
3167 handle->internalDrain = false;
\r
3168 ResetEvent( handle->condition );
\r
3169 stream_.state = STREAM_RUNNING;
\r
3173 //MUTEX_UNLOCK( &stream_.mutex );
\r
3175 stopThreadCalled = false;
\r
3177 if ( result == ASE_OK ) return;
\r
3178 error( RtError::SYSTEM_ERROR );
\r
3181 void RtApiAsio :: stopStream()
\r
3184 if ( stream_.state == STREAM_STOPPED ) {
\r
3185 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3186 error( RtError::WARNING );
\r
3191 MUTEX_LOCK( &stream_.mutex );
\r
3193 if ( stream_.state == STREAM_STOPPED ) {
\r
3194 MUTEX_UNLOCK( &stream_.mutex );
\r
3199 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3200 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3201 if ( handle->drainCounter == 0 ) {
\r
3202 handle->drainCounter = 2;
\r
3203 // MUTEX_UNLOCK( &stream_.mutex );
\r
3204 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3205 //ResetEvent( handle->condition );
\r
3206 // MUTEX_LOCK( &stream_.mutex );
\r
3210 stream_.state = STREAM_STOPPED;
\r
3212 ASIOError result = ASIOStop();
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3215 errorText_ = errorStream_.str();
\r
3218 // MUTEX_UNLOCK( &stream_.mutex );
\r
3220 if ( result == ASE_OK ) return;
\r
3221 error( RtError::SYSTEM_ERROR );
\r
3224 void RtApiAsio :: abortStream()
\r
3227 if ( stream_.state == STREAM_STOPPED ) {
\r
3228 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3229 error( RtError::WARNING );
\r
3233 // The following lines were commented-out because some behavior was
\r
3234 // noted where the device buffers need to be zeroed to avoid
\r
3235 // continuing sound, even when the device buffers are completely
\r
3236 // disposed. So now, calling abort is the same as calling stop.
\r
3237 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3238 // handle->drainCounter = 2;
\r
3242 // This function will be called by a spawned thread when the user
\r
3243 // callback function signals that the stream should be stopped or
\r
3244 // aborted. It is necessary to handle it this way because the
\r
3245 // callbackEvent() function must return before the ASIOStop()
\r
3246 // function will return.
\r
3247 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3249 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3250 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3252 object->stopStream();
\r
3254 _endthreadex( 0 );
\r
3258 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3260 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
3261 if ( stopThreadCalled ) return SUCCESS;
\r
3262 if ( stream_.state == STREAM_CLOSED ) {
\r
3263 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3264 error( RtError::WARNING );
\r
3268 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3269 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3271 // Check if we were draining the stream and signal if finished.
\r
3272 if ( handle->drainCounter > 3 ) {
\r
3273 if ( handle->internalDrain == false )
\r
3274 SetEvent( handle->condition );
\r
3275 else { // spawn a thread to stop the stream
\r
3276 unsigned threadId;
\r
3277 stopThreadCalled = true;
\r
3278 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3279 &stream_.callbackInfo, 0, &threadId );
\r
3284 /*MUTEX_LOCK( &stream_.mutex );
\r
3286 // The state might change while waiting on a mutex.
\r
3287 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
\r
3289 // Invoke user callback to get fresh output data UNLESS we are
\r
3290 // draining stream.
\r
3291 if ( handle->drainCounter == 0 ) {
\r
3292 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3293 double streamTime = getStreamTime();
\r
3294 RtAudioStreamStatus status = 0;
\r
3295 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3296 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3299 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3300 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3303 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3304 stream_.bufferSize, streamTime, status, info->userData );
\r
3305 if ( handle->drainCounter == 2 ) {
\r
3306 // MUTEX_UNLOCK( &stream_.mutex );
\r
3308 unsigned threadId;
\r
3309 stopThreadCalled = true;
\r
3310 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3311 &stream_.callbackInfo, 0, &threadId );
\r
3314 else if ( handle->drainCounter == 1 )
\r
3315 handle->internalDrain = true;
\r
3318 unsigned int nChannels, bufferBytes, i, j;
\r
3319 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3320 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3322 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3324 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3326 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3327 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3328 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3332 else if ( stream_.doConvertBuffer[0] ) {
\r
3334 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3335 if ( stream_.doByteSwap[0] )
\r
3336 byteSwapBuffer( stream_.deviceBuffer,
\r
3337 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3338 stream_.deviceFormat[0] );
\r
3340 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3341 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3342 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3343 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3349 if ( stream_.doByteSwap[0] )
\r
3350 byteSwapBuffer( stream_.userBuffer[0],
\r
3351 stream_.bufferSize * stream_.nUserChannels[0],
\r
3352 stream_.userFormat );
\r
3354 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3355 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3356 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3357 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3362 if ( handle->drainCounter ) {
\r
3363 handle->drainCounter++;
\r
3368 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3370 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3372 if (stream_.doConvertBuffer[1]) {
\r
3374 // Always interleave ASIO input data.
\r
3375 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3376 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3377 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3378 handle->bufferInfos[i].buffers[bufferIndex],
\r
3382 if ( stream_.doByteSwap[1] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3385 stream_.deviceFormat[1] );
\r
3386 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3390 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3391 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3392 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3393 handle->bufferInfos[i].buffers[bufferIndex],
\r
3398 if ( stream_.doByteSwap[1] )
\r
3399 byteSwapBuffer( stream_.userBuffer[1],
\r
3400 stream_.bufferSize * stream_.nUserChannels[1],
\r
3401 stream_.userFormat );
\r
3406 // The following call was suggested by Malte Clasen. While the API
\r
3407 // documentation indicates it should not be required, some device
\r
3408 // drivers apparently do not function correctly without it.
\r
3409 ASIOOutputReady();
\r
3411 // MUTEX_UNLOCK( &stream_.mutex );
\r
3413 RtApi::tickStreamTime();
\r
3417 void sampleRateChanged( ASIOSampleRate sRate )
\r
3419 // The ASIO documentation says that this usually only happens during
\r
3420 // external sync. Audio processing is not stopped by the driver,
\r
3421 // actual sample rate might not have even changed, maybe only the
\r
3422 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3425 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3427 object->stopStream();
\r
3429 catch ( RtError &exception ) {
\r
3430 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3434 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3437 long asioMessages( long selector, long value, void* message, double* opt )
\r
3441 switch( selector ) {
\r
3442 case kAsioSelectorSupported:
\r
3443 if ( value == kAsioResetRequest
\r
3444 || value == kAsioEngineVersion
\r
3445 || value == kAsioResyncRequest
\r
3446 || value == kAsioLatenciesChanged
\r
3447 // The following three were added for ASIO 2.0, you don't
\r
3448 // necessarily have to support them.
\r
3449 || value == kAsioSupportsTimeInfo
\r
3450 || value == kAsioSupportsTimeCode
\r
3451 || value == kAsioSupportsInputMonitor)
\r
3454 case kAsioResetRequest:
\r
3455 // Defer the task and perform the reset of the driver during the
\r
3456 // next "safe" situation. You cannot reset the driver right now,
\r
3457 // as this code is called from the driver. Reset the driver is
\r
3458 // done by completely destruct is. I.e. ASIOStop(),
\r
3459 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3461 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3464 case kAsioResyncRequest:
\r
3465 // This informs the application that the driver encountered some
\r
3466 // non-fatal data loss. It is used for synchronization purposes
\r
3467 // of different media. Added mainly to work around the Win16Mutex
\r
3468 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3469 // which could lose data because the Mutex was held too long by
\r
3470 // another thread. However a driver can issue it in other
\r
3471 // situations, too.
\r
3472 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3476 case kAsioLatenciesChanged:
\r
3477 // This will inform the host application that the drivers were
\r
3478 // latencies changed. Beware, it this does not mean that the
\r
3479 // buffer sizes have changed! You might need to update internal
\r
3481 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3484 case kAsioEngineVersion:
\r
3485 // Return the supported ASIO version of the host application. If
\r
3486 // a host application does not implement this selector, ASIO 1.0
\r
3487 // is assumed by the driver.
\r
3490 case kAsioSupportsTimeInfo:
\r
3491 // Informs the driver whether the
\r
3492 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3493 // For compatibility with ASIO 1.0 drivers the host application
\r
3494 // should always support the "old" bufferSwitch method, too.
\r
3497 case kAsioSupportsTimeCode:
\r
3498 // Informs the driver whether application is interested in time
\r
3499 // code info. If an application does not need to know about time
\r
3500 // code, the driver has less work to do.
\r
3507 static const char* getAsioErrorString( ASIOError result )
\r
3512 const char*message;
\r
3515 static Messages m[] =
\r
3517 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3518 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3519 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3520 { ASE_InvalidMode, "Invalid mode." },
\r
3521 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3522 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3523 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3526 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3527 if ( m[i].value == result ) return m[i].message;
\r
3529 return "Unknown error.";
\r
3531 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3535 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3537 // Modified by Robin Davies, October 2005
\r
3538 // - Improvements to DirectX pointer chasing.
\r
3539 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3540 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3541 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3542 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3544 #include <dsound.h>
\r
3545 #include <assert.h>
\r
3546 #include <algorithm>
\r
3548 #if defined(__MINGW32__)
\r
3549 // missing from latest mingw winapi
\r
3550 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3551 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3552 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3553 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3556 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3558 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3559 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3562 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3564 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3565 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3566 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3567 return pointer >= earlierPointer && pointer < laterPointer;
\r
3570 // A structure to hold various information related to the DirectSound
\r
3571 // API implementation.
\r
3573 unsigned int drainCounter; // Tracks callback counts when draining
\r
3574 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3578 UINT bufferPointer[2];
\r
3579 DWORD dsBufferSize[2];
\r
3580 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3584 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3587 // Declarations for utility functions, callbacks, and structures
\r
3588 // specific to the DirectSound implementation.
\r
3589 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3590 LPCTSTR description,
\r
3592 LPVOID lpContext );
\r
3594 static const char* getErrorString( int code );
\r
3596 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3605 : found(false) { validId[0] = false; validId[1] = false; }
\r
3608 std::vector< DsDevice > dsDevices;
\r
3610 RtApiDs :: RtApiDs()
\r
3612 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3613 // accept whatever the mainline chose for a threading model.
\r
3614 coInitialized_ = false;
\r
3615 HRESULT hr = CoInitialize( NULL );
\r
3616 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3619 RtApiDs :: ~RtApiDs()
\r
3621 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3622 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3625 // The DirectSound default output is always the first device.

// Returns the index of the default output device.  NOTE(review): the
// function body (original lines 3627-3630) is missing from this
// extract; per the comment above it presumably returns 0 — confirm
// against the full source.
3626 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3631 // The DirectSound default input is always the first input device,

3632 // which is the first capture device enumerated.

// Returns the index of the default input device.  NOTE(review): the
// function body (original lines 3634-3637) is missing from this
// extract; per the comment above it presumably returns 0 — confirm
// against the full source.
3633 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3638 unsigned int RtApiDs :: getDeviceCount( void )
\r
3640 // Set query flag for previously found devices to false, so that we
\r
3641 // can check for any devices that have disappeared.
\r
3642 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3643 dsDevices[i].found = false;
\r
3645 // Query DirectSound devices.
\r
3646 bool isInput = false;
\r
3647 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3648 if ( FAILED( result ) ) {
\r
3649 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3650 errorText_ = errorStream_.str();
\r
3651 error( RtError::WARNING );
\r
3654 // Query DirectSoundCapture devices.
\r
3656 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3657 if ( FAILED( result ) ) {
\r
3658 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3659 errorText_ = errorStream_.str();
\r
3660 error( RtError::WARNING );
\r
3663 // Clean out any devices that may have disappeared.
\r
3664 std::vector< DsDevice > :: iterator it;
\r
3665 for ( it=dsDevices.begin(); it < dsDevices.end(); it++ )
\r
3666 if ( it->found == false ) dsDevices.erase( it );
\r
3668 return dsDevices.size();
\r
// Probe the capabilities of device index `device`: output/input channel
// counts, supported sample rates, native formats, default-device flags
// and name.  On failure, warnings are issued and info.probed stays
// false.  NOTE(review): several structural lines (the `probeInput:`
// label, HRESULT/DSCAPS/DSCCAPS declarations, braces and returns) are
// missing from this extract — this annotated copy cannot be compiled
// as-is; confirm against the full source.
3671 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

3673 RtAudio::DeviceInfo info;

3674 info.probed = false;

// Lazily populate the device list on first use.
3676 if ( dsDevices.size() == 0 ) {

3677 // Force a query of all devices

3679 if ( dsDevices.size() == 0 ) {

3680 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

3681 error( RtError::INVALID_USE );

3685 if ( device >= dsDevices.size() ) {

3686 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

3687 error( RtError::INVALID_USE );

// Skip the playback probe entirely if this device has no output id.
// NOTE(review): the `probeInput:` label this jumps to is not visible
// in this extract.
3691 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

3693 LPDIRECTSOUND output;

3695 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3696 if ( FAILED( result ) ) {

3697 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3698 errorText_ = errorStream_.str();

3699 error( RtError::WARNING );

3703 outCaps.dwSize = sizeof( outCaps );

3704 result = output->GetCaps( &outCaps );

3705 if ( FAILED( result ) ) {

3706 output->Release();

3707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

3708 errorText_ = errorStream_.str();

3709 error( RtError::WARNING );

3713 // Get output channel information.

3714 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

3716 // Get sample rate information.

3717 info.sampleRates.clear();

3718 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

3719 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

3720 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

3721 info.sampleRates.push_back( SAMPLE_RATES[k] );

3724 // Get format information.

3725 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

3726 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

3728 output->Release();

3730 if ( getDefaultOutputDevice() == device )

3731 info.isDefaultOutput = true;

// If there is no capture side, the probe is complete after the output
// pass.
3733 if ( dsDevices[ device ].validId[1] == false ) {

3734 info.name = dsDevices[ device ].name;

3735 info.probed = true;

3741 LPDIRECTSOUNDCAPTURE input;

3742 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

3743 if ( FAILED( result ) ) {

3744 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

3745 errorText_ = errorStream_.str();

3746 error( RtError::WARNING );

3751 inCaps.dwSize = sizeof( inCaps );

3752 result = input->GetCaps( &inCaps );

3753 if ( FAILED( result ) ) {

3755 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

3756 errorText_ = errorStream_.str();

3757 error( RtError::WARNING );

3761 // Get input channel information.

3762 info.inputChannels = inCaps.dwChannels;

3764 // Get sample rate and format information.

3765 std::vector<unsigned int> rates;

// Stereo capture: the WAVE_FORMAT_*S* bits encode (rate x stereo x
// depth) combinations.
3766 if ( inCaps.dwChannels >= 2 ) {

3767 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3768 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3769 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3770 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3771 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3772 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3773 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3774 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

// Collect the rates supported at the preferred (16-bit first) depth.
3776 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3777 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

3778 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

3779 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

3780 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

3782 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3783 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

3784 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

3785 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

3786 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

// Mono capture: same pattern with the *M* (mono) format bits.
3789 else if ( inCaps.dwChannels == 1 ) {

3790 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3791 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3792 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3793 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3794 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3795 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3796 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3797 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3799 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3800 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

3801 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

3802 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

3803 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

3805 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3806 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

3807 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

3808 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

3809 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

3812 else info.inputChannels = 0; // technically, this would be an error

3816 if ( info.inputChannels == 0 ) return info;

3818 // Copy the supported rates to the info structure but avoid duplication.

3820 for ( unsigned int i=0; i<rates.size(); i++ ) {

3822 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

3823 if ( rates[i] == info.sampleRates[j] ) {

3828 if ( found == false ) info.sampleRates.push_back( rates[i] );

3830 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

3832 // If device opens for both playback and capture, we determine the channels.

3833 if ( info.outputChannels > 0 && info.inputChannels > 0 )

3834 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// The first device is always the default input (see
// getDefaultInputDevice).
3836 if ( device == 0 ) info.isDefaultInput = true;

3838 // Copy name and return.

3839 info.name = dsDevices[ device ].name;

3840 info.probed = true;
\r
// Open the requested device for the given direction (OUTPUT or INPUT),
// configuring DirectSound buffers, conversion flags, internal buffers,
// the DsHandle, and (on first open) the callback thread.  Returns
// true/SUCCESS on success; on failure, cleanup at the bottom releases
// any partially-created DirectSound objects and buffers.
// NOTE(review): many structural lines (braces, `return FAILURE;`
// statements, the `error:` label, HRESULT/DSCAPS declarations) are
// missing from this extract — this annotated copy cannot compile
// as-is; confirm against the full source.
3844 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

3845 unsigned int firstChannel, unsigned int sampleRate,

3846 RtAudioFormat format, unsigned int *bufferSize,

3847 RtAudio::StreamOptions *options )

// DirectSound caps out at stereo per device.
3849 if ( channels + firstChannel > 2 ) {

3850 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

3854 unsigned int nDevices = dsDevices.size();

3855 if ( nDevices == 0 ) {

3856 // This should not happen because a check is made before this function is called.

3857 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

3861 if ( device >= nDevices ) {

3862 // This should not happen because a check is made before this function is called.

3863 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

3867 if ( mode == OUTPUT ) {

3868 if ( dsDevices[ device ].validId[0] == false ) {

3869 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

3870 errorText_ = errorStream_.str();

3874 else { // mode == INPUT

3875 if ( dsDevices[ device ].validId[1] == false ) {

3876 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

3877 errorText_ = errorStream_.str();

3882 // According to a note in PortAudio, using GetDesktopWindow()

3883 // instead of GetForegroundWindow() is supposed to avoid problems

3884 // that occur when the application's window is not the foreground

3885 // window. Also, if the application window closes before the

3886 // DirectSound buffer, DirectSound can crash. In the past, I had

3887 // problems when using GetDesktopWindow() but it seems fine now

3888 // (January 2010). I'll leave it commented here.

3889 // HWND hWnd = GetForegroundWindow();

3890 HWND hWnd = GetDesktopWindow();

3892 // Check the numberOfBuffers parameter and limit the lowest value to

3893 // two. This is a judgement call and a value of two is probably too

3894 // low for capture, but it should work for playback.

3896 if ( options ) nBuffers = options->numberOfBuffers;

3897 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

3898 if ( nBuffers < 2 ) nBuffers = 3;

3900 // Check the lower range of the user-specified buffer size and set

3901 // (arbitrarily) to a lower bound of 32.

3902 if ( *bufferSize < 32 ) *bufferSize = 32;

3904 // Create the wave format structure. The data format setting will

3905 // be determined later.

3906 WAVEFORMATEX waveFormat;

3907 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

3908 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

3909 waveFormat.nChannels = channels + firstChannel;

3910 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

3912 // Determine the device buffer size. By default, we'll use the value

3913 // defined above (32K), but we will grow it to make allowances for

3914 // very large software buffer sizes.

// NOTE(review): stray double semicolon below — harmless, but should be
// cleaned up.
3915 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;

3916 DWORD dsPointerLeadTime = 0;

3918 void *ohandle = 0, *bhandle = 0;

3920 if ( mode == OUTPUT ) {

3922 LPDIRECTSOUND output;

3923 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3924 if ( FAILED( result ) ) {

3925 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3926 errorText_ = errorStream_.str();

3931 outCaps.dwSize = sizeof( outCaps );

3932 result = output->GetCaps( &outCaps );

3933 if ( FAILED( result ) ) {

3934 output->Release();

3935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

3936 errorText_ = errorStream_.str();

3940 // Check channel information.

3941 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

// NOTE(review): error message below names getDeviceInfo but this is
// probeDeviceOpen — copy/paste slip in the string literal.
3942 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

3943 errorText_ = errorStream_.str();

3947 // Check format information. Use 16-bit format unless not

3948 // supported or user requests 8-bit.

3949 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

3950 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

3951 waveFormat.wBitsPerSample = 16;

3952 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

3955 waveFormat.wBitsPerSample = 8;

3956 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

3958 stream_.userFormat = format;

3960 // Update wave format structure and buffer information.

3961 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

3962 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

3963 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

3965 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

3966 while ( dsPointerLeadTime * 2U > dsBufferSize )

3967 dsBufferSize *= 2;

3969 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

3970 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

3971 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

3972 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

3973 if ( FAILED( result ) ) {

3974 output->Release();

3975 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

3976 errorText_ = errorStream_.str();

3980 // Even though we will write to the secondary buffer, we need to

3981 // access the primary buffer to set the correct output format

3982 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

3983 // buffer description.

3984 DSBUFFERDESC bufferDescription;

3985 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

3986 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

3987 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

3989 // Obtain the primary buffer

3990 LPDIRECTSOUNDBUFFER buffer;

3991 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

3992 if ( FAILED( result ) ) {

3993 output->Release();

3994 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

3995 errorText_ = errorStream_.str();

3999 // Set the primary DS buffer sound format.

4000 result = buffer->SetFormat( &waveFormat );

4001 if ( FAILED( result ) ) {

4002 output->Release();

4003 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

4004 errorText_ = errorStream_.str();

4008 // Setup the secondary DS buffer description.

4009 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

4010 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

4011 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4012 DSBCAPS_GLOBALFOCUS |

4013 DSBCAPS_GETCURRENTPOSITION2 |

4014 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

4015 bufferDescription.dwBufferBytes = dsBufferSize;

4016 bufferDescription.lpwfxFormat = &waveFormat;

4018 // Try to create the secondary DS buffer. If that doesn't work,

4019 // try to use software mixing. Otherwise, there's a problem.

4020 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4021 if ( FAILED( result ) ) {

4022 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4023 DSBCAPS_GLOBALFOCUS |

4024 DSBCAPS_GETCURRENTPOSITION2 |

4025 DSBCAPS_LOCSOFTWARE ); // Force software mixing

4026 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4027 if ( FAILED( result ) ) {

4028 output->Release();

4029 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

4030 errorText_ = errorStream_.str();

4035 // Get the buffer size ... might be different from what we specified.

4037 dsbcaps.dwSize = sizeof( DSBCAPS );

4038 result = buffer->GetCaps( &dsbcaps );

4039 if ( FAILED( result ) ) {

4040 output->Release();

4041 buffer->Release();

4042 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4043 errorText_ = errorStream_.str();

4047 dsBufferSize = dsbcaps.dwBufferBytes;

4049 // Lock the DS buffer

4052 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4053 if ( FAILED( result ) ) {

4054 output->Release();

4055 buffer->Release();

4056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

4057 errorText_ = errorStream_.str();

4061 // Zero the DS buffer

4062 ZeroMemory( audioPtr, dataLen );

4064 // Unlock the DS buffer

4065 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4066 if ( FAILED( result ) ) {

4067 output->Release();

4068 buffer->Release();

4069 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

4070 errorText_ = errorStream_.str();

// Stash the raw COM pointers; they are stored in the DsHandle below.
4074 ohandle = (void *) output;

4075 bhandle = (void *) buffer;

4078 if ( mode == INPUT ) {

4080 LPDIRECTSOUNDCAPTURE input;

4081 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

4082 if ( FAILED( result ) ) {

4083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

4084 errorText_ = errorStream_.str();

4089 inCaps.dwSize = sizeof( inCaps );

4090 result = input->GetCaps( &inCaps );

4091 if ( FAILED( result ) ) {

4093 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

4094 errorText_ = errorStream_.str();

4098 // Check channel information.

4099 if ( inCaps.dwChannels < channels + firstChannel ) {

// NOTE(review): message below names getDeviceInfo — copy/paste slip in
// the string literal.
4100 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

4104 // Check format information. Use 16-bit format unless user

4105 // requests 8-bit.

4106 DWORD deviceFormats;

4107 if ( channels + firstChannel == 2 ) {

4108 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

4109 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4110 waveFormat.wBitsPerSample = 8;

4111 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4113 else { // assume 16-bit is supported

4114 waveFormat.wBitsPerSample = 16;

4115 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4118 else { // channel == 1

4119 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

4120 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4121 waveFormat.wBitsPerSample = 8;

4122 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4124 else { // assume 16-bit is supported

4125 waveFormat.wBitsPerSample = 16;

4126 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4129 stream_.userFormat = format;

4131 // Update wave format structure and buffer information.

4132 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

4133 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

4134 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

4136 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

4137 while ( dsPointerLeadTime * 2U > dsBufferSize )

4138 dsBufferSize *= 2;

4140 // Setup the secondary DS buffer description.

4141 DSCBUFFERDESC bufferDescription;

4142 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

4143 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

4144 bufferDescription.dwFlags = 0;

4145 bufferDescription.dwReserved = 0;

4146 bufferDescription.dwBufferBytes = dsBufferSize;

4147 bufferDescription.lpwfxFormat = &waveFormat;

4149 // Create the capture buffer.

4150 LPDIRECTSOUNDCAPTUREBUFFER buffer;

4151 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

4152 if ( FAILED( result ) ) {

4154 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

4155 errorText_ = errorStream_.str();

4159 // Get the buffer size ... might be different from what we specified.

4160 DSCBCAPS dscbcaps;

4161 dscbcaps.dwSize = sizeof( DSCBCAPS );

4162 result = buffer->GetCaps( &dscbcaps );

4163 if ( FAILED( result ) ) {

4165 buffer->Release();

4166 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4167 errorText_ = errorStream_.str();

4171 dsBufferSize = dscbcaps.dwBufferBytes;

4173 // NOTE: We could have a problem here if this is a duplex stream

4174 // and the play and capture hardware buffer sizes are different

4175 // (I'm actually not sure if that is a problem or not).

4176 // Currently, we are not verifying that.

4178 // Lock the capture buffer

4181 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4182 if ( FAILED( result ) ) {

4184 buffer->Release();

4185 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

4186 errorText_ = errorStream_.str();

4190 // Zero the buffer

4191 ZeroMemory( audioPtr, dataLen );

4193 // Unlock the buffer

4194 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4195 if ( FAILED( result ) ) {

4197 buffer->Release();

4198 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

4199 errorText_ = errorStream_.str();

4203 ohandle = (void *) input;

4204 bhandle = (void *) buffer;

4207 // Set various stream parameters

4208 DsHandle *handle = 0;

4209 stream_.nDeviceChannels[mode] = channels + firstChannel;

4210 stream_.nUserChannels[mode] = channels;

4211 stream_.bufferSize = *bufferSize;

4212 stream_.channelOffset[mode] = firstChannel;

4213 stream_.deviceInterleaved[mode] = true;

4214 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

4215 else stream_.userInterleaved = true;

4217 // Set flag for buffer conversion

4218 stream_.doConvertBuffer[mode] = false;

4219 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

4220 stream_.doConvertBuffer[mode] = true;

4221 if (stream_.userFormat != stream_.deviceFormat[mode])

4222 stream_.doConvertBuffer[mode] = true;

4223 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4224 stream_.nUserChannels[mode] > 1 )

4225 stream_.doConvertBuffer[mode] = true;

4227 // Allocate necessary internal buffers

4228 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

4229 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

4230 if ( stream_.userBuffer[mode] == NULL ) {

4231 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

4235 if ( stream_.doConvertBuffer[mode] ) {

4237 bool makeBuffer = true;

4238 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse the output-mode device buffer if it is already big
// enough for the capture side.
4239 if ( mode == INPUT ) {

4240 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

4241 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

4242 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

4246 if ( makeBuffer ) {

4247 bufferBytes *= *bufferSize;

4248 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

4249 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

4250 if ( stream_.deviceBuffer == NULL ) {

4251 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

4257 // Allocate our DsHandle structures for the stream.

4258 if ( stream_.apiHandle == 0 ) {

4260 handle = new DsHandle;

// NOTE(review): message below says "AsioHandle" but this allocates a
// DsHandle — copy/paste slip in the string literal.
4262 catch ( std::bad_alloc& ) {

4263 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

4267 // Create a manual-reset event.

4268 handle->condition = CreateEvent( NULL, // no security

4269 TRUE, // manual-reset

4270 FALSE, // non-signaled initially

4271 NULL ); // unnamed

4272 stream_.apiHandle = (void *) handle;

4275 handle = (DsHandle *) stream_.apiHandle;

4276 handle->id[mode] = ohandle;

4277 handle->buffer[mode] = bhandle;

4278 handle->dsBufferSize[mode] = dsBufferSize;

4279 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

4281 stream_.device[mode] = device;

4282 stream_.state = STREAM_STOPPED;

4283 if ( stream_.mode == OUTPUT && mode == INPUT )

4284 // We had already set up an output stream.

4285 stream_.mode = DUPLEX;

4287 stream_.mode = mode;

4288 stream_.nBuffers = nBuffers;

4289 stream_.sampleRate = sampleRate;

4291 // Setup the buffer conversion information structure.

4292 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

4294 // Setup the callback thread.

4295 if ( stream_.callbackInfo.isRunning == false ) {

4296 unsigned threadId;

4297 stream_.callbackInfo.isRunning = true;

4298 stream_.callbackInfo.object = (void *) this;

4299 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

4300 &stream_.callbackInfo, 0, &threadId );

4301 if ( stream_.callbackInfo.thread == 0 ) {

4302 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

4306 // Boost DS thread priority

4307 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// --- Error cleanup path ---
// NOTE(review): the `error:` label and the success `return` above it
// (original lines ~4309-4312) are missing from this extract.  This
// section releases any DirectSound objects, the condition event, and
// the internal buffers before returning FAILURE.
4313 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4314 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4315 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4316 if ( buffer ) buffer->Release();

4317 object->Release();

4319 if ( handle->buffer[1] ) {

4320 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4321 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4322 if ( buffer ) buffer->Release();

4323 object->Release();

4325 CloseHandle( handle->condition );

4327 stream_.apiHandle = 0;

4330 for ( int i=0; i<2; i++ ) {

4331 if ( stream_.userBuffer[i] ) {

4332 free( stream_.userBuffer[i] );

4333 stream_.userBuffer[i] = 0;

4337 if ( stream_.deviceBuffer ) {

4338 free( stream_.deviceBuffer );

4339 stream_.deviceBuffer = 0;
\r
// Shut down an open stream: stop the callback thread, release the
// DirectSound playback/capture objects and buffers, free internal
// buffers, and mark the stream closed.  NOTE(review): several lines
// (braces, buffer->Stop() calls at original lines 4363-4364/4372-4373)
// are missing from this extract.
4345 void RtApiDs :: closeStream()

4347 if ( stream_.state == STREAM_CLOSED ) {

4348 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

4349 error( RtError::WARNING );

4353 // Stop the callback thread.

// Clearing isRunning makes callbackHandler's loop exit; then join it.
4354 stream_.callbackInfo.isRunning = false;

4355 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

4356 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

4358 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release playback-side COM objects ([0] = output).
4360 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4361 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4362 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4365 buffer->Release();

4367 object->Release();

// Release capture-side COM objects ([1] = input).
4369 if ( handle->buffer[1] ) {

4370 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4371 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4374 buffer->Release();

4376 object->Release();

4378 CloseHandle( handle->condition );

4380 stream_.apiHandle = 0;

// Free the user conversion buffers for both directions.
4383 for ( int i=0; i<2; i++ ) {

4384 if ( stream_.userBuffer[i] ) {

4385 free( stream_.userBuffer[i] );

4386 stream_.userBuffer[i] = 0;

4390 if ( stream_.deviceBuffer ) {

4391 free( stream_.deviceBuffer );

4392 stream_.deviceBuffer = 0;

4395 stream_.mode = UNINITIALIZED;

4396 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: begin looping playback and/or capture on the
// DirectSound buffers and mark the stream running.  NOTE(review): the
// `goto unlock` failure branches and the `unlock:` label (original
// lines ~4433-4435, 4444-4446, 4453) are missing from this extract.
4399 void RtApiDs :: startStream()

4402 if ( stream_.state == STREAM_RUNNING ) {

4403 errorText_ = "RtApiDs::startStream(): the stream is already running!";

4404 error( RtError::WARNING );

4408 //MUTEX_LOCK( &stream_.mutex );

4410 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4412 // Increase scheduler frequency on lesser windows (a side-effect of

4413 // increasing timer accuracy). On greater windows (Win2K or later),

4414 // this is already in effect.

4415 timeBeginPeriod( 1 );

4417 buffersRolling = false;

4418 duplexPrerollBytes = 0;

4420 if ( stream_.mode == DUPLEX ) {

4421 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

4422 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

4425 HRESULT result = 0;

// Kick off looping playback on the output buffer ([0]).
4426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4428 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4429 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

4430 if ( FAILED( result ) ) {

4431 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

4432 errorText_ = errorStream_.str();

// Kick off looping capture on the input buffer ([1]).
4437 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4439 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4440 result = buffer->Start( DSCBSTART_LOOPING );

4441 if ( FAILED( result ) ) {

4442 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

4443 errorText_ = errorStream_.str();

// Reset drain bookkeeping and the stop-signal event before going live.
4448 handle->drainCounter = 0;

4449 handle->internalDrain = false;

4450 ResetEvent( handle->condition );

4451 stream_.state = STREAM_RUNNING;

4454 // MUTEX_UNLOCK( &stream_.mutex );

4456 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
// NOTE(review): this region is a lossy extraction of RtAudio's DirectSound
// backend -- the original file's line numbers are fused onto each code line,
// stray CR markers remain, and several original lines (braces, early
// "return"s, the "goto unlock" error paths, and the audioPtr/dataLen
// declarations) were dropped. Do not compile as-is; restore the function
// body from upstream RtAudio before editing logic.
//
// stopStream(): drain/halt playback and capture, zero both DirectSound
// buffers so a later restart does not replay stale audio, and reset the
// stream state under stream_.mutex.
4459 void RtApiDs :: stopStream()
\r
// Benign double-stop: warn and bail out rather than raising an error.
4462 if ( stream_.state == STREAM_STOPPED ) {
\r
4463 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4464 error( RtError::WARNING );
\r
4469 MUTEX_LOCK( &stream_.mutex );
\r
// Re-check after acquiring the lock: another thread may have stopped the
// stream while we were blocked on the mutex.
4471 if ( stream_.state == STREAM_STOPPED ) {
\r
4472 MUTEX_UNLOCK( &stream_.mutex );
\r
4477 HRESULT result = 0;
\r
4480 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4481 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain is in progress yet: request one
// (value 2 tells the callback thread to output silence and count up),
// then block until the callback thread signals drain completion.
4482 if ( handle->drainCounter == 0 ) {
\r
4483 handle->drainCounter = 2;
\r
4484 // MUTEX_UNLOCK( &stream_.mutex );
\r
4485 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4486 //ResetEvent( handle->condition );
\r
4487 // MUTEX_LOCK( &stream_.mutex );
\r
4490 stream_.state = STREAM_STOPPED;
\r
4492 // Stop the buffer and clear memory
\r
4493 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4494 result = buffer->Stop();
\r
4495 if ( FAILED( result ) ) {
\r
4496 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4497 errorText_ = errorStream_.str();
\r
4501 // Lock the buffer and clear it so that if we start to play again,
\r
4502 // we won't have old data playing.
\r
// NOTE(review): audioPtr/dataLen declarations (presumably
// "LPVOID audioPtr; DWORD dataLen;") were dropped by the extraction --
// confirm against upstream.
4503 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4504 if ( FAILED( result ) ) {
\r
4505 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4506 errorText_ = errorStream_.str();
\r
4510 // Zero the DS buffer
\r
4511 ZeroMemory( audioPtr, dataLen );
\r
4513 // Unlock the DS buffer
\r
4514 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4515 if ( FAILED( result ) ) {
\r
4516 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4517 errorText_ = errorStream_.str();
\r
4521 // If we start playing again, we must begin at beginning of buffer.
\r
4522 handle->bufferPointer[0] = 0;
\r
// Capture side: stop the capture buffer and zero it as well.
4525 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4526 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4530 stream_.state = STREAM_STOPPED;
\r
4532 result = buffer->Stop();
\r
4533 if ( FAILED( result ) ) {
\r
4534 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4535 errorText_ = errorStream_.str();
\r
4539 // Lock the buffer and clear it so that if we start to play again,
\r
4540 // we won't have old data playing.
\r
4541 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4542 if ( FAILED( result ) ) {
\r
4543 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4544 errorText_ = errorStream_.str();
\r
4548 // Zero the DS buffer
\r
4549 ZeroMemory( audioPtr, dataLen );
\r
4551 // Unlock the DS buffer
\r
4552 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4553 if ( FAILED( result ) ) {
\r
4554 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4555 errorText_ = errorStream_.str();
\r
4559 // If we start recording again, we must begin at beginning of buffer.
\r
4560 handle->bufferPointer[1] = 0;
\r
// Undo the timeBeginPeriod(1) issued in startStream().
4564 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4565 // MUTEX_UNLOCK( &stream_.mutex );
\r
// Report only the last failure (result holds the final HRESULT).
4567 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
\r
4570 void RtApiDs :: abortStream()
\r
4573 if ( stream_.state == STREAM_STOPPED ) {
\r
4574 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4575 error( RtError::WARNING );
\r
4579 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4580 handle->drainCounter = 2;
\r
// NOTE(review): lossy extraction -- original line numbers are fused onto
// each code line, CR markers remain, and many lines (braces, "return"s,
// "else" lines, declarations such as "char *buffer;" / "long bufferBytes;")
// were dropped. Additionally, every occurrence of "&current...Pointer" was
// HTML-entity-corrupted to "¤t...Pointer" (the "&curren" prefix became
// the currency sign). Restore from upstream RtAudio before compiling.
//
// callbackEvent(): one iteration of the DirectSound servicing loop. It
// invokes the user callback for fresh output data (unless draining), waits
// for both devices to start rolling in DUPLEX mode, then copies audio
// between the user/device buffers and the circular DirectSound buffers,
// detecting under/overflow ("forbidden zone") conditions along the way.
4585 void RtApiDs :: callbackEvent()
\r
// Stream stopped: idle briefly so the polling thread doesn't spin.
4587 if ( stream_.state == STREAM_STOPPED ) {
\r
4588 Sleep( 50 ); // sleep 50 milliseconds
\r
4592 if ( stream_.state == STREAM_CLOSED ) {
\r
4593 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4594 error( RtError::WARNING );
\r
4598 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4599 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4601 // Check if we were draining the stream and signal is finished.
\r
// drainCounter counts up once per callback pass while draining; past
// nBuffers + 2 the queued audio has played out, so wake stopStream().
4602 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4603 if ( handle->internalDrain == false )
\r
4604 SetEvent( handle->condition );
\r
4611 MUTEX_LOCK( &stream_.mutex );
\r
4613 // The state might change while waiting on a mutex.
\r
4614 if ( stream_.state == STREAM_STOPPED ) {
\r
4615 MUTEX_UNLOCK( &stream_.mutex );
\r
4620 // Invoke user callback to get fresh output data UNLESS we are
\r
4621 // draining stream.
\r
4622 if ( handle->drainCounter == 0 ) {
\r
4623 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4624 double streamTime = getStreamTime();
\r
4625 RtAudioStreamStatus status = 0;
\r
// Report (and clear) any xrun flags recorded by previous passes.
4626 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4627 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4628 handle->xrun[0] = false;
\r
4630 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4631 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4632 handle->xrun[1] = false;
\r
// The user callback's return value becomes the drain request:
// 1 = drain then stop internally, 2 = abort immediately.
4634 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4635 stream_.bufferSize, streamTime, status, info->userData );
\r
4636 if ( handle->drainCounter == 2 ) {
\r
4637 // MUTEX_UNLOCK( &stream_.mutex );
\r
4641 else if ( handle->drainCounter == 1 )
\r
4642 handle->internalDrain = true;
\r
4646 DWORD currentWritePointer, safeWritePointer;
\r
4647 DWORD currentReadPointer, safeReadPointer;
\r
4648 UINT nextWritePointer;
\r
4650 LPVOID buffer1 = NULL;
\r
4651 LPVOID buffer2 = NULL;
\r
4652 DWORD bufferSize1 = 0;
\r
4653 DWORD bufferSize2 = 0;
\r
// First pass only: wait for the device pointer(s) to actually start
// moving before establishing our write/read cursor positions.
4658 if ( buffersRolling == false ) {
\r
4659 if ( stream_.mode == DUPLEX ) {
\r
4660 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4662 // It takes a while for the devices to get rolling. As a result,
\r
4663 // there's no guarantee that the capture and write device pointers
\r
4664 // will move in lockstep. Wait here for both devices to start
\r
4665 // rolling, and then set our buffer pointers accordingly.
\r
4666 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4667 // bytes later than the write buffer.
\r
4669 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4670 // take place between the two GetCurrentPosition calls... but I'm
\r
4671 // really not sure how to solve the problem. Temporarily boost to
\r
4672 // Realtime priority, maybe; but I'm not sure what priority the
\r
4673 // DirectSound service threads run at. We *should* be roughly
\r
4674 // within a ms or so of correct.
\r
4676 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4677 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4679 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4681 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4682 if ( FAILED( result ) ) {
\r
4683 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4684 errorText_ = errorStream_.str();
\r
4685 error( RtError::SYSTEM_ERROR );
\r
4687 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4688 if ( FAILED( result ) ) {
\r
4689 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4690 errorText_ = errorStream_.str();
\r
4691 error( RtError::SYSTEM_ERROR );
\r
// Poll (loop header dropped by extraction) until BOTH pointers move.
4694 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4695 if ( FAILED( result ) ) {
\r
4696 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4697 errorText_ = errorStream_.str();
\r
4698 error( RtError::SYSTEM_ERROR );
\r
4700 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4701 if ( FAILED( result ) ) {
\r
4702 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4703 errorText_ = errorStream_.str();
\r
4704 error( RtError::SYSTEM_ERROR );
\r
4706 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4710 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4712 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4713 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4714 handle->bufferPointer[1] = safeReadPointer;
\r
4716 else if ( stream_.mode == OUTPUT ) {
\r
4718 // Set the proper nextWritePosition after initial startup.
\r
4719 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
// NOTE(review): "¤tWritePointer" here (and the three similar calls
// below) is an encoding artifact of "&currentWritePointer" /
// "&currentReadPointer" -- the "&curren" prefix was consumed as an HTML
// entity. Restore the address-of expressions from upstream.
4720 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4721 if ( FAILED( result ) ) {
\r
4722 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4723 errorText_ = errorStream_.str();
\r
4724 error( RtError::SYSTEM_ERROR );
\r
4726 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4727 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4730 buffersRolling = true;
\r
// ---- Playback half: push one buffer of output into the DS ring. ----
4733 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4735 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4737 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4738 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4739 bufferBytes *= formatBytes( stream_.userFormat );
\r
4740 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4743 // Setup parameters and do buffer conversion if necessary.
\r
4744 if ( stream_.doConvertBuffer[0] ) {
\r
4745 buffer = stream_.deviceBuffer;
\r
4746 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4747 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4748 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4751 buffer = stream_.userBuffer[0];
\r
4752 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4753 bufferBytes *= formatBytes( stream_.userFormat );
\r
4756 // No byte swapping necessary in DirectSound implementation.
\r
4758 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4759 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4761 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4762 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4764 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4765 nextWritePointer = handle->bufferPointer[0];
\r
4767 DWORD endWrite, leadPointer;
\r
4769 // Find out where the read and "safe write" pointers are.
\r
4770 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4771 if ( FAILED( result ) ) {
\r
4772 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4773 errorText_ = errorStream_.str();
\r
4774 error( RtError::SYSTEM_ERROR );
\r
4777 // We will copy our output buffer into the region between
\r
4778 // safeWritePointer and leadPointer. If leadPointer is not
\r
4779 // beyond the next endWrite position, wait until it is.
\r
4780 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4781 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4782 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4783 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4784 endWrite = nextWritePointer + bufferBytes;
\r
4786 // Check whether the entire write region is behind the play pointer.
\r
4787 if ( leadPointer >= endWrite ) break;
\r
4789 // If we are here, then we must wait until the leadPointer advances
\r
4790 // beyond the end of our next write region. We use the
\r
4791 // Sleep() function to suspend operation until that happens.
\r
4792 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4793 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4794 if ( millis < 1.0 ) millis = 1.0;
\r
4795 Sleep( (DWORD) millis );
\r
// Underflow check: writing between the play and write cursors would
// scribble over audio the hardware is currently consuming.
4798 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4799 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4800 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4801 handle->xrun[0] = true;
\r
4802 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4803 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4804 handle->bufferPointer[0] = nextWritePointer;
\r
4805 endWrite = nextWritePointer + bufferBytes;
\r
4808 // Lock free space in the buffer
\r
4809 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4810 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4811 if ( FAILED( result ) ) {
\r
4812 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4813 errorText_ = errorStream_.str();
\r
4814 error( RtError::SYSTEM_ERROR );
\r
4817 // Copy our buffer into the DS buffer
\r
// Two regions because the locked range may wrap the circular buffer.
4818 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4819 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4821 // Update our buffer offset and unlock sound buffer
\r
// NOTE(review): Unlock's HRESULT is discarded; the following FAILED()
// check re-tests the stale 'result' from Lock. Looks like a latent bug
// in the original -- confirm against upstream before changing.
4822 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4823 if ( FAILED( result ) ) {
\r
4824 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4825 errorText_ = errorStream_.str();
\r
4826 error( RtError::SYSTEM_ERROR );
\r
4828 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4829 handle->bufferPointer[0] = nextWritePointer;
\r
4831 if ( handle->drainCounter ) {
\r
4832 handle->drainCounter++;
\r
// ---- Capture half: pull one buffer of input out of the DS ring. ----
4837 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4839 // Setup parameters.
\r
4840 if ( stream_.doConvertBuffer[1] ) {
\r
4841 buffer = stream_.deviceBuffer;
\r
4842 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4843 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4846 buffer = stream_.userBuffer[1];
\r
4847 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4848 bufferBytes *= formatBytes( stream_.userFormat );
\r
4851 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4852 long nextReadPointer = handle->bufferPointer[1];
\r
4853 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4855 // Find out where the write and "safe read" pointers are.
\r
4856 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4857 if ( FAILED( result ) ) {
\r
4858 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4859 errorText_ = errorStream_.str();
\r
4860 error( RtError::SYSTEM_ERROR );
\r
4863 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4864 DWORD endRead = nextReadPointer + bufferBytes;
\r
4866 // Handling depends on whether we are INPUT or DUPLEX.
\r
4867 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4868 // then a wait here will drag the write pointers into the forbidden zone.
\r
4870 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4871 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4872 // practical way to sync up the read and write pointers reliably, given the
\r
4873 // the very complex relationship between phase and increment of the read and write
\r
4876 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4877 // provide a pre-roll period of 0.5 seconds in which we return
\r
4878 // zeros from the read buffer while the pointers sync up.
\r
4880 if ( stream_.mode == DUPLEX ) {
\r
4881 if ( safeReadPointer < endRead ) {
\r
4882 if ( duplexPrerollBytes <= 0 ) {
\r
4883 // Pre-roll time over. Be more aggressive.
\r
4884 int adjustment = endRead-safeReadPointer;
\r
4886 handle->xrun[1] = true;
\r
4888 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4889 // and perform fine adjustments later.
\r
4890 // - small adjustments: back off by twice as much.
\r
4891 if ( adjustment >= 2*bufferBytes )
\r
4892 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4894 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4896 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4900 // In pre-roll time. Just do it.
\r
4901 nextReadPointer = safeReadPointer - bufferBytes;
\r
4902 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4904 endRead = nextReadPointer + bufferBytes;
\r
4907 else { // mode == INPUT
\r
4908 while ( safeReadPointer < endRead ) {
\r
4909 // See comments for playback.
\r
4910 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4911 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4912 if ( millis < 1.0 ) millis = 1.0;
\r
4913 Sleep( (DWORD) millis );
\r
4915 // Wake up and find out where we are now.
\r
4916 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4917 if ( FAILED( result ) ) {
\r
4918 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4919 errorText_ = errorStream_.str();
\r
4920 error( RtError::SYSTEM_ERROR );
\r
4923 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4927 // Lock free space in the buffer
\r
4928 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4929 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4930 if ( FAILED( result ) ) {
\r
4931 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4932 errorText_ = errorStream_.str();
\r
4933 error( RtError::SYSTEM_ERROR );
\r
// During the duplex pre-roll window we hand the user zeros instead of
// the (not-yet-synchronized) captured audio.
4936 if ( duplexPrerollBytes <= 0 ) {
\r
4937 // Copy our buffer into the DS buffer
\r
4938 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4939 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4942 memset( buffer, 0, bufferSize1 );
\r
4943 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4944 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4947 // Update our buffer offset and unlock sound buffer
\r
4948 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
// NOTE(review): as on the playback side, Unlock's HRESULT is discarded
// and the stale 'result' is re-tested -- confirm against upstream.
4949 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4950 if ( FAILED( result ) ) {
\r
4951 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4952 errorText_ = errorStream_.str();
\r
4953 error( RtError::SYSTEM_ERROR );
\r
4955 handle->bufferPointer[1] = nextReadPointer;
\r
4957 // No byte swapping necessary in DirectSound implementation.
\r
4959 // If necessary, convert 8-bit data from unsigned to signed.
\r
4960 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4961 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4963 // Do buffer conversion if necessary.
\r
4964 if ( stream_.doConvertBuffer[1] )
\r
4965 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4969 // MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream's running time by one buffer period.
4971 RtApi::tickStreamTime();
\r
\r
4974 // Definitions for utility functions and callbacks
\r
4975 // specific to the DirectSound implementation.
\r
4977 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4979 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4980 RtApiDs *object = (RtApiDs *) info->object;
\r
4981 bool* isRunning = &info->isRunning;
\r
4983 while ( *isRunning == true ) {
\r
4984 object->callbackEvent();
\r
4987 _endthreadex( 0 );
\r
4991 #include "tchar.h"
\r
4993 std::string convertTChar( LPCTSTR name )
\r
4995 #if defined( UNICODE ) || defined( _UNICODE )
\r
4996 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4997 std::string s( length, 0 );
\r
4998 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
5000 std::string s( name );
\r
// NOTE(review): lossy extraction -- original line numbers are fused onto
// each code line and several lines were dropped (opening braces, the
// module/second-string callback parameter, "HRESULT hr;", the
// DSCCAPS/DSCAPS declarations, "else" lines, "DsDevice device;", the
// "return TRUE;" statements and closing braces). Restore from upstream
// RtAudio before compiling.
//
// deviceQueryCallback(): DirectSoundEnumerate/DirectSoundCaptureEnumerate
// callback. Validates each enumerated device by opening it and checking
// its capabilities, then records/updates its name and GUID in the global
// dsDevices list. lpContext points at a bool: true = enumerating capture
// devices, false = playback devices.
5006 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5007 LPCTSTR description,
\r
5009 LPVOID lpContext )
\r
5011 bool *isInput = (bool *) lpContext;
\r
5014 bool validDevice = false;
\r
// Capture-device probe: device is valid if it reports at least one
// channel and one supported format.
5015 if ( *isInput == true ) {
\r
5017 LPDIRECTSOUNDCAPTURE object;
\r
5019 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
// Returning TRUE continues enumeration with the next device.
5020 if ( hr != DS_OK ) return TRUE;
\r
5022 caps.dwSize = sizeof(caps);
\r
5023 hr = object->GetCaps( &caps );
\r
5024 if ( hr == DS_OK ) {
\r
5025 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5026 validDevice = true;
\r
5028 object->Release();
\r
// Playback-device probe: valid if the primary buffer supports mono or
// stereo output.
5032 LPDIRECTSOUND object;
\r
5033 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5034 if ( hr != DS_OK ) return TRUE;
\r
5036 caps.dwSize = sizeof(caps);
\r
5037 hr = object->GetCaps( &caps );
\r
5038 if ( hr == DS_OK ) {
\r
5039 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5040 validDevice = true;
\r
5042 object->Release();
\r
5045 // If good device, then save its name and guid.
\r
5046 std::string name = convertTChar( description );
\r
// Normalize the two "primary"/default device aliases to one label so
// playback and capture defaults merge into a single dsDevices entry.
5047 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5048 name = "Default Device";
\r
5049 if ( validDevice ) {
\r
// Existing entry with the same name: mark it found and fill in the
// GUID for this direction (id[1]/validId[1] = capture, [0] = output).
5050 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5051 if ( dsDevices[i].name == name ) {
\r
5052 dsDevices[i].found = true;
\r
5054 dsDevices[i].id[1] = lpguid;
\r
5055 dsDevices[i].validId[1] = true;
\r
5058 dsDevices[i].id[0] = lpguid;
\r
5059 dsDevices[i].validId[0] = true;
\r
// No existing entry: build a new DsDevice record (declaration line
// dropped by the extraction) and append it.
5066 device.name = name;
\r
5067 device.found = true;
\r
5069 device.id[1] = lpguid;
\r
5070 device.validId[1] = true;
\r
5073 device.id[0] = lpguid;
\r
5074 device.validId[0] = true;
\r
5076 dsDevices.push_back( device );
\r
\r
5082 static const char* getErrorString( int code )
\r
5086 case DSERR_ALLOCATED:
\r
5087 return "Already allocated";
\r
5089 case DSERR_CONTROLUNAVAIL:
\r
5090 return "Control unavailable";
\r
5092 case DSERR_INVALIDPARAM:
\r
5093 return "Invalid parameter";
\r
5095 case DSERR_INVALIDCALL:
\r
5096 return "Invalid call";
\r
5098 case DSERR_GENERIC:
\r
5099 return "Generic error";
\r
5101 case DSERR_PRIOLEVELNEEDED:
\r
5102 return "Priority level needed";
\r
5104 case DSERR_OUTOFMEMORY:
\r
5105 return "Out of memory";
\r
5107 case DSERR_BADFORMAT:
\r
5108 return "The sample rate or the channel format is not supported";
\r
5110 case DSERR_UNSUPPORTED:
\r
5111 return "Not supported";
\r
5113 case DSERR_NODRIVER:
\r
5114 return "No driver";
\r
5116 case DSERR_ALREADYINITIALIZED:
\r
5117 return "Already initialized";
\r
5119 case DSERR_NOAGGREGATION:
\r
5120 return "No aggregation";
\r
5122 case DSERR_BUFFERLOST:
\r
5123 return "Buffer lost";
\r
5125 case DSERR_OTHERAPPHASPRIO:
\r
5126 return "Another application already has priority";
\r
5128 case DSERR_UNINITIALIZED:
\r
5129 return "Uninitialized";
\r
5132 return "DirectSound unknown error";
\r
5135 //******************** End of __WINDOWS_DS__ *********************//
\r
5139 #if defined(__LINUX_ALSA__)
\r
5141 #include <alsa/asoundlib.h>
\r
5142 #include <unistd.h>
\r
5144 // A structure to hold various information related to the ALSA API
\r
5145 // implementation.
\r
5146 struct AlsaHandle {
\r
5147 snd_pcm_t *handles[2];
\r
5148 bool synchronized;
\r
5150 pthread_cond_t runnable_cv;
\r
5154 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5157 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5159 RtApiAlsa :: RtApiAlsa()
\r
5161 // Nothing to do here.
\r
5164 RtApiAlsa :: ~RtApiAlsa()
\r
5166 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// NOTE(review): lossy extraction -- original line numbers are fused onto
// each code line and several lines were dropped ("{", the "char name[64];"
// declaration used by sprintf, the inner subdevice loop braces, the
// goto/label that skips to the next card on error, "nDevices++", and the
// final "return nDevices;"). Restore from upstream RtAudio before
// compiling.
//
// getDeviceCount(): walk every ALSA sound card with snd_card_next() and,
// for each card's control interface, count its PCM devices via
// snd_ctl_pcm_next_device(). Errors on individual cards are reported as
// warnings and the scan continues with the next card.
5169 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5171 unsigned nDevices = 0;
\r
5172 int result, subdevice, card;
\r
5174 snd_ctl_t *handle;
\r
5176 // Count cards and devices
\r
5178 snd_card_next( &card );
\r
// card == -1 from snd_card_next terminates the scan.
5179 while ( card >= 0 ) {
\r
5180 sprintf( name, "hw:%d", card );
\r
5181 result = snd_ctl_open( &handle, name, 0 );
\r
5182 if ( result < 0 ) {
\r
5183 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5184 errorText_ = errorStream_.str();
\r
5185 error( RtError::WARNING );
\r
// Inner loop (header dropped): enumerate this card's PCM devices;
// subdevice < 0 signals the end of the list.
5190 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5191 if ( result < 0 ) {
\r
5192 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5193 errorText_ = errorStream_.str();
\r
5194 error( RtError::WARNING );
\r
5197 if ( subdevice < 0 )
\r
// Close this card's control handle and advance to the next card.
5202 snd_ctl_close( handle );
\r
5203 snd_card_next( &card );
\r
\r
5209 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5211 RtAudio::DeviceInfo info;
\r
5212 info.probed = false;
\r
5214 unsigned nDevices = 0;
\r
5215 int result, subdevice, card;
\r
5217 snd_ctl_t *chandle;
\r
5219 // Count cards and devices
\r
5221 snd_card_next( &card );
\r
5222 while ( card >= 0 ) {
\r
5223 sprintf( name, "hw:%d", card );
\r
5224 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5225 if ( result < 0 ) {
\r
5226 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5227 errorText_ = errorStream_.str();
\r
5228 error( RtError::WARNING );
\r
5233 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5234 if ( result < 0 ) {
\r
5235 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5236 errorText_ = errorStream_.str();
\r
5237 error( RtError::WARNING );
\r
5240 if ( subdevice < 0 ) break;
\r
5241 if ( nDevices == device ) {
\r
5242 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5248 snd_ctl_close( chandle );
\r
5249 snd_card_next( &card );
\r
5252 if ( nDevices == 0 ) {
\r
5253 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5254 error( RtError::INVALID_USE );
\r
5257 if ( device >= nDevices ) {
\r
5258 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5259 error( RtError::INVALID_USE );
\r
5264 // If a stream is already open, we cannot probe the stream devices.
\r
5265 // Thus, use the saved results.
\r
5266 if ( stream_.state != STREAM_CLOSED &&
\r
5267 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5268 if ( device >= devices_.size() ) {
\r
5269 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5270 error( RtError::WARNING );
\r
5273 return devices_[ device ];
\r
5276 int openMode = SND_PCM_ASYNC;
\r
5277 snd_pcm_stream_t stream;
\r
5278 snd_pcm_info_t *pcminfo;
\r
5279 snd_pcm_info_alloca( &pcminfo );
\r
5280 snd_pcm_t *phandle;
\r
5281 snd_pcm_hw_params_t *params;
\r
5282 snd_pcm_hw_params_alloca( ¶ms );
\r
5284 // First try for playback
\r
5285 stream = SND_PCM_STREAM_PLAYBACK;
\r
5286 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5287 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5288 snd_pcm_info_set_stream( pcminfo, stream );
\r
5290 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5291 if ( result < 0 ) {
\r
5292 // Device probably doesn't support playback.
\r
5293 goto captureProbe;
\r
5296 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5297 if ( result < 0 ) {
\r
5298 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5299 errorText_ = errorStream_.str();
\r
5300 error( RtError::WARNING );
\r
5301 goto captureProbe;
\r
5304 // The device is open ... fill the parameter structure.
\r
5305 result = snd_pcm_hw_params_any( phandle, params );
\r
5306 if ( result < 0 ) {
\r
5307 snd_pcm_close( phandle );
\r
5308 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5309 errorText_ = errorStream_.str();
\r
5310 error( RtError::WARNING );
\r
5311 goto captureProbe;
\r
5314 // Get output channel information.
\r
5315 unsigned int value;
\r
5316 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5317 if ( result < 0 ) {
\r
5318 snd_pcm_close( phandle );
\r
5319 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5320 errorText_ = errorStream_.str();
\r
5321 error( RtError::WARNING );
\r
5322 goto captureProbe;
\r
5324 info.outputChannels = value;
\r
5325 snd_pcm_close( phandle );
\r
5328 // Now try for capture
\r
5329 stream = SND_PCM_STREAM_CAPTURE;
\r
5330 snd_pcm_info_set_stream( pcminfo, stream );
\r
5332 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5333 snd_ctl_close( chandle );
\r
5334 if ( result < 0 ) {
\r
5335 // Device probably doesn't support capture.
\r
5336 if ( info.outputChannels == 0 ) return info;
\r
5337 goto probeParameters;
\r
5340 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5341 if ( result < 0 ) {
\r
5342 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5343 errorText_ = errorStream_.str();
\r
5344 error( RtError::WARNING );
\r
5345 if ( info.outputChannels == 0 ) return info;
\r
5346 goto probeParameters;
\r
5349 // The device is open ... fill the parameter structure.
\r
5350 result = snd_pcm_hw_params_any( phandle, params );
\r
5351 if ( result < 0 ) {
\r
5352 snd_pcm_close( phandle );
\r
5353 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5354 errorText_ = errorStream_.str();
\r
5355 error( RtError::WARNING );
\r
5356 if ( info.outputChannels == 0 ) return info;
\r
5357 goto probeParameters;
\r
5360 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5361 if ( result < 0 ) {
\r
5362 snd_pcm_close( phandle );
\r
5363 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5364 errorText_ = errorStream_.str();
\r
5365 error( RtError::WARNING );
\r
5366 if ( info.outputChannels == 0 ) return info;
\r
5367 goto probeParameters;
\r
5369 info.inputChannels = value;
\r
5370 snd_pcm_close( phandle );
\r
5372 // If device opens for both playback and capture, we determine the channels.
\r
5373 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5374 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5376 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5377 if ( device == 0 && info.outputChannels > 0 )
\r
5378 info.isDefaultOutput = true;
\r
5379 if ( device == 0 && info.inputChannels > 0 )
\r
5380 info.isDefaultInput = true;
\r
5383 // At this point, we just need to figure out the supported data
\r
5384 // formats and sample rates. We'll proceed by opening the device in
\r
5385 // the direction with the maximum number of channels, or playback if
\r
5386 // they are equal. This might limit our sample rate options, but so
\r
5389 if ( info.outputChannels >= info.inputChannels )
\r
5390 stream = SND_PCM_STREAM_PLAYBACK;
\r
5392 stream = SND_PCM_STREAM_CAPTURE;
\r
5393 snd_pcm_info_set_stream( pcminfo, stream );
\r
5395 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5396 if ( result < 0 ) {
\r
5397 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5398 errorText_ = errorStream_.str();
\r
5399 error( RtError::WARNING );
\r
5403 // The device is open ... fill the parameter structure.
\r
5404 result = snd_pcm_hw_params_any( phandle, params );
\r
5405 if ( result < 0 ) {
\r
5406 snd_pcm_close( phandle );
\r
5407 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5408 errorText_ = errorStream_.str();
\r
5409 error( RtError::WARNING );
\r
5413 // Test our discrete set of sample rate values.
\r
5414 info.sampleRates.clear();
\r
5415 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5416 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5417 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5419 if ( info.sampleRates.size() == 0 ) {
\r
5420 snd_pcm_close( phandle );
\r
5421 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5422 errorText_ = errorStream_.str();
\r
5423 error( RtError::WARNING );
\r
5427 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5428 snd_pcm_format_t format;
\r
5429 info.nativeFormats = 0;
\r
5430 format = SND_PCM_FORMAT_S8;
\r
5431 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5432 info.nativeFormats |= RTAUDIO_SINT8;
\r
5433 format = SND_PCM_FORMAT_S16;
\r
5434 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5435 info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 format = SND_PCM_FORMAT_S24;
\r
5437 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5438 info.nativeFormats |= RTAUDIO_SINT24;
\r
5439 format = SND_PCM_FORMAT_S32;
\r
5440 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5441 info.nativeFormats |= RTAUDIO_SINT32;
\r
5442 format = SND_PCM_FORMAT_FLOAT;
\r
5443 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5444 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5445 format = SND_PCM_FORMAT_FLOAT64;
\r
5446 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5447 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5449 // Check that we have at least one supported format
\r
5450 if ( info.nativeFormats == 0 ) {
\r
5451 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5452 errorText_ = errorStream_.str();
\r
5453 error( RtError::WARNING );
\r
5457 // Get the device name
\r
5459 result = snd_card_get_name( card, &cardname );
\r
5460 if ( result >= 0 )
\r
5461 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5464 // That's all ... close the device and return
\r
5465 snd_pcm_close( phandle );
\r
5466 info.probed = true;
\r
5470 void RtApiAlsa :: saveDeviceInfo( void )
\r
5474 unsigned int nDevices = getDeviceCount();
\r
5475 devices_.resize( nDevices );
\r
5476 for ( unsigned int i=0; i<nDevices; i++ )
\r
5477 devices_[i] = getDeviceInfo( i );
\r
5480 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5481 unsigned int firstChannel, unsigned int sampleRate,
\r
5482 RtAudioFormat format, unsigned int *bufferSize,
\r
5483 RtAudio::StreamOptions *options )
\r
5486 #if defined(__RTAUDIO_DEBUG__)
\r
5487 snd_output_t *out;
\r
5488 snd_output_stdio_attach(&out, stderr, 0);
\r
5491 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5493 unsigned nDevices = 0;
\r
5494 int result, subdevice, card;
\r
5496 snd_ctl_t *chandle;
\r
5498 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5499 snprintf(name, sizeof(name), "%s", "default");
\r
5501 // Count cards and devices
\r
5503 snd_card_next( &card );
\r
5504 while ( card >= 0 ) {
\r
5505 sprintf( name, "hw:%d", card );
\r
5506 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5507 if ( result < 0 ) {
\r
5508 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5509 errorText_ = errorStream_.str();
\r
5514 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5515 if ( result < 0 ) break;
\r
5516 if ( subdevice < 0 ) break;
\r
5517 if ( nDevices == device ) {
\r
5518 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5519 snd_ctl_close( chandle );
\r
5524 snd_ctl_close( chandle );
\r
5525 snd_card_next( &card );
\r
5528 if ( nDevices == 0 ) {
\r
5529 // This should not happen because a check is made before this function is called.
\r
5530 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5534 if ( device >= nDevices ) {
\r
5535 // This should not happen because a check is made before this function is called.
\r
5536 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5543 // The getDeviceInfo() function will not work for a device that is
\r
5544 // already open. Thus, we'll probe the system before opening a
\r
5545 // stream and save the results for use by getDeviceInfo().
\r
5546 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5547 this->saveDeviceInfo();
\r
5549 snd_pcm_stream_t stream;
\r
5550 if ( mode == OUTPUT )
\r
5551 stream = SND_PCM_STREAM_PLAYBACK;
\r
5553 stream = SND_PCM_STREAM_CAPTURE;
\r
5555 snd_pcm_t *phandle;
\r
5556 int openMode = SND_PCM_ASYNC;
\r
5557 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5558 if ( result < 0 ) {
\r
5559 if ( mode == OUTPUT )
\r
5560 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5562 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5563 errorText_ = errorStream_.str();
\r
5567 // Fill the parameter structure.
\r
5568 snd_pcm_hw_params_t *hw_params;
\r
5569 snd_pcm_hw_params_alloca( &hw_params );
\r
5570 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5571 if ( result < 0 ) {
\r
5572 snd_pcm_close( phandle );
\r
5573 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5574 errorText_ = errorStream_.str();
\r
5578 #if defined(__RTAUDIO_DEBUG__)
\r
5579 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5580 snd_pcm_hw_params_dump( hw_params, out );
\r
5583 // Set access ... check user preference.
\r
5584 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5585 stream_.userInterleaved = false;
\r
5586 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5587 if ( result < 0 ) {
\r
5588 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5589 stream_.deviceInterleaved[mode] = true;
\r
5592 stream_.deviceInterleaved[mode] = false;
\r
5595 stream_.userInterleaved = true;
\r
5596 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5597 if ( result < 0 ) {
\r
5598 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5599 stream_.deviceInterleaved[mode] = false;
\r
5602 stream_.deviceInterleaved[mode] = true;
\r
5605 if ( result < 0 ) {
\r
5606 snd_pcm_close( phandle );
\r
5607 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5608 errorText_ = errorStream_.str();
\r
5612 // Determine how to set the device format.
\r
5613 stream_.userFormat = format;
\r
5614 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5616 if ( format == RTAUDIO_SINT8 )
\r
5617 deviceFormat = SND_PCM_FORMAT_S8;
\r
5618 else if ( format == RTAUDIO_SINT16 )
\r
5619 deviceFormat = SND_PCM_FORMAT_S16;
\r
5620 else if ( format == RTAUDIO_SINT24 )
\r
5621 deviceFormat = SND_PCM_FORMAT_S24;
\r
5622 else if ( format == RTAUDIO_SINT32 )
\r
5623 deviceFormat = SND_PCM_FORMAT_S32;
\r
5624 else if ( format == RTAUDIO_FLOAT32 )
\r
5625 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5626 else if ( format == RTAUDIO_FLOAT64 )
\r
5627 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5629 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5630 stream_.deviceFormat[mode] = format;
\r
5634 // The user requested format is not natively supported by the device.
\r
5635 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5636 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5637 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5641 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5642 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5643 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5647 deviceFormat = SND_PCM_FORMAT_S32;
\r
5648 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5649 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5653 deviceFormat = SND_PCM_FORMAT_S24;
\r
5654 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5655 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5659 deviceFormat = SND_PCM_FORMAT_S16;
\r
5660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5661 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5665 deviceFormat = SND_PCM_FORMAT_S8;
\r
5666 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5667 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5671 // If we get here, no supported format was found.
\r
5672 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5673 errorText_ = errorStream_.str();
\r
5677 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5678 if ( result < 0 ) {
\r
5679 snd_pcm_close( phandle );
\r
5680 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5681 errorText_ = errorStream_.str();
\r
5685 // Determine whether byte-swaping is necessary.
\r
5686 stream_.doByteSwap[mode] = false;
\r
5687 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5688 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5689 if ( result == 0 )
\r
5690 stream_.doByteSwap[mode] = true;
\r
5691 else if (result < 0) {
\r
5692 snd_pcm_close( phandle );
\r
5693 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5694 errorText_ = errorStream_.str();
\r
5699 // Set the sample rate.
\r
5700 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5701 if ( result < 0 ) {
\r
5702 snd_pcm_close( phandle );
\r
5703 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5704 errorText_ = errorStream_.str();
\r
5708 // Determine the number of channels for this device. We support a possible
\r
5709 // minimum device channel number > than the value requested by the user.
\r
5710 stream_.nUserChannels[mode] = channels;
\r
5711 unsigned int value;
\r
5712 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5713 unsigned int deviceChannels = value;
\r
5714 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5715 snd_pcm_close( phandle );
\r
5716 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5717 errorText_ = errorStream_.str();
\r
5721 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5722 if ( result < 0 ) {
\r
5723 snd_pcm_close( phandle );
\r
5724 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5725 errorText_ = errorStream_.str();
\r
5728 deviceChannels = value;
\r
5729 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5730 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5732 // Set the device channels.
\r
5733 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5734 if ( result < 0 ) {
\r
5735 snd_pcm_close( phandle );
\r
5736 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5737 errorText_ = errorStream_.str();
\r
5741 // Set the buffer (or period) size.
\r
5743 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5744 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5745 if ( result < 0 ) {
\r
5746 snd_pcm_close( phandle );
\r
5747 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5748 errorText_ = errorStream_.str();
\r
5751 *bufferSize = periodSize;
\r
5753 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5754 unsigned int periods = 0;
\r
5755 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5756 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5757 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5758 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5759 if ( result < 0 ) {
\r
5760 snd_pcm_close( phandle );
\r
5761 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5762 errorText_ = errorStream_.str();
\r
5766 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5767 // MUST be the same in both directions!
\r
5768 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5769 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5770 errorText_ = errorStream_.str();
\r
5774 stream_.bufferSize = *bufferSize;
\r
5776 // Install the hardware configuration
\r
5777 result = snd_pcm_hw_params( phandle, hw_params );
\r
5778 if ( result < 0 ) {
\r
5779 snd_pcm_close( phandle );
\r
5780 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5781 errorText_ = errorStream_.str();
\r
5785 #if defined(__RTAUDIO_DEBUG__)
\r
5786 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5787 snd_pcm_hw_params_dump( hw_params, out );
\r
5790 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5791 snd_pcm_sw_params_t *sw_params = NULL;
\r
5792 snd_pcm_sw_params_alloca( &sw_params );
\r
5793 snd_pcm_sw_params_current( phandle, sw_params );
\r
5794 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5795 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5796 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5798 // The following two settings were suggested by Theo Veenker
\r
5799 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5800 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5802 // here are two options for a fix
\r
5803 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5804 snd_pcm_uframes_t val;
\r
5805 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5806 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5808 result = snd_pcm_sw_params( phandle, sw_params );
\r
5809 if ( result < 0 ) {
\r
5810 snd_pcm_close( phandle );
\r
5811 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5812 errorText_ = errorStream_.str();
\r
5816 #if defined(__RTAUDIO_DEBUG__)
\r
5817 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5818 snd_pcm_sw_params_dump( sw_params, out );
\r
5821 // Set flags for buffer conversion
\r
5822 stream_.doConvertBuffer[mode] = false;
\r
5823 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5824 stream_.doConvertBuffer[mode] = true;
\r
5825 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5826 stream_.doConvertBuffer[mode] = true;
\r
5827 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5828 stream_.nUserChannels[mode] > 1 )
\r
5829 stream_.doConvertBuffer[mode] = true;
\r
5831 // Allocate the ApiHandle if necessary and then save.
\r
5832 AlsaHandle *apiInfo = 0;
\r
5833 if ( stream_.apiHandle == 0 ) {
\r
5835 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5837 catch ( std::bad_alloc& ) {
\r
5838 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5842 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5843 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5847 stream_.apiHandle = (void *) apiInfo;
\r
5848 apiInfo->handles[0] = 0;
\r
5849 apiInfo->handles[1] = 0;
\r
5852 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5854 apiInfo->handles[mode] = phandle;
\r
5856 // Allocate necessary internal buffers.
\r
5857 unsigned long bufferBytes;
\r
5858 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5859 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5860 if ( stream_.userBuffer[mode] == NULL ) {
\r
5861 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5865 if ( stream_.doConvertBuffer[mode] ) {
\r
5867 bool makeBuffer = true;
\r
5868 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5869 if ( mode == INPUT ) {
\r
5870 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5871 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5872 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5876 if ( makeBuffer ) {
\r
5877 bufferBytes *= *bufferSize;
\r
5878 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5879 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5880 if ( stream_.deviceBuffer == NULL ) {
\r
5881 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5887 stream_.sampleRate = sampleRate;
\r
5888 stream_.nBuffers = periods;
\r
5889 stream_.device[mode] = device;
\r
5890 stream_.state = STREAM_STOPPED;
\r
5892 // Setup the buffer conversion information structure.
\r
5893 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5895 // Setup thread if necessary.
\r
5896 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5897 // We had already set up an output stream.
\r
5898 stream_.mode = DUPLEX;
\r
5899 // Link the streams if possible.
\r
5900 apiInfo->synchronized = false;
\r
5901 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5902 apiInfo->synchronized = true;
\r
5904 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5905 error( RtError::WARNING );
\r
5909 stream_.mode = mode;
\r
5911 // Setup callback thread.
\r
5912 stream_.callbackInfo.object = (void *) this;
\r
5914 // Set the thread attributes for joinable and realtime scheduling
\r
5915 // priority (optional). The higher priority will only take affect
\r
5916 // if the program is run as root or suid. Note, under Linux
\r
5917 // processes with CAP_SYS_NICE privilege, a user can change
\r
5918 // scheduling policy and priority (thus need not be root). See
\r
5919 // POSIX "capabilities".
\r
5920 pthread_attr_t attr;
\r
5921 pthread_attr_init( &attr );
\r
5922 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5923 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5924 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5925 struct sched_param param;
\r
5926 int priority = options->priority;
\r
5927 int min = sched_get_priority_min( SCHED_RR );
\r
5928 int max = sched_get_priority_max( SCHED_RR );
\r
5929 if ( priority < min ) priority = min;
\r
5930 else if ( priority > max ) priority = max;
\r
5931 param.sched_priority = priority;
\r
5932 pthread_attr_setschedparam( &attr, ¶m );
\r
5933 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5936 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5938 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5941 stream_.callbackInfo.isRunning = true;
\r
5942 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5943 pthread_attr_destroy( &attr );
\r
5945 stream_.callbackInfo.isRunning = false;
\r
5946 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5955 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5956 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5957 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5959 stream_.apiHandle = 0;
\r
5962 for ( int i=0; i<2; i++ ) {
\r
5963 if ( stream_.userBuffer[i] ) {
\r
5964 free( stream_.userBuffer[i] );
\r
5965 stream_.userBuffer[i] = 0;
\r
5969 if ( stream_.deviceBuffer ) {
\r
5970 free( stream_.deviceBuffer );
\r
5971 stream_.deviceBuffer = 0;
\r
5977 void RtApiAlsa :: closeStream()
\r
5979 if ( stream_.state == STREAM_CLOSED ) {
\r
5980 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5981 error( RtError::WARNING );
\r
5985 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5986 stream_.callbackInfo.isRunning = false;
\r
5987 MUTEX_LOCK( &stream_.mutex );
\r
5988 if ( stream_.state == STREAM_STOPPED ) {
\r
5989 apiInfo->runnable = true;
\r
5990 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5992 MUTEX_UNLOCK( &stream_.mutex );
\r
5993 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5995 if ( stream_.state == STREAM_RUNNING ) {
\r
5996 stream_.state = STREAM_STOPPED;
\r
5997 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5998 snd_pcm_drop( apiInfo->handles[0] );
\r
5999 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6000 snd_pcm_drop( apiInfo->handles[1] );
\r
6004 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6005 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6006 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6008 stream_.apiHandle = 0;
\r
6011 for ( int i=0; i<2; i++ ) {
\r
6012 if ( stream_.userBuffer[i] ) {
\r
6013 free( stream_.userBuffer[i] );
\r
6014 stream_.userBuffer[i] = 0;
\r
6018 if ( stream_.deviceBuffer ) {
\r
6019 free( stream_.deviceBuffer );
\r
6020 stream_.deviceBuffer = 0;
\r
6023 stream_.mode = UNINITIALIZED;
\r
6024 stream_.state = STREAM_CLOSED;
\r
6027 void RtApiAlsa :: startStream()
\r
6029 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6032 if ( stream_.state == STREAM_RUNNING ) {
\r
6033 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6034 error( RtError::WARNING );
\r
6038 MUTEX_LOCK( &stream_.mutex );
\r
6041 snd_pcm_state_t state;
\r
6042 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6043 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6044 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6045 state = snd_pcm_state( handle[0] );
\r
6046 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6047 result = snd_pcm_prepare( handle[0] );
\r
6048 if ( result < 0 ) {
\r
6049 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6050 errorText_ = errorStream_.str();
\r
6056 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6057 state = snd_pcm_state( handle[1] );
\r
6058 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6059 result = snd_pcm_prepare( handle[1] );
\r
6060 if ( result < 0 ) {
\r
6061 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6062 errorText_ = errorStream_.str();
\r
6068 stream_.state = STREAM_RUNNING;
\r
6071 apiInfo->runnable = true;
\r
6072 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6073 MUTEX_UNLOCK( &stream_.mutex );
\r
6075 if ( result >= 0 ) return;
\r
6076 error( RtError::SYSTEM_ERROR );
\r
6079 void RtApiAlsa :: stopStream()
\r
6082 if ( stream_.state == STREAM_STOPPED ) {
\r
6083 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6084 error( RtError::WARNING );
\r
6088 stream_.state = STREAM_STOPPED;
\r
6089 MUTEX_LOCK( &stream_.mutex );
\r
6091 //if ( stream_.state == STREAM_STOPPED ) {
\r
6092 // MUTEX_UNLOCK( &stream_.mutex );
\r
6097 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6098 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6099 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6100 if ( apiInfo->synchronized )
\r
6101 result = snd_pcm_drop( handle[0] );
\r
6103 result = snd_pcm_drain( handle[0] );
\r
6104 if ( result < 0 ) {
\r
6105 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6106 errorText_ = errorStream_.str();
\r
6111 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6112 result = snd_pcm_drop( handle[1] );
\r
6113 if ( result < 0 ) {
\r
6114 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6115 errorText_ = errorStream_.str();
\r
6121 stream_.state = STREAM_STOPPED;
\r
6122 MUTEX_UNLOCK( &stream_.mutex );
\r
6124 if ( result >= 0 ) return;
\r
6125 error( RtError::SYSTEM_ERROR );
\r
6128 void RtApiAlsa :: abortStream()
\r
6131 if ( stream_.state == STREAM_STOPPED ) {
\r
6132 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6133 error( RtError::WARNING );
\r
6137 stream_.state = STREAM_STOPPED;
\r
6138 MUTEX_LOCK( &stream_.mutex );
\r
6140 //if ( stream_.state == STREAM_STOPPED ) {
\r
6141 // MUTEX_UNLOCK( &stream_.mutex );
\r
6146 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6147 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6148 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6149 result = snd_pcm_drop( handle[0] );
\r
6150 if ( result < 0 ) {
\r
6151 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6152 errorText_ = errorStream_.str();
\r
6157 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6158 result = snd_pcm_drop( handle[1] );
\r
6159 if ( result < 0 ) {
\r
6160 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6161 errorText_ = errorStream_.str();
\r
6167 stream_.state = STREAM_STOPPED;
\r
6168 MUTEX_UNLOCK( &stream_.mutex );
\r
6170 if ( result >= 0 ) return;
\r
6171 error( RtError::SYSTEM_ERROR );
\r
6174 void RtApiAlsa :: callbackEvent()
\r
6176 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6177 if ( stream_.state == STREAM_STOPPED ) {
\r
6178 MUTEX_LOCK( &stream_.mutex );
\r
6179 while ( !apiInfo->runnable )
\r
6180 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6182 if ( stream_.state != STREAM_RUNNING ) {
\r
6183 MUTEX_UNLOCK( &stream_.mutex );
\r
6186 MUTEX_UNLOCK( &stream_.mutex );
\r
6189 if ( stream_.state == STREAM_CLOSED ) {
\r
6190 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6191 error( RtError::WARNING );
\r
6195 int doStopStream = 0;
\r
6196 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6197 double streamTime = getStreamTime();
\r
6198 RtAudioStreamStatus status = 0;
\r
6199 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6200 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6201 apiInfo->xrun[0] = false;
\r
6203 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6204 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6205 apiInfo->xrun[1] = false;
\r
6207 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6208 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6210 if ( doStopStream == 2 ) {
\r
6215 MUTEX_LOCK( &stream_.mutex );
\r
6217 // The state might change while waiting on a mutex.
\r
6218 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6223 snd_pcm_t **handle;
\r
6224 snd_pcm_sframes_t frames;
\r
6225 RtAudioFormat format;
\r
6226 handle = (snd_pcm_t **) apiInfo->handles;
\r
6228 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6230 // Setup parameters.
\r
6231 if ( stream_.doConvertBuffer[1] ) {
\r
6232 buffer = stream_.deviceBuffer;
\r
6233 channels = stream_.nDeviceChannels[1];
\r
6234 format = stream_.deviceFormat[1];
\r
6237 buffer = stream_.userBuffer[1];
\r
6238 channels = stream_.nUserChannels[1];
\r
6239 format = stream_.userFormat;
\r
6242 // Read samples from device in interleaved/non-interleaved format.
\r
6243 if ( stream_.deviceInterleaved[1] )
\r
6244 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6246 void *bufs[channels];
\r
6247 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6248 for ( int i=0; i<channels; i++ )
\r
6249 bufs[i] = (void *) (buffer + (i * offset));
\r
6250 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6253 if ( result < (int) stream_.bufferSize ) {
\r
6254 // Either an error or overrun occured.
\r
6255 if ( result == -EPIPE ) {
\r
6256 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6257 if ( state == SND_PCM_STATE_XRUN ) {
\r
6258 apiInfo->xrun[1] = true;
\r
6259 result = snd_pcm_prepare( handle[1] );
\r
6260 if ( result < 0 ) {
\r
6261 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6262 errorText_ = errorStream_.str();
\r
6266 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6267 errorText_ = errorStream_.str();
\r
6271 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6272 errorText_ = errorStream_.str();
\r
6274 error( RtError::WARNING );
\r
6278 // Do byte swapping if necessary.
\r
6279 if ( stream_.doByteSwap[1] )
\r
6280 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6282 // Do buffer conversion if necessary.
\r
6283 if ( stream_.doConvertBuffer[1] )
\r
6284 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6286 // Check stream latency
\r
6287 result = snd_pcm_delay( handle[1], &frames );
\r
6288 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6293 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6295 // Setup parameters and do buffer conversion if necessary.
\r
6296 if ( stream_.doConvertBuffer[0] ) {
\r
6297 buffer = stream_.deviceBuffer;
\r
6298 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6299 channels = stream_.nDeviceChannels[0];
\r
6300 format = stream_.deviceFormat[0];
\r
6303 buffer = stream_.userBuffer[0];
\r
6304 channels = stream_.nUserChannels[0];
\r
6305 format = stream_.userFormat;
\r
6308 // Do byte swapping if necessary.
\r
6309 if ( stream_.doByteSwap[0] )
\r
6310 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6312 // Write samples to device in interleaved/non-interleaved format.
\r
6313 if ( stream_.deviceInterleaved[0] )
\r
6314 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6316 void *bufs[channels];
\r
6317 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6318 for ( int i=0; i<channels; i++ )
\r
6319 bufs[i] = (void *) (buffer + (i * offset));
\r
6320 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6323 if ( result < (int) stream_.bufferSize ) {
\r
6324 // Either an error or underrun occured.
\r
6325 if ( result == -EPIPE ) {
\r
6326 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6327 if ( state == SND_PCM_STATE_XRUN ) {
\r
6328 apiInfo->xrun[0] = true;
\r
6329 result = snd_pcm_prepare( handle[0] );
\r
6330 if ( result < 0 ) {
\r
6331 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6332 errorText_ = errorStream_.str();
\r
6336 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6337 errorText_ = errorStream_.str();
\r
6341 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6342 errorText_ = errorStream_.str();
\r
6344 error( RtError::WARNING );
\r
6348 // Check stream latency
\r
6349 result = snd_pcm_delay( handle[0], &frames );
\r
6350 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6354 MUTEX_UNLOCK( &stream_.mutex );
\r
6356 RtApi::tickStreamTime();
\r
6357 if ( doStopStream == 1 ) this->stopStream();
\r
6360 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6362 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6363 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6364 bool *isRunning = &info->isRunning;
\r
6366 while ( *isRunning == true ) {
\r
6367 pthread_testcancel();
\r
6368 object->callbackEvent();
\r
6371 pthread_exit( NULL );
\r
6374 //******************** End of __LINUX_ALSA__ *********************//
\r
6378 #if defined(__LINUX_OSS__)
\r
6380 #include <unistd.h>
\r
6381 #include <sys/ioctl.h>
\r
6382 #include <unistd.h>
\r
6383 #include <fcntl.h>
\r
6384 #include "soundcard.h"
\r
6385 #include <errno.h>
\r
6388 extern "C" void *ossCallbackHandler(void * ptr);
\r
6390 // A structure to hold various information related to the OSS API
\r
6391 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device file descriptors (0 = playback, 1 = capture)
  bool xrun[2];            // under/overflow flags, one per direction
  bool triggered;          // true once duplex i/o has been kick-started
  pthread_cond_t runnable; // signalled when the callback thread may proceed

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6402 RtApiOss :: RtApiOss()
\r
6404 // Nothing to do here.
\r
6407 RtApiOss :: ~RtApiOss()
\r
6409 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6412 unsigned int RtApiOss :: getDeviceCount( void )
\r
6414 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6415 if ( mixerfd == -1 ) {
\r
6416 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6417 error( RtError::WARNING );
\r
6421 oss_sysinfo sysinfo;
\r
6422 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6424 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6425 error( RtError::WARNING );
\r
6430 return sysinfo.numaudios;
\r
6433 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6435 RtAudio::DeviceInfo info;
\r
6436 info.probed = false;
\r
6438 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6439 if ( mixerfd == -1 ) {
\r
6440 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6441 error( RtError::WARNING );
\r
6445 oss_sysinfo sysinfo;
\r
6446 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6447 if ( result == -1 ) {
\r
6449 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6450 error( RtError::WARNING );
\r
6454 unsigned nDevices = sysinfo.numaudios;
\r
6455 if ( nDevices == 0 ) {
\r
6457 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6458 error( RtError::INVALID_USE );
\r
6461 if ( device >= nDevices ) {
\r
6463 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6464 error( RtError::INVALID_USE );
\r
6467 oss_audioinfo ainfo;
\r
6468 ainfo.dev = device;
\r
6469 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6471 if ( result == -1 ) {
\r
6472 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6473 errorText_ = errorStream_.str();
\r
6474 error( RtError::WARNING );
\r
6479 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6480 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6481 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6482 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6483 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6486 // Probe data formats ... do for input
\r
6487 unsigned long mask = ainfo.iformats;
\r
6488 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6489 info.nativeFormats |= RTAUDIO_SINT16;
\r
6490 if ( mask & AFMT_S8 )
\r
6491 info.nativeFormats |= RTAUDIO_SINT8;
\r
6492 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6493 info.nativeFormats |= RTAUDIO_SINT32;
\r
6494 if ( mask & AFMT_FLOAT )
\r
6495 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6496 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6497 info.nativeFormats |= RTAUDIO_SINT24;
\r
6499 // Check that we have at least one supported format
\r
6500 if ( info.nativeFormats == 0 ) {
\r
6501 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6502 errorText_ = errorStream_.str();
\r
6503 error( RtError::WARNING );
\r
6507 // Probe the supported sample rates.
\r
6508 info.sampleRates.clear();
\r
6509 if ( ainfo.nrates ) {
\r
6510 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6511 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6512 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6513 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6520 // Check min and max rate values;
\r
6521 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6522 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6523 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6527 if ( info.sampleRates.size() == 0 ) {
\r
6528 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6529 errorText_ = errorStream_.str();
\r
6530 error( RtError::WARNING );
\r
6533 info.probed = true;
\r
6534 info.name = ainfo.name;
\r
6541 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6542 unsigned int firstChannel, unsigned int sampleRate,
\r
6543 RtAudioFormat format, unsigned int *bufferSize,
\r
6544 RtAudio::StreamOptions *options )
\r
6546 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6547 if ( mixerfd == -1 ) {
\r
6548 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6552 oss_sysinfo sysinfo;
\r
6553 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6554 if ( result == -1 ) {
\r
6556 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6560 unsigned nDevices = sysinfo.numaudios;
\r
6561 if ( nDevices == 0 ) {
\r
6562 // This should not happen because a check is made before this function is called.
\r
6564 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6568 if ( device >= nDevices ) {
\r
6569 // This should not happen because a check is made before this function is called.
\r
6571 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6575 oss_audioinfo ainfo;
\r
6576 ainfo.dev = device;
\r
6577 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6579 if ( result == -1 ) {
\r
6580 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6581 errorText_ = errorStream_.str();
\r
6585 // Check if device supports input or output
\r
6586 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6587 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6588 if ( mode == OUTPUT )
\r
6589 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6591 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6592 errorText_ = errorStream_.str();
\r
6597 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6598 if ( mode == OUTPUT )
\r
6599 flags |= O_WRONLY;
\r
6600 else { // mode == INPUT
\r
6601 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6602 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6603 close( handle->id[0] );
\r
6604 handle->id[0] = 0;
\r
6605 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6606 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6607 errorText_ = errorStream_.str();
\r
6610 // Check that the number previously set channels is the same.
\r
6611 if ( stream_.nUserChannels[0] != channels ) {
\r
6612 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6613 errorText_ = errorStream_.str();
\r
6619 flags |= O_RDONLY;
\r
6622 // Set exclusive access if specified.
\r
6623 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6625 // Try to open the device.
\r
6627 fd = open( ainfo.devnode, flags, 0 );
\r
6629 if ( errno == EBUSY )
\r
6630 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6632 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6633 errorText_ = errorStream_.str();
\r
6637 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6639 if ( flags | O_RDWR ) {
\r
6640 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6641 if ( result == -1) {
\r
6642 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6643 errorText_ = errorStream_.str();
\r
6649 // Check the device channel support.
\r
6650 stream_.nUserChannels[mode] = channels;
\r
6651 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6653 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6654 errorText_ = errorStream_.str();
\r
6658 // Set the number of channels.
\r
6659 int deviceChannels = channels + firstChannel;
\r
6660 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6661 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6663 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6664 errorText_ = errorStream_.str();
\r
6667 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6669 // Get the data format mask
\r
6671 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6672 if ( result == -1 ) {
\r
6674 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6675 errorText_ = errorStream_.str();
\r
6679 // Determine how to set the device format.
\r
6680 stream_.userFormat = format;
\r
6681 int deviceFormat = -1;
\r
6682 stream_.doByteSwap[mode] = false;
\r
6683 if ( format == RTAUDIO_SINT8 ) {
\r
6684 if ( mask & AFMT_S8 ) {
\r
6685 deviceFormat = AFMT_S8;
\r
6686 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6689 else if ( format == RTAUDIO_SINT16 ) {
\r
6690 if ( mask & AFMT_S16_NE ) {
\r
6691 deviceFormat = AFMT_S16_NE;
\r
6692 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6694 else if ( mask & AFMT_S16_OE ) {
\r
6695 deviceFormat = AFMT_S16_OE;
\r
6696 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6697 stream_.doByteSwap[mode] = true;
\r
6700 else if ( format == RTAUDIO_SINT24 ) {
\r
6701 if ( mask & AFMT_S24_NE ) {
\r
6702 deviceFormat = AFMT_S24_NE;
\r
6703 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6705 else if ( mask & AFMT_S24_OE ) {
\r
6706 deviceFormat = AFMT_S24_OE;
\r
6707 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6708 stream_.doByteSwap[mode] = true;
\r
6711 else if ( format == RTAUDIO_SINT32 ) {
\r
6712 if ( mask & AFMT_S32_NE ) {
\r
6713 deviceFormat = AFMT_S32_NE;
\r
6714 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6716 else if ( mask & AFMT_S32_OE ) {
\r
6717 deviceFormat = AFMT_S32_OE;
\r
6718 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6719 stream_.doByteSwap[mode] = true;
\r
6723 if ( deviceFormat == -1 ) {
\r
6724 // The user requested format is not natively supported by the device.
\r
6725 if ( mask & AFMT_S16_NE ) {
\r
6726 deviceFormat = AFMT_S16_NE;
\r
6727 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6729 else if ( mask & AFMT_S32_NE ) {
\r
6730 deviceFormat = AFMT_S32_NE;
\r
6731 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6733 else if ( mask & AFMT_S24_NE ) {
\r
6734 deviceFormat = AFMT_S24_NE;
\r
6735 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6737 else if ( mask & AFMT_S16_OE ) {
\r
6738 deviceFormat = AFMT_S16_OE;
\r
6739 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6740 stream_.doByteSwap[mode] = true;
\r
6742 else if ( mask & AFMT_S32_OE ) {
\r
6743 deviceFormat = AFMT_S32_OE;
\r
6744 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6745 stream_.doByteSwap[mode] = true;
\r
6747 else if ( mask & AFMT_S24_OE ) {
\r
6748 deviceFormat = AFMT_S24_OE;
\r
6749 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6750 stream_.doByteSwap[mode] = true;
\r
6752 else if ( mask & AFMT_S8) {
\r
6753 deviceFormat = AFMT_S8;
\r
6754 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6758 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6759 // This really shouldn't happen ...
\r
6761 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6762 errorText_ = errorStream_.str();
\r
6766 // Set the data format.
\r
6767 int temp = deviceFormat;
\r
6768 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6769 if ( result == -1 || deviceFormat != temp ) {
\r
6771 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6772 errorText_ = errorStream_.str();
\r
6776 // Attempt to set the buffer size. According to OSS, the minimum
\r
6777 // number of buffers is two. The supposed minimum buffer size is 16
\r
6778 // bytes, so that will be our lower bound. The argument to this
\r
6779 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6780 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6781 // We'll check the actual value used near the end of the setup
\r
6783 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6784 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6786 if ( options ) buffers = options->numberOfBuffers;
\r
6787 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6788 if ( buffers < 2 ) buffers = 3;
\r
6789 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6790 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6791 if ( result == -1 ) {
\r
6793 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6794 errorText_ = errorStream_.str();
\r
6797 stream_.nBuffers = buffers;
\r
6799 // Save buffer size (in sample frames).
\r
6800 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6801 stream_.bufferSize = *bufferSize;
\r
6803 // Set the sample rate.
\r
6804 int srate = sampleRate;
\r
6805 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6806 if ( result == -1 ) {
\r
6808 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6809 errorText_ = errorStream_.str();
\r
6813 // Verify the sample rate setup worked.
\r
6814 if ( abs( srate - sampleRate ) > 100 ) {
\r
6816 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6817 errorText_ = errorStream_.str();
\r
6820 stream_.sampleRate = sampleRate;
\r
6822 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6823 // We're doing duplex setup here.
\r
6824 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6825 stream_.nDeviceChannels[0] = deviceChannels;
\r
6828 // Set interleaving parameters.
\r
6829 stream_.userInterleaved = true;
\r
6830 stream_.deviceInterleaved[mode] = true;
\r
6831 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6832 stream_.userInterleaved = false;
\r
6834 // Set flags for buffer conversion
\r
6835 stream_.doConvertBuffer[mode] = false;
\r
6836 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6837 stream_.doConvertBuffer[mode] = true;
\r
6838 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6839 stream_.doConvertBuffer[mode] = true;
\r
6840 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6841 stream_.nUserChannels[mode] > 1 )
\r
6842 stream_.doConvertBuffer[mode] = true;
\r
6844 // Allocate the stream handles if necessary and then save.
\r
6845 if ( stream_.apiHandle == 0 ) {
\r
6847 handle = new OssHandle;
\r
6849 catch ( std::bad_alloc& ) {
\r
6850 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6854 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6855 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6859 stream_.apiHandle = (void *) handle;
\r
6862 handle = (OssHandle *) stream_.apiHandle;
\r
6864 handle->id[mode] = fd;
\r
6866 // Allocate necessary internal buffers.
\r
6867 unsigned long bufferBytes;
\r
6868 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6869 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6870 if ( stream_.userBuffer[mode] == NULL ) {
\r
6871 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6875 if ( stream_.doConvertBuffer[mode] ) {
\r
6877 bool makeBuffer = true;
\r
6878 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6879 if ( mode == INPUT ) {
\r
6880 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6881 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6882 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6886 if ( makeBuffer ) {
\r
6887 bufferBytes *= *bufferSize;
\r
6888 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6889 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6890 if ( stream_.deviceBuffer == NULL ) {
\r
6891 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6897 stream_.device[mode] = device;
\r
6898 stream_.state = STREAM_STOPPED;
\r
6900 // Setup the buffer conversion information structure.
\r
6901 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6903 // Setup thread if necessary.
\r
6904 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6905 // We had already set up an output stream.
\r
6906 stream_.mode = DUPLEX;
\r
6907 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6910 stream_.mode = mode;
\r
6912 // Setup callback thread.
\r
6913 stream_.callbackInfo.object = (void *) this;
\r
6915 // Set the thread attributes for joinable and realtime scheduling
\r
6916 // priority. The higher priority will only take affect if the
\r
6917 // program is run as root or suid.
\r
6918 pthread_attr_t attr;
\r
6919 pthread_attr_init( &attr );
\r
6920 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6921 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6922 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6923 struct sched_param param;
\r
6924 int priority = options->priority;
\r
6925 int min = sched_get_priority_min( SCHED_RR );
\r
6926 int max = sched_get_priority_max( SCHED_RR );
\r
6927 if ( priority < min ) priority = min;
\r
6928 else if ( priority > max ) priority = max;
\r
6929 param.sched_priority = priority;
\r
6930 pthread_attr_setschedparam( &attr, ¶m );
\r
6931 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6934 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6936 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6939 stream_.callbackInfo.isRunning = true;
\r
6940 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6941 pthread_attr_destroy( &attr );
\r
6943 stream_.callbackInfo.isRunning = false;
\r
6944 errorText_ = "RtApiOss::error creating callback thread!";
\r
6953 pthread_cond_destroy( &handle->runnable );
\r
6954 if ( handle->id[0] ) close( handle->id[0] );
\r
6955 if ( handle->id[1] ) close( handle->id[1] );
\r
6957 stream_.apiHandle = 0;
\r
6960 for ( int i=0; i<2; i++ ) {
\r
6961 if ( stream_.userBuffer[i] ) {
\r
6962 free( stream_.userBuffer[i] );
\r
6963 stream_.userBuffer[i] = 0;
\r
6967 if ( stream_.deviceBuffer ) {
\r
6968 free( stream_.deviceBuffer );
\r
6969 stream_.deviceBuffer = 0;
\r
6975 void RtApiOss :: closeStream()
\r
6977 if ( stream_.state == STREAM_CLOSED ) {
\r
6978 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
6979 error( RtError::WARNING );
\r
6983 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6984 stream_.callbackInfo.isRunning = false;
\r
6985 MUTEX_LOCK( &stream_.mutex );
\r
6986 if ( stream_.state == STREAM_STOPPED )
\r
6987 pthread_cond_signal( &handle->runnable );
\r
6988 MUTEX_UNLOCK( &stream_.mutex );
\r
6989 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6991 if ( stream_.state == STREAM_RUNNING ) {
\r
6992 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6993 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
6995 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
6996 stream_.state = STREAM_STOPPED;
\r
7000 pthread_cond_destroy( &handle->runnable );
\r
7001 if ( handle->id[0] ) close( handle->id[0] );
\r
7002 if ( handle->id[1] ) close( handle->id[1] );
\r
7004 stream_.apiHandle = 0;
\r
7007 for ( int i=0; i<2; i++ ) {
\r
7008 if ( stream_.userBuffer[i] ) {
\r
7009 free( stream_.userBuffer[i] );
\r
7010 stream_.userBuffer[i] = 0;
\r
7014 if ( stream_.deviceBuffer ) {
\r
7015 free( stream_.deviceBuffer );
\r
7016 stream_.deviceBuffer = 0;
\r
7019 stream_.mode = UNINITIALIZED;
\r
7020 stream_.state = STREAM_CLOSED;
\r
7023 void RtApiOss :: startStream()
\r
7026 if ( stream_.state == STREAM_RUNNING ) {
\r
7027 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7028 error( RtError::WARNING );
\r
7032 MUTEX_LOCK( &stream_.mutex );
\r
7034 stream_.state = STREAM_RUNNING;
\r
7036 // No need to do anything else here ... OSS automatically starts
\r
7037 // when fed samples.
\r
7039 MUTEX_UNLOCK( &stream_.mutex );
\r
7041 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7042 pthread_cond_signal( &handle->runnable );
\r
7045 void RtApiOss :: stopStream()
\r
7048 if ( stream_.state == STREAM_STOPPED ) {
\r
7049 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7050 error( RtError::WARNING );
\r
7054 MUTEX_LOCK( &stream_.mutex );
\r
7056 // The state might change while waiting on a mutex.
\r
7057 if ( stream_.state == STREAM_STOPPED ) {
\r
7058 MUTEX_UNLOCK( &stream_.mutex );
\r
7063 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7064 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7066 // Flush the output with zeros a few times.
\r
7069 RtAudioFormat format;
\r
7071 if ( stream_.doConvertBuffer[0] ) {
\r
7072 buffer = stream_.deviceBuffer;
\r
7073 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7074 format = stream_.deviceFormat[0];
\r
7077 buffer = stream_.userBuffer[0];
\r
7078 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7079 format = stream_.userFormat;
\r
7082 memset( buffer, 0, samples * formatBytes(format) );
\r
7083 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7084 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7085 if ( result == -1 ) {
\r
7086 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7087 error( RtError::WARNING );
\r
7091 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7092 if ( result == -1 ) {
\r
7093 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7094 errorText_ = errorStream_.str();
\r
7097 handle->triggered = false;
\r
7100 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7101 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7102 if ( result == -1 ) {
\r
7103 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7104 errorText_ = errorStream_.str();
\r
7110 stream_.state = STREAM_STOPPED;
\r
7111 MUTEX_UNLOCK( &stream_.mutex );
\r
7113 if ( result != -1 ) return;
\r
7114 error( RtError::SYSTEM_ERROR );
\r
7117 void RtApiOss :: abortStream()
\r
7120 if ( stream_.state == STREAM_STOPPED ) {
\r
7121 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7122 error( RtError::WARNING );
\r
7126 MUTEX_LOCK( &stream_.mutex );
\r
7128 // The state might change while waiting on a mutex.
\r
7129 if ( stream_.state == STREAM_STOPPED ) {
\r
7130 MUTEX_UNLOCK( &stream_.mutex );
\r
7135 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7136 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7137 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7138 if ( result == -1 ) {
\r
7139 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7140 errorText_ = errorStream_.str();
\r
7143 handle->triggered = false;
\r
7146 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7147 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7148 if ( result == -1 ) {
\r
7149 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7150 errorText_ = errorStream_.str();
\r
7156 stream_.state = STREAM_STOPPED;
\r
7157 MUTEX_UNLOCK( &stream_.mutex );
\r
7159 if ( result != -1 ) return;
\r
7160 error( RtError::SYSTEM_ERROR );
\r
7163 void RtApiOss :: callbackEvent()
\r
7165 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7166 if ( stream_.state == STREAM_STOPPED ) {
\r
7167 MUTEX_LOCK( &stream_.mutex );
\r
7168 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7169 if ( stream_.state != STREAM_RUNNING ) {
\r
7170 MUTEX_UNLOCK( &stream_.mutex );
\r
7173 MUTEX_UNLOCK( &stream_.mutex );
\r
7176 if ( stream_.state == STREAM_CLOSED ) {
\r
7177 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7178 error( RtError::WARNING );
\r
7182 // Invoke user callback to get fresh output data.
\r
7183 int doStopStream = 0;
\r
7184 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7185 double streamTime = getStreamTime();
\r
7186 RtAudioStreamStatus status = 0;
\r
7187 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7188 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7189 handle->xrun[0] = false;
\r
7191 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7192 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7193 handle->xrun[1] = false;
\r
7195 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7196 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7197 if ( doStopStream == 2 ) {
\r
7198 this->abortStream();
\r
7202 MUTEX_LOCK( &stream_.mutex );
\r
7204 // The state might change while waiting on a mutex.
\r
7205 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7210 RtAudioFormat format;
\r
7212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7214 // Setup parameters and do buffer conversion if necessary.
\r
7215 if ( stream_.doConvertBuffer[0] ) {
\r
7216 buffer = stream_.deviceBuffer;
\r
7217 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7218 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7219 format = stream_.deviceFormat[0];
\r
7222 buffer = stream_.userBuffer[0];
\r
7223 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7224 format = stream_.userFormat;
\r
7227 // Do byte swapping if necessary.
\r
7228 if ( stream_.doByteSwap[0] )
\r
7229 byteSwapBuffer( buffer, samples, format );
\r
7231 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7233 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7234 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7235 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7236 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7237 handle->triggered = true;
\r
7240 // Write samples to device.
\r
7241 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7243 if ( result == -1 ) {
\r
7244 // We'll assume this is an underrun, though there isn't a
\r
7245 // specific means for determining that.
\r
7246 handle->xrun[0] = true;
\r
7247 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7248 error( RtError::WARNING );
\r
7249 // Continue on to input section.
\r
7253 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7255 // Setup parameters.
\r
7256 if ( stream_.doConvertBuffer[1] ) {
\r
7257 buffer = stream_.deviceBuffer;
\r
7258 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7259 format = stream_.deviceFormat[1];
\r
7262 buffer = stream_.userBuffer[1];
\r
7263 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7264 format = stream_.userFormat;
\r
7267 // Read samples from device.
\r
7268 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7270 if ( result == -1 ) {
\r
7271 // We'll assume this is an overrun, though there isn't a
\r
7272 // specific means for determining that.
\r
7273 handle->xrun[1] = true;
\r
7274 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7275 error( RtError::WARNING );
\r
7279 // Do byte swapping if necessary.
\r
7280 if ( stream_.doByteSwap[1] )
\r
7281 byteSwapBuffer( buffer, samples, format );
\r
7283 // Do buffer conversion if necessary.
\r
7284 if ( stream_.doConvertBuffer[1] )
\r
7285 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7289 MUTEX_UNLOCK( &stream_.mutex );
\r
7291 RtApi::tickStreamTime();
\r
7292 if ( doStopStream == 1 ) this->stopStream();
\r
7295 extern "C" void *ossCallbackHandler( void *ptr )
\r
7297 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7298 RtApiOss *object = (RtApiOss *) info->object;
\r
7299 bool *isRunning = &info->isRunning;
\r
7301 while ( *isRunning == true ) {
\r
7302 pthread_testcancel();
\r
7303 object->callbackEvent();
\r
7306 pthread_exit( NULL );
\r
7309 //******************** End of __LINUX_OSS__ *********************//
\r
7313 // *************************************************** //
\r
7315 // Protected common (OS-independent) RtAudio methods.
\r
7317 // *************************************************** //
\r
7319 // This method can be modified to control the behavior of error
\r
7320 // message printing.
\r
7321 void RtApi :: error( RtError::Type type )
\r
7323 errorStream_.str(""); // clear the ostringstream
\r
7324 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7325 std::cerr << '\n' << errorText_ << "\n\n";
\r
7326 else if ( type != RtError::WARNING )
\r
7327 throw( RtError( errorText_, type ) );
\r
7330 void RtApi :: verifyStream()
\r
7332 if ( stream_.state == STREAM_CLOSED ) {
\r
7333 errorText_ = "RtApi:: a stream is not open!";
\r
7334 error( RtError::INVALID_USE );
\r
7338 void RtApi :: clearStreamInfo()
\r
7340 stream_.mode = UNINITIALIZED;
\r
7341 stream_.state = STREAM_CLOSED;
\r
7342 stream_.sampleRate = 0;
\r
7343 stream_.bufferSize = 0;
\r
7344 stream_.nBuffers = 0;
\r
7345 stream_.userFormat = 0;
\r
7346 stream_.userInterleaved = true;
\r
7347 stream_.streamTime = 0.0;
\r
7348 stream_.apiHandle = 0;
\r
7349 stream_.deviceBuffer = 0;
\r
7350 stream_.callbackInfo.callback = 0;
\r
7351 stream_.callbackInfo.userData = 0;
\r
7352 stream_.callbackInfo.isRunning = false;
\r
7353 for ( int i=0; i<2; i++ ) {
\r
7354 stream_.device[i] = 11111;
\r
7355 stream_.doConvertBuffer[i] = false;
\r
7356 stream_.deviceInterleaved[i] = true;
\r
7357 stream_.doByteSwap[i] = false;
\r
7358 stream_.nUserChannels[i] = 0;
\r
7359 stream_.nDeviceChannels[i] = 0;
\r
7360 stream_.channelOffset[i] = 0;
\r
7361 stream_.deviceFormat[i] = 0;
\r
7362 stream_.latency[i] = 0;
\r
7363 stream_.userBuffer[i] = 0;
\r
7364 stream_.convertInfo[i].channels = 0;
\r
7365 stream_.convertInfo[i].inJump = 0;
\r
7366 stream_.convertInfo[i].outJump = 0;
\r
7367 stream_.convertInfo[i].inFormat = 0;
\r
7368 stream_.convertInfo[i].outFormat = 0;
\r
7369 stream_.convertInfo[i].inOffset.clear();
\r
7370 stream_.convertInfo[i].outOffset.clear();
\r
7374 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7376 if ( format == RTAUDIO_SINT16 )
\r
7378 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7379 format == RTAUDIO_FLOAT32 )
\r
7381 else if ( format == RTAUDIO_FLOAT64 )
\r
7383 else if ( format == RTAUDIO_SINT8 )
\r
7386 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7387 error( RtError::WARNING );
\r
7392 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7394 if ( mode == INPUT ) { // convert device to user buffer
\r
7395 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7396 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7397 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7398 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7400 else { // convert user to device buffer
\r
7401 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7402 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7403 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7404 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7407 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7408 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7410 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7412 // Set up the interleave/deinterleave offsets.
\r
7413 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7414 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7415 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7416 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7417 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7418 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7419 stream_.convertInfo[mode].inJump = 1;
\r
7423 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7424 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7425 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7426 stream_.convertInfo[mode].outJump = 1;
\r
7430 else { // no (de)interleaving
\r
7431 if ( stream_.userInterleaved ) {
\r
7432 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7433 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7434 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7438 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7439 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7440 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7441 stream_.convertInfo[mode].inJump = 1;
\r
7442 stream_.convertInfo[mode].outJump = 1;
\r
7447 // Add channel offset.
\r
7448 if ( firstChannel > 0 ) {
\r
7449 if ( stream_.deviceInterleaved[mode] ) {
\r
7450 if ( mode == OUTPUT ) {
\r
7451 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7452 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7455 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7456 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7460 if ( mode == OUTPUT ) {
\r
7461 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7462 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7465 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7466 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7472 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7474 // This function does format conversion, input/output channel compensation, and
\r
7475 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7476 // the lower three bytes of a 32-bit integer.
\r
7478 // Clear our device buffer when in/out duplex device channels are different
\r
7479 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7480 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7481 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7484 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7486 Float64 *out = (Float64 *)outBuffer;
\r
7488 if (info.inFormat == RTAUDIO_SINT8) {
\r
7489 signed char *in = (signed char *)inBuffer;
\r
7490 scale = 1.0 / 127.5;
\r
7491 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7492 for (j=0; j<info.channels; j++) {
\r
7493 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7494 out[info.outOffset[j]] += 0.5;
\r
7495 out[info.outOffset[j]] *= scale;
\r
7497 in += info.inJump;
\r
7498 out += info.outJump;
\r
7501 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7502 Int16 *in = (Int16 *)inBuffer;
\r
7503 scale = 1.0 / 32767.5;
\r
7504 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7505 for (j=0; j<info.channels; j++) {
\r
7506 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7507 out[info.outOffset[j]] += 0.5;
\r
7508 out[info.outOffset[j]] *= scale;
\r
7510 in += info.inJump;
\r
7511 out += info.outJump;
\r
7514 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7515 Int32 *in = (Int32 *)inBuffer;
\r
7516 scale = 1.0 / 8388607.5;
\r
7517 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7518 for (j=0; j<info.channels; j++) {
\r
7519 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7520 out[info.outOffset[j]] += 0.5;
\r
7521 out[info.outOffset[j]] *= scale;
\r
7523 in += info.inJump;
\r
7524 out += info.outJump;
\r
7527 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7528 Int32 *in = (Int32 *)inBuffer;
\r
7529 scale = 1.0 / 2147483647.5;
\r
7530 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7531 for (j=0; j<info.channels; j++) {
\r
7532 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7533 out[info.outOffset[j]] += 0.5;
\r
7534 out[info.outOffset[j]] *= scale;
\r
7536 in += info.inJump;
\r
7537 out += info.outJump;
\r
7540 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7541 Float32 *in = (Float32 *)inBuffer;
\r
7542 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7543 for (j=0; j<info.channels; j++) {
\r
7544 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7546 in += info.inJump;
\r
7547 out += info.outJump;
\r
7550 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7551 // Channel compensation and/or (de)interleaving only.
\r
7552 Float64 *in = (Float64 *)inBuffer;
\r
7553 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7554 for (j=0; j<info.channels; j++) {
\r
7555 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7557 in += info.inJump;
\r
7558 out += info.outJump;
\r
7562 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7564 Float32 *out = (Float32 *)outBuffer;
\r
7566 if (info.inFormat == RTAUDIO_SINT8) {
\r
7567 signed char *in = (signed char *)inBuffer;
\r
7568 scale = (Float32) ( 1.0 / 127.5 );
\r
7569 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7570 for (j=0; j<info.channels; j++) {
\r
7571 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7572 out[info.outOffset[j]] += 0.5;
\r
7573 out[info.outOffset[j]] *= scale;
\r
7575 in += info.inJump;
\r
7576 out += info.outJump;
\r
7579 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7580 Int16 *in = (Int16 *)inBuffer;
\r
7581 scale = (Float32) ( 1.0 / 32767.5 );
\r
7582 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7583 for (j=0; j<info.channels; j++) {
\r
7584 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7585 out[info.outOffset[j]] += 0.5;
\r
7586 out[info.outOffset[j]] *= scale;
\r
7588 in += info.inJump;
\r
7589 out += info.outJump;
\r
7592 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7593 Int32 *in = (Int32 *)inBuffer;
\r
7594 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7595 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7596 for (j=0; j<info.channels; j++) {
\r
7597 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7598 out[info.outOffset[j]] += 0.5;
\r
7599 out[info.outOffset[j]] *= scale;
\r
7601 in += info.inJump;
\r
7602 out += info.outJump;
\r
7605 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7606 Int32 *in = (Int32 *)inBuffer;
\r
7607 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7608 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7609 for (j=0; j<info.channels; j++) {
\r
7610 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7611 out[info.outOffset[j]] += 0.5;
\r
7612 out[info.outOffset[j]] *= scale;
\r
7614 in += info.inJump;
\r
7615 out += info.outJump;
\r
7618 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7619 // Channel compensation and/or (de)interleaving only.
\r
7620 Float32 *in = (Float32 *)inBuffer;
\r
7621 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7622 for (j=0; j<info.channels; j++) {
\r
7623 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7625 in += info.inJump;
\r
7626 out += info.outJump;
\r
7629 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7630 Float64 *in = (Float64 *)inBuffer;
\r
7631 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7632 for (j=0; j<info.channels; j++) {
\r
7633 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7635 in += info.inJump;
\r
7636 out += info.outJump;
\r
7640 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7641 Int32 *out = (Int32 *)outBuffer;
\r
7642 if (info.inFormat == RTAUDIO_SINT8) {
\r
7643 signed char *in = (signed char *)inBuffer;
\r
7644 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7645 for (j=0; j<info.channels; j++) {
\r
7646 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7647 out[info.outOffset[j]] <<= 24;
\r
7649 in += info.inJump;
\r
7650 out += info.outJump;
\r
7653 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7654 Int16 *in = (Int16 *)inBuffer;
\r
7655 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7656 for (j=0; j<info.channels; j++) {
\r
7657 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7658 out[info.outOffset[j]] <<= 16;
\r
7660 in += info.inJump;
\r
7661 out += info.outJump;
\r
7664 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7665 Int32 *in = (Int32 *)inBuffer;
\r
7666 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7667 for (j=0; j<info.channels; j++) {
\r
7668 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7669 out[info.outOffset[j]] <<= 8;
\r
7671 in += info.inJump;
\r
7672 out += info.outJump;
\r
7675 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7676 // Channel compensation and/or (de)interleaving only.
\r
7677 Int32 *in = (Int32 *)inBuffer;
\r
7678 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7679 for (j=0; j<info.channels; j++) {
\r
7680 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7682 in += info.inJump;
\r
7683 out += info.outJump;
\r
7686 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7687 Float32 *in = (Float32 *)inBuffer;
\r
7688 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7689 for (j=0; j<info.channels; j++) {
\r
7690 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7692 in += info.inJump;
\r
7693 out += info.outJump;
\r
7696 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7697 Float64 *in = (Float64 *)inBuffer;
\r
7698 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7699 for (j=0; j<info.channels; j++) {
\r
7700 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7702 in += info.inJump;
\r
7703 out += info.outJump;
\r
7707 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7708 Int32 *out = (Int32 *)outBuffer;
\r
7709 if (info.inFormat == RTAUDIO_SINT8) {
\r
7710 signed char *in = (signed char *)inBuffer;
\r
7711 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7712 for (j=0; j<info.channels; j++) {
\r
7713 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7714 out[info.outOffset[j]] <<= 16;
\r
7716 in += info.inJump;
\r
7717 out += info.outJump;
\r
7720 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7721 Int16 *in = (Int16 *)inBuffer;
\r
7722 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7723 for (j=0; j<info.channels; j++) {
\r
7724 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7725 out[info.outOffset[j]] <<= 8;
\r
7727 in += info.inJump;
\r
7728 out += info.outJump;
\r
7731 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7732 // Channel compensation and/or (de)interleaving only.
\r
7733 Int32 *in = (Int32 *)inBuffer;
\r
7734 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7735 for (j=0; j<info.channels; j++) {
\r
7736 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7738 in += info.inJump;
\r
7739 out += info.outJump;
\r
7742 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7743 Int32 *in = (Int32 *)inBuffer;
\r
7744 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7745 for (j=0; j<info.channels; j++) {
\r
7746 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7747 out[info.outOffset[j]] >>= 8;
\r
7749 in += info.inJump;
\r
7750 out += info.outJump;
\r
7753 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7754 Float32 *in = (Float32 *)inBuffer;
\r
7755 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7756 for (j=0; j<info.channels; j++) {
\r
7757 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7759 in += info.inJump;
\r
7760 out += info.outJump;
\r
7763 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7764 Float64 *in = (Float64 *)inBuffer;
\r
7765 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7766 for (j=0; j<info.channels; j++) {
\r
7767 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7769 in += info.inJump;
\r
7770 out += info.outJump;
\r
7774 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7775 Int16 *out = (Int16 *)outBuffer;
\r
7776 if (info.inFormat == RTAUDIO_SINT8) {
\r
7777 signed char *in = (signed char *)inBuffer;
\r
7778 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7779 for (j=0; j<info.channels; j++) {
\r
7780 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7781 out[info.outOffset[j]] <<= 8;
\r
7783 in += info.inJump;
\r
7784 out += info.outJump;
\r
7787 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7788 // Channel compensation and/or (de)interleaving only.
\r
7789 Int16 *in = (Int16 *)inBuffer;
\r
7790 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7791 for (j=0; j<info.channels; j++) {
\r
7792 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7794 in += info.inJump;
\r
7795 out += info.outJump;
\r
7798 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7799 Int32 *in = (Int32 *)inBuffer;
\r
7800 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7801 for (j=0; j<info.channels; j++) {
\r
7802 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7804 in += info.inJump;
\r
7805 out += info.outJump;
\r
7808 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7809 Int32 *in = (Int32 *)inBuffer;
\r
7810 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7811 for (j=0; j<info.channels; j++) {
\r
7812 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7814 in += info.inJump;
\r
7815 out += info.outJump;
\r
7818 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7819 Float32 *in = (Float32 *)inBuffer;
\r
7820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7821 for (j=0; j<info.channels; j++) {
\r
7822 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7824 in += info.inJump;
\r
7825 out += info.outJump;
\r
7828 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7829 Float64 *in = (Float64 *)inBuffer;
\r
7830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7831 for (j=0; j<info.channels; j++) {
\r
7832 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7834 in += info.inJump;
\r
7835 out += info.outJump;
\r
7839 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7840 signed char *out = (signed char *)outBuffer;
\r
7841 if (info.inFormat == RTAUDIO_SINT8) {
\r
7842 // Channel compensation and/or (de)interleaving only.
\r
7843 signed char *in = (signed char *)inBuffer;
\r
7844 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7845 for (j=0; j<info.channels; j++) {
\r
7846 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7848 in += info.inJump;
\r
7849 out += info.outJump;
\r
7852 if (info.inFormat == RTAUDIO_SINT16) {
\r
7853 Int16 *in = (Int16 *)inBuffer;
\r
7854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7855 for (j=0; j<info.channels; j++) {
\r
7856 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7858 in += info.inJump;
\r
7859 out += info.outJump;
\r
7862 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7863 Int32 *in = (Int32 *)inBuffer;
\r
7864 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7865 for (j=0; j<info.channels; j++) {
\r
7866 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7868 in += info.inJump;
\r
7869 out += info.outJump;
\r
7872 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7873 Int32 *in = (Int32 *)inBuffer;
\r
7874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7875 for (j=0; j<info.channels; j++) {
\r
7876 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7878 in += info.inJump;
\r
7879 out += info.outJump;
\r
7882 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7883 Float32 *in = (Float32 *)inBuffer;
\r
7884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7885 for (j=0; j<info.channels; j++) {
\r
7886 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7888 in += info.inJump;
\r
7889 out += info.outJump;
\r
7892 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7893 Float64 *in = (Float64 *)inBuffer;
\r
7894 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7895 for (j=0; j<info.channels; j++) {
\r
7896 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7898 in += info.inJump;
\r
7899 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7909 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7911 register char val;
\r
7912 register char *ptr;
\r
7915 if ( format == RTAUDIO_SINT16 ) {
\r
7916 for ( unsigned int i=0; i<samples; i++ ) {
\r
7917 // Swap 1st and 2nd bytes.
\r
7919 *(ptr) = *(ptr+1);
\r
7922 // Increment 2 bytes.
\r
7926 else if ( format == RTAUDIO_SINT24 ||
\r
7927 format == RTAUDIO_SINT32 ||
\r
7928 format == RTAUDIO_FLOAT32 ) {
\r
7929 for ( unsigned int i=0; i<samples; i++ ) {
\r
7930 // Swap 1st and 4th bytes.
\r
7932 *(ptr) = *(ptr+3);
\r
7935 // Swap 2nd and 3rd bytes.
\r
7938 *(ptr) = *(ptr+1);
\r
7941 // Increment 3 more bytes.
\r
7945 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7946 for ( unsigned int i=0; i<samples; i++ ) {
\r
7947 // Swap 1st and 8th bytes
\r
7949 *(ptr) = *(ptr+7);
\r
7952 // Swap 2nd and 7th bytes
\r
7955 *(ptr) = *(ptr+5);
\r
7958 // Swap 3rd and 6th bytes
\r
7961 *(ptr) = *(ptr+3);
\r
7964 // Swap 4th and 5th bytes
\r
7967 *(ptr) = *(ptr+1);
\r
7970 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r