RtAudio provides a common API (Application Programming Interface)\r
for realtime audio input/output across Linux (native ALSA, Jack,\r
and OSS), Macintosh OS X (CoreAudio and Jack), and Windows\r
- (DirectSound and ASIO) operating systems.\r
+ (DirectSound, ASIO and WASAPI) operating systems.\r
\r
RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/\r
\r
RtAudio: realtime audio i/o C++ classes\r
- Copyright (c) 2001-2013 Gary P. Scavone\r
+ Copyright (c) 2001-2016 Gary P. Scavone\r
\r
Permission is hereby granted, free of charge, to any person\r
obtaining a copy of this software and associated documentation files\r
*/\r
/************************************************************************/\r
\r
-// RtAudio: Version 4.0.12\r
+// RtAudio: Version 4.1.2\r
\r
#include "RtAudio.h"\r
#include <iostream>\r
#include <cstdlib>\r
#include <cstring>\r
#include <climits>\r
+#include <algorithm>\r
\r
// Static variable definitions.\r
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;\r
32000, 44100, 48000, 88200, 96000, 176400, 192000\r
};\r
\r
-#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)\r
+#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)\r
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)\r
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)\r
#define MUTEX_LOCK(A) EnterCriticalSection(A)\r
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)\r
+\r
+ #include "tchar.h"\r
+\r
+ static std::string convertCharPointerToStdString(const char *text)\r
+ {\r
+ return std::string(text);\r
+ }\r
+\r
+ static std::string convertCharPointerToStdString(const wchar_t *text)\r
+ {\r
+ int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);\r
+ std::string s( length-1, '\0' );\r
+ WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);\r
+ return s;\r
+ }\r
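+\r
+ // Illustrative note: this wide-character overload does the usual two-pass\r
+ // WideCharToMultiByte conversion (size query, then copy), so e.g.\r
+ // convertCharPointerToStdString( L"Speakers" ) yields the UTF-8 std::string\r
+ // "Speakers"; WASAPI device name strings are funneled through it below.\r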
+\r
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)\r
// pthread API\r
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)\r
//\r
// *************************************************** //\r
\r
+std::string RtAudio :: getVersion( void ) throw()\r
+{\r
+ return RTAUDIO_VERSION;\r
+}\r
+\r
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()\r
{\r
apis.clear();\r
#if defined(__WINDOWS_ASIO__)\r
apis.push_back( WINDOWS_ASIO );\r
#endif\r
+#if defined(__WINDOWS_WASAPI__)\r
+ apis.push_back( WINDOWS_WASAPI );\r
+#endif\r
#if defined(__WINDOWS_DS__)\r
apis.push_back( WINDOWS_DS );\r
#endif\r
if ( api == WINDOWS_ASIO )\r
rtapi_ = new RtApiAsio();\r
#endif\r
+#if defined(__WINDOWS_WASAPI__)\r
+ if ( api == WINDOWS_WASAPI )\r
+ rtapi_ = new RtApiWasapi();\r
+#endif\r
#if defined(__WINDOWS_DS__)\r
if ( api == WINDOWS_DS )\r
rtapi_ = new RtApiDs();\r
#endif\r
}\r
\r
-RtAudio :: RtAudio( RtAudio::Api api ) throw()\r
+RtAudio :: RtAudio( RtAudio::Api api )\r
{\r
rtapi_ = 0;\r
\r
getCompiledApi( apis );\r
for ( unsigned int i=0; i<apis.size(); i++ ) {\r
openRtApi( apis[i] );\r
- if ( rtapi_->getDeviceCount() ) break;\r
+ if ( rtapi_ && rtapi_->getDeviceCount() ) break;\r
}\r
\r
if ( rtapi_ ) return;\r
// It should not be possible to get here because the preprocessor\r
// definition __RTAUDIO_DUMMY__ is automatically defined if no\r
// API-specific definitions are passed to the compiler. But just in\r
- // case something weird happens, we'll print out an error message.\r
- std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";\r
+ // case something weird happens, we'll throw an error.\r
+ std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";\r
+ throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );\r
}\r
\r
RtAudio :: ~RtAudio() throw()\r
{\r
- delete rtapi_;\r
+ if ( rtapi_ )\r
+ delete rtapi_;\r
}\r
\r
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,\r
stream_.userBuffer[1] = 0;\r
MUTEX_INITIALIZE( &stream_.mutex );\r
showWarnings_ = true;\r
+ firstErrorOccurred_ = false;\r
}\r
\r
RtApi :: ~RtApi()\r
return;\r
}\r
\r
+ // Clear stream information potentially left from a previously open stream.\r
+ clearStreamInfo();\r
+\r
if ( oParams && oParams->nChannels < 1 ) {\r
errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";\r
error( RtAudioError::INVALID_USE );\r
}\r
}\r
\r
- clearStreamInfo();\r
bool result;\r
\r
if ( oChannels > 0 ) {\r
#endif\r
}\r
\r
+void RtApi :: setStreamTime( double time )\r
+{\r
+ verifyStream();\r
+\r
+ if ( time >= 0.0 )\r
+ stream_.streamTime = time;\r
+}\r
+\r
unsigned int RtApi :: getStreamSampleRate( void )\r
{\r
verifyStream();\r
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );\r
int length = CFStringGetLength(cfname);\r
char *mname = (char *)malloc(length * 3 + 1);\r
+#if defined( UNICODE ) || defined( _UNICODE )\r
+ CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);\r
+#else\r
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());\r
+#endif\r
info.name.append( (const char *)mname, strlen(mname) );\r
info.name.append( ": " );\r
CFRelease( cfname );\r
//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );\r
length = CFStringGetLength(cfname);\r
char *name = (char *)malloc(length * 3 + 1);\r
+#if defined( UNICODE ) || defined( _UNICODE )\r
+ CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);\r
+#else\r
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());\r
+#endif\r
info.name.append( (const char *)name, strlen(name) );\r
CFRelease( cfname );\r
free(name);\r
return info;\r
}\r
\r
- Float64 minimumRate = 100000000.0, maximumRate = 0.0;\r
+ // The sample rate reporting mechanism is a bit of a mystery. It\r
+ // seems that it can either return individual rates or a range of\r
+ // rates. I assume that if the min / max range values are the same,\r
+ // then that represents a single supported rate and if the min / max\r
+ // range values are different, the device supports an arbitrary\r
+ // range of values (though there might be multiple ranges, so we'll\r
+ // use the most conservative range).\r
+ Float64 minimumRate = 1.0, maximumRate = 10000000000.0;\r
+ bool haveValueRange = false;\r
+ info.sampleRates.clear();\r
for ( UInt32 i=0; i<nRanges; i++ ) {\r
- if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;\r
- if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;\r
+ if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {\r
+ unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;\r
+ info.sampleRates.push_back( tmpSr );\r
+\r
+ if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = tmpSr;\r
+\r
+ } else {\r
+ haveValueRange = true;\r
+ if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;\r
+ if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;\r
+ }\r
}\r
\r
- info.sampleRates.clear();\r
- for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
- if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )\r
- info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+ if ( haveValueRange ) {\r
+ for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
+ if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {\r
+ info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
+ }\r
}\r
\r
+ // Sort and remove any redundant values\r
+ std::sort( info.sampleRates.begin(), info.sampleRates.end() );\r
+ info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );\r
+\r
if ( info.sampleRates.size() == 0 ) {\r
errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";\r
errorText_ = errorStream_.str();\r
const AudioObjectPropertyAddress /*properties*/[],\r
void* ratePointer )\r
{\r
-\r
Float64 *rate = (Float64 *) ratePointer;\r
UInt32 dataSize = sizeof( Float64 );\r
AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,\r
\r
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );\r
if (result != noErr || dataSize == 0) {\r
+ free( bufferList );\r
errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";\r
errorText_ = errorStream_.str();\r
return FAILURE;\r
dataSize = sizeof( Float64 );\r
property.mSelector = kAudioDevicePropertyNominalSampleRate;\r
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );\r
-\r
if ( result != noErr ) {\r
errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";\r
errorText_ = errorStream_.str();\r
\r
nominalRate = (Float64) sampleRate;\r
result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );\r
-\r
if ( result != noErr ) {\r
+ AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );\r
errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";\r
errorText_ = errorStream_.str();\r
return FAILURE;\r
\r
// Setup the device property listener for over/underload.\r
property.mSelector = kAudioDeviceProcessorOverload;\r
+ property.mScope = kAudioObjectPropertyScopeGlobal;\r
result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );\r
\r
return SUCCESS;\r
\r
CoreHandle *handle = (CoreHandle *) stream_.apiHandle;\r
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {\r
+ if (handle) {\r
+ AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,\r
+ kAudioObjectPropertyScopeGlobal,\r
+ kAudioObjectPropertyElementMaster };\r
+\r
+ property.mSelector = kAudioDeviceProcessorOverload;\r
+ property.mScope = kAudioObjectPropertyScopeGlobal;\r
+ if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {\r
+ errorText_ = "RtApiCore::closeStream(): error removing property listener!";\r
+ error( RtAudioError::WARNING );\r
+ }\r
+ }\r
if ( stream_.state == STREAM_RUNNING )\r
AudioDeviceStop( handle->id[0], callbackHandler );\r
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )\r
}\r
\r
if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {\r
+ if (handle) {\r
+ AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,\r
+ kAudioObjectPropertyScopeGlobal,\r
+ kAudioObjectPropertyElementMaster };\r
+\r
+ property.mSelector = kAudioDeviceProcessorOverload;\r
+ property.mScope = kAudioObjectPropertyScopeGlobal;\r
+ if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {\r
+ errorText_ = "RtApiCore::closeStream(): error removing property listener!";\r
+ error( RtAudioError::WARNING );\r
+ }\r
+ }\r
if ( stream_.state == STREAM_RUNNING )\r
AudioDeviceStop( handle->id[1], callbackHandler );\r
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )\r
}\r
}\r
}\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
AudioDeviceID inputDevice;\r
\r
// Get the current jack server sample rate.\r
info.sampleRates.clear();\r
- info.sampleRates.push_back( jack_get_sample_rate( client ) );\r
+\r
+ info.preferredSampleRate = jack_get_sample_rate( client );\r
+ info.sampleRates.push_back( info.preferredSampleRate );\r
\r
// Count the available ports containing the client name as device\r
// channels. Jack "input ports" equal RtAudio output channels.\r
memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );\r
}\r
}\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
info.sampleRates.clear();\r
for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {\r
result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );\r
- if ( result == ASE_OK )\r
+ if ( result == ASE_OK ) {\r
info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[i];\r
+ }\r
}\r
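+\r
+ // Example (illustrative): if the driver accepts 44100, 48000 and 96000 Hz,\r
+ // preferredSampleRate ends up 48000, the highest supported rate <= 48 kHz.\r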
\r
// Determine supported data types ... just check first channel and assume rest are the same.\r
return info;\r
}\r
\r
-static void bufferSwitch( long index, ASIOBool processNow )\r
+static void bufferSwitch( long index, ASIOBool /*processNow*/ )\r
{\r
RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;\r
object->callbackEvent( index );\r
unsigned int firstChannel, unsigned int sampleRate,\r
RtAudioFormat format, unsigned int *bufferSize,\r
RtAudio::StreamOptions *options )\r
-{\r
+{\r
+\r
+ bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;\r
+\r
// For ASIO, a duplex stream MUST use the same driver.\r
- if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {\r
+ if ( isDuplexInput && stream_.device[0] != device ) {\r
errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";\r
return FAILURE;\r
}\r
}\r
\r
// Only load the driver once for duplex stream.\r
- if ( mode != INPUT || stream_.mode != OUTPUT ) {\r
+ if ( !isDuplexInput ) {\r
// The getDeviceInfo() function will not work when a stream is open\r
// because ASIO does not allow multiple devices to run at the same\r
// time. Thus, we'll probe the system before opening a stream and\r
}\r
}\r
\r
+ // Keep these declarations above any "goto error": they are used for error cleanup, and declaring them here avoids goto-over-initialization issues.\r
+ bool buffersAllocated = false;\r
+ AsioHandle *handle = (AsioHandle *) stream_.apiHandle;\r
+ unsigned int nChannels;\r
+\r
+\r
// Check the device channel count.\r
long inputChannels, outputChannels;\r
result = ASIOGetChannels( &inputChannels, &outputChannels );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||\r
( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
stream_.nDeviceChannels[mode] = channels;\r
stream_.nUserChannels[mode] = channels;\r
// Verify the sample rate is supported.\r
result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Get the current sample rate\r
ASIOSampleRate currentRate;\r
result = ASIOGetSampleRate( &currentRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Set the sample rate only if necessary\r
if ( currentRate != sampleRate ) {\r
result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
}\r
\r
else channelInfo.isInput = true;\r
result = ASIOGetChannelInfo( &channelInfo );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Assuming WINDOWS host is always little-endian.\r
}\r
\r
if ( stream_.deviceFormat[mode] == 0 ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Set the buffer size. For a duplex stream, this will end up\r
long minSize, maxSize, preferSize, granularity;\r
result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
- if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
- else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
- else if ( granularity == -1 ) {\r
- // Make sure bufferSize is a power of two.\r
- int log2_of_min_size = 0;\r
- int log2_of_max_size = 0;\r
+ if ( isDuplexInput ) {\r
+ // When this is the duplex input (the output was opened first), we have to use the same\r
+ // buffer size as the output, because the output may have used the preferred buffer size,\r
+ // which most likely wasn't what was passed in here. The buffer sizes have to be\r
+ // identical anyway, so instead of throwing an error we make them equal. The caller uses\r
+ // the reference to the "bufferSize" param as usual to set up its processing buffers.\r
\r
- for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {\r
- if ( minSize & ((long)1 << i) ) log2_of_min_size = i;\r
- if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;\r
- }\r
+ *bufferSize = stream_.bufferSize;\r
\r
- long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );\r
- int min_delta_num = log2_of_min_size;\r
+ } else {\r
+ if ( *bufferSize == 0 ) *bufferSize = preferSize;\r
+ else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
+ else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
+ else if ( granularity == -1 ) {\r
+ // Make sure bufferSize is a power of two.\r
+ int log2_of_min_size = 0;\r
+ int log2_of_max_size = 0;\r
+\r
+ for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {\r
+ if ( minSize & ((long)1 << i) ) log2_of_min_size = i;\r
+ if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;\r
+ }\r
\r
- for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {\r
- long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );\r
- if (current_delta < min_delta) {\r
- min_delta = current_delta;\r
- min_delta_num = i;\r
+ long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );\r
+ int min_delta_num = log2_of_min_size;\r
+\r
+ for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {\r
+ long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );\r
+ if (current_delta < min_delta) {\r
+ min_delta = current_delta;\r
+ min_delta_num = i;\r
+ }\r
}\r
- }\r
\r
- *bufferSize = ( (unsigned int)1 << min_delta_num );\r
- if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
- else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
- }\r
- else if ( granularity != 0 ) {\r
- // Set to an even multiple of granularity, rounding up.\r
- *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;\r
+ *bufferSize = ( (unsigned int)1 << min_delta_num );\r
+ if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
+ else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
+ }\r
+ else if ( granularity != 0 ) {\r
+ // Set to an even multiple of granularity, rounding up.\r
+ *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;\r
+ }\r
}\r
\r
- if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {\r
- drivers.removeCurrentDriver();\r
+ /*\r
+ // This check is no longer needed (see above); left here just in case.\r
+ if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {\r
errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";\r
- return FAILURE;\r
+ goto error;\r
}\r
+ */\r
\r
stream_.bufferSize = *bufferSize;\r
stream_.nBuffers = 2;\r
stream_.deviceInterleaved[mode] = false;\r
\r
// Allocate, if necessary, our AsioHandle structure for the stream.\r
- AsioHandle *handle = (AsioHandle *) stream_.apiHandle;\r
if ( handle == 0 ) {\r
try {\r
handle = new AsioHandle;\r
}\r
catch ( std::bad_alloc& ) {\r
- //if ( handle == NULL ) { \r
- drivers.removeCurrentDriver();\r
errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";\r
- return FAILURE;\r
+ goto error;\r
}\r
handle->bufferInfos = 0;\r
\r
// Create the ASIO internal buffers. Since RtAudio sets up input\r
// and output separately, we'll have to dispose of previously\r
// created output buffers for a duplex stream.\r
- long inputLatency, outputLatency;\r
if ( mode == INPUT && stream_.mode == OUTPUT ) {\r
ASIODisposeBuffers();\r
if ( handle->bufferInfos ) free( handle->bufferInfos );\r
}\r
\r
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.\r
- bool buffersAllocated = false;\r
- unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];\r
+ unsigned int i;\r
+ nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];\r
handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );\r
if ( handle->bufferInfos == NULL ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";\r
infos->buffers[0] = infos->buffers[1] = 0;\r
}\r
\r
+ // prepare for callbacks\r
+ stream_.sampleRate = sampleRate;\r
+ stream_.device[mode] = device;\r
+ stream_.mode = isDuplexInput ? DUPLEX : mode;\r
+\r
+ // store this class instance before registering the callbacks, which will use it\r
+ asioCallbackInfo = &stream_.callbackInfo;\r
+ stream_.callbackInfo.object = (void *) this;\r
+\r
// Set up the ASIO callback structure and create the ASIO data buffers.\r
asioCallbacks.bufferSwitch = &bufferSwitch;\r
asioCallbacks.sampleRateDidChange = &sampleRateChanged;\r
asioCallbacks.asioMessage = &asioMessages;\r
asioCallbacks.bufferSwitchTimeInfo = NULL;\r
result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );\r
+ if ( result != ASE_OK ) {\r
+ // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges\r
+ // but only accept the preferred buffer size as the parameter to ASIOCreateBuffers (e.g. Creative's ASIO driver).\r
+ // In that case, let's be naïve and try the preferred size instead.\r
+ *bufferSize = preferSize;\r
+ stream_.bufferSize = *bufferSize;\r
+ result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );\r
+ }\r
+\r
if ( result != ASE_OK ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";\r
errorText_ = errorStream_.str();\r
goto error;\r
}\r
- buffersAllocated = true;\r
+ buffersAllocated = true;\r
+ stream_.state = STREAM_STOPPED;\r
\r
// Set flags for buffer conversion.\r
stream_.doConvertBuffer[mode] = false;\r
\r
bool makeBuffer = true;\r
bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );\r
- if ( mode == INPUT ) {\r
- if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {\r
- unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );\r
- if ( bufferBytes <= bytesOut ) makeBuffer = false;\r
- }\r
+ if ( isDuplexInput && stream_.deviceBuffer ) {\r
+ unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );\r
+ if ( bufferBytes <= bytesOut ) makeBuffer = false;\r
}\r
\r
if ( makeBuffer ) {\r
}\r
}\r
\r
- stream_.sampleRate = sampleRate;\r
- stream_.device[mode] = device;\r
- stream_.state = STREAM_STOPPED;\r
- asioCallbackInfo = &stream_.callbackInfo;\r
- stream_.callbackInfo.object = (void *) this;\r
- if ( stream_.mode == OUTPUT && mode == INPUT )\r
- // We had already set up an output stream.\r
- stream_.mode = DUPLEX;\r
- else\r
- stream_.mode = mode;\r
-\r
// Determine device latencies\r
+ long inputLatency, outputLatency;\r
result = ASIOGetLatencies( &inputLatency, &outputLatency );\r
if ( result != ASE_OK ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";\r
return SUCCESS;\r
\r
error:\r
- if ( buffersAllocated )\r
- ASIODisposeBuffers();\r
- drivers.removeCurrentDriver();\r
+ if ( !isDuplexInput ) {\r
+ // Cleanup after an error on the duplex input is done by RtApi::openStream,\r
+ // so here we clean up for the single-stream case only.\r
\r
- if ( handle ) {\r
- CloseHandle( handle->condition );\r
- if ( handle->bufferInfos )\r
- free( handle->bufferInfos );\r
- delete handle;\r
- stream_.apiHandle = 0;\r
- }\r
+ if ( buffersAllocated )\r
+ ASIODisposeBuffers();\r
\r
- for ( int i=0; i<2; i++ ) {\r
- if ( stream_.userBuffer[i] ) {\r
- free( stream_.userBuffer[i] );\r
- stream_.userBuffer[i] = 0;\r
+ drivers.removeCurrentDriver();\r
+\r
+ if ( handle ) {\r
+ CloseHandle( handle->condition );\r
+ if ( handle->bufferInfos )\r
+ free( handle->bufferInfos );\r
+\r
+ delete handle;\r
+ stream_.apiHandle = 0;\r
}\r
- }\r
\r
- if ( stream_.deviceBuffer ) {\r
- free( stream_.deviceBuffer );\r
- stream_.deviceBuffer = 0;\r
+\r
+ if ( stream_.userBuffer[mode] ) {\r
+ free( stream_.userBuffer[mode] );\r
+ stream_.userBuffer[mode] = 0;\r
+ }\r
+\r
+ if ( stream_.deviceBuffer ) {\r
+ free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = 0;\r
+ }\r
}\r
\r
return FAILURE;\r
-}\r
+}\r
\r
void RtApiAsio :: closeStream()\r
{\r
\r
if ( handle->drainCounter > 1 ) { // write zeros to the output stream\r
\r
- for ( i=0, j=0; i<nChannels; i++ ) {\r
- if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
- memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );\r
+ for ( i=0, j=0; i<nChannels; i++ ) {\r
+ if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
+ memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );\r
+ }\r
+\r
+ }\r
+ else if ( stream_.doConvertBuffer[0] ) {\r
+\r
+ convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );\r
+ if ( stream_.doByteSwap[0] )\r
+ byteSwapBuffer( stream_.deviceBuffer,\r
+ stream_.bufferSize * stream_.nDeviceChannels[0],\r
+ stream_.deviceFormat[0] );\r
+\r
+ for ( i=0, j=0; i<nChannels; i++ ) {\r
+ if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
+ memcpy( handle->bufferInfos[i].buffers[bufferIndex],\r
+ &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );\r
+ }\r
+\r
+ }\r
+ else {\r
+\r
+ if ( stream_.doByteSwap[0] )\r
+ byteSwapBuffer( stream_.userBuffer[0],\r
+ stream_.bufferSize * stream_.nUserChannels[0],\r
+ stream_.userFormat );\r
+\r
+ for ( i=0, j=0; i<nChannels; i++ ) {\r
+ if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
+ memcpy( handle->bufferInfos[i].buffers[bufferIndex],\r
+ &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );\r
+ }\r
+\r
+ }\r
+ }\r
+\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
+ }\r
+\r
+ if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
+\r
+ bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);\r
+\r
+ if (stream_.doConvertBuffer[1]) {\r
+\r
+ // Always interleave ASIO input data.\r
+ for ( i=0, j=0; i<nChannels; i++ ) {\r
+ if ( handle->bufferInfos[i].isInput == ASIOTrue )\r
+ memcpy( &stream_.deviceBuffer[j++*bufferBytes],\r
+ handle->bufferInfos[i].buffers[bufferIndex],\r
+ bufferBytes );\r
+ }\r
+\r
+ if ( stream_.doByteSwap[1] )\r
+ byteSwapBuffer( stream_.deviceBuffer,\r
+ stream_.bufferSize * stream_.nDeviceChannels[1],\r
+ stream_.deviceFormat[1] );\r
+ convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );\r
+\r
+ }\r
+ else {\r
+ for ( i=0, j=0; i<nChannels; i++ ) {\r
+ if ( handle->bufferInfos[i].isInput == ASIOTrue ) {\r
+ memcpy( &stream_.userBuffer[1][bufferBytes*j++],\r
+ handle->bufferInfos[i].buffers[bufferIndex],\r
+ bufferBytes );\r
+ }\r
+ }\r
+\r
+ if ( stream_.doByteSwap[1] )\r
+ byteSwapBuffer( stream_.userBuffer[1],\r
+ stream_.bufferSize * stream_.nUserChannels[1],\r
+ stream_.userFormat );\r
+ }\r
+ }\r
+\r
+ unlock:\r
+ // The following call was suggested by Malte Clasen. While the API\r
+ // documentation indicates it should not be required, some device\r
+ // drivers apparently do not function correctly without it.\r
+ ASIOOutputReady();\r
+\r
+ RtApi::tickStreamTime();\r
+ return SUCCESS;\r
+}\r
+\r
+static void sampleRateChanged( ASIOSampleRate sRate )\r
+{\r
+ // The ASIO documentation says that this usually only happens during\r
+ // external sync. Audio processing is not stopped by the driver,\r
+ // actual sample rate might not have even changed, maybe only the\r
+ // sample rate status of an AES/EBU or S/PDIF digital input at the\r
+ // audio device.\r
+\r
+ RtApi *object = (RtApi *) asioCallbackInfo->object;\r
+ try {\r
+ object->stopStream();\r
+ }\r
+ catch ( RtAudioError &exception ) {\r
+ std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;\r
+ return;\r
+ }\r
+\r
+ std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;\r
+}\r
+\r
+static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )\r
+{\r
+ long ret = 0;\r
+\r
+ switch( selector ) {\r
+ case kAsioSelectorSupported:\r
+ if ( value == kAsioResetRequest\r
+ || value == kAsioEngineVersion\r
+ || value == kAsioResyncRequest\r
+ || value == kAsioLatenciesChanged\r
+ // The following three were added for ASIO 2.0, you don't\r
+ // necessarily have to support them.\r
+ || value == kAsioSupportsTimeInfo\r
+ || value == kAsioSupportsTimeCode\r
+ || value == kAsioSupportsInputMonitor)\r
+ ret = 1L;\r
+ break;\r
+ case kAsioResetRequest:\r
+ // Defer the task and perform the reset of the driver during the\r
+ // next "safe" situation. You cannot reset the driver right now,\r
+ // as this code is called from the driver. Resetting the driver is\r
+ // done by completely destructing it, i.e. ASIOStop(),\r
+ // ASIODisposeBuffers() and destruction; afterwards you initialize\r
+ // the driver again.\r
+ std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;\r
+ ret = 1L;\r
+ break;\r
+ case kAsioResyncRequest:\r
+ // This informs the application that the driver encountered some\r
+ // non-fatal data loss. It is used for synchronization purposes\r
+ // of different media. Added mainly to work around the Win16Mutex\r
+ // problems in Windows 95/98 with the Windows Multimedia system,\r
+ // which could lose data because the Mutex was held too long by\r
+ // another thread. However a driver can issue it in other\r
+ // situations, too.\r
+ // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;\r
+ asioXRun = true;\r
+ ret = 1L;\r
+ break;\r
+ case kAsioLatenciesChanged:\r
+ // This will inform the host application that the driver's\r
+ // latencies have changed. Beware, this does not mean that the\r
+ // buffer sizes have changed! You might need to update internal\r
+ // delay data.\r
+ std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;\r
+ ret = 1L;\r
+ break;\r
+ case kAsioEngineVersion:\r
+ // Return the supported ASIO version of the host application. If\r
+ // a host application does not implement this selector, ASIO 1.0\r
+ // is assumed by the driver.\r
+ ret = 2L;\r
+ break;\r
+ case kAsioSupportsTimeInfo:\r
+ // Informs the driver whether the\r
+ // asioCallbacks.bufferSwitchTimeInfo() callback is supported.\r
+ // For compatibility with ASIO 1.0 drivers the host application\r
+ // should always support the "old" bufferSwitch method, too.\r
+ ret = 0;\r
+ break;\r
+ case kAsioSupportsTimeCode:\r
+ // Informs the driver whether application is interested in time\r
+ // code info. If an application does not need to know about time\r
+ // code, the driver has less work to do.\r
+ ret = 0;\r
+ break;\r
+ }\r
+ return ret;\r
+}\r
+\r
+static const char* getAsioErrorString( ASIOError result )\r
+{\r
+ struct Messages\r
+ {\r
+ ASIOError value;\r
+ const char*message;\r
+ };\r
+\r
+ static const Messages m[] =\r
+ {\r
+ { ASE_NotPresent, "Hardware input or output is not present or available." },\r
+ { ASE_HWMalfunction, "Hardware is malfunctioning." },\r
+ { ASE_InvalidParameter, "Invalid input parameter." },\r
+ { ASE_InvalidMode, "Invalid mode." },\r
+ { ASE_SPNotAdvancing, "Sample position not advancing." },\r
+ { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },\r
+ { ASE_NoMemory, "Not enough memory to complete the request." }\r
+ };\r
+\r
+ for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )\r
+ if ( m[i].value == result ) return m[i].message;\r
+\r
+ return "Unknown error.";\r
+}\r
+\r
+//******************** End of __WINDOWS_ASIO__ *********************//\r
+#endif\r
+\r
+\r
+#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API\r
+\r
+// Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014\r
+// - Introduces support for the Windows WASAPI API\r
+// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required\r
+// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface\r
+// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user\r
+\r
+#ifndef INITGUID\r
+ #define INITGUID\r
+#endif\r
+#include <audioclient.h>\r
+#include <avrt.h>\r
+#include <mmdeviceapi.h>\r
+#include <functiondiscoverykeys_devpkey.h>\r
+\r
+//=============================================================================\r
+\r
+#define SAFE_RELEASE( objectPtr )\\r
+if ( objectPtr )\\r
+{\\r
+ objectPtr->Release();\\r
+ objectPtr = NULL;\\r
+}\r
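+\r
+// e.g. SAFE_RELEASE( audioClient ) releases the COM reference when the\r
+// pointer is non-NULL and then resets it, so repeated cleanup paths are safe.\r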
+\r
+typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.\r
+// Therefore we must perform all necessary conversions to user buffers in order to satisfy these\r
+// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to\r
+// provide intermediate storage for read / write synchronization.\r
+class WasapiBuffer\r
+{\r
+public:\r
+ WasapiBuffer()\r
+ : buffer_( NULL ),\r
+ bufferSize_( 0 ),\r
+ inIndex_( 0 ),\r
+ outIndex_( 0 ) {}\r
+\r
+ ~WasapiBuffer() {\r
+ free( buffer_ );\r
+ }\r
+\r
+ // sets the length of the internal ring buffer\r
+ void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {\r
+ free( buffer_ );\r
+\r
+ buffer_ = ( char* ) calloc( bufferSize, formatBytes );\r
+\r
+ bufferSize_ = bufferSize;\r
+ inIndex_ = 0;\r
+ outIndex_ = 0;\r
+ }\r
+\r
+ // attempt to push a buffer into the ring buffer at the current "in" index\r
+ bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )\r
+ {\r
+ if ( !buffer || // incoming buffer is NULL\r
+ bufferSize == 0 || // incoming buffer has no data\r
+ bufferSize > bufferSize_ ) // incoming buffer too large\r
+ {\r
+ return false;\r
+ }\r
+\r
+ unsigned int relOutIndex = outIndex_;\r
+ unsigned int inIndexEnd = inIndex_ + bufferSize;\r
+ if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {\r
+ relOutIndex += bufferSize_;\r
+ }\r
+\r
+ // "in" index can end on the "out" index but cannot begin at it\r
+ if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {\r
+ return false; // not enough space between "in" index and "out" index\r
+ }\r
+\r
+ // copy buffer from external to internal\r
+ int fromZeroSize = inIndex_ + bufferSize - bufferSize_;\r
+ fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;\r
+ int fromInSize = bufferSize - fromZeroSize;\r
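+\r
+ // e.g. with bufferSize_ = 1024, inIndex_ = 900 and an incoming block of\r
+ // 200 samples: fromZeroSize = 76 and fromInSize = 124, so 124 samples are\r
+ // copied at index 900 and the remaining 76 wrap around to index 0.\r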
+\r
+ switch( format )\r
+ {\r
+ case RTAUDIO_SINT8:\r
+ memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );\r
+ memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );\r
+ break;\r
+ case RTAUDIO_SINT16:\r
+ memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );\r
+ memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );\r
+ break;\r
+ case RTAUDIO_SINT24:\r
+ memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );\r
+ memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );\r
+ break;\r
+ case RTAUDIO_SINT32:\r
+ memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );\r
+ memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );\r
+ break;\r
+ case RTAUDIO_FLOAT32:\r
+ memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );\r
+ memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );\r
+ break;\r
+ case RTAUDIO_FLOAT64:\r
+ memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );\r
+ memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );\r
+ break;\r
+ }\r
+\r
+ // update "in" index\r
+ inIndex_ += bufferSize;\r
+ inIndex_ %= bufferSize_;\r
+\r
+ return true;\r
+ }\r
+\r
+ // attempt to pull a buffer from the ring buffer from the current "out" index\r
+ bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )\r
+ {\r
+ if ( !buffer || // incoming buffer is NULL\r
+ bufferSize == 0 || // incoming buffer has no data\r
+ bufferSize > bufferSize_ ) // incoming buffer too large\r
+ {\r
+ return false;\r
+ }\r
+\r
+ unsigned int relInIndex = inIndex_;\r
+ unsigned int outIndexEnd = outIndex_ + bufferSize;\r
+ if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {\r
+ relInIndex += bufferSize_;\r
+ }\r
+\r
+ // "out" index can begin at and end on the "in" index\r
+ if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {\r
+ return false; // not enough space between "out" index and "in" index\r
+ }\r
+\r
+ // copy buffer from internal to external\r
+ int fromZeroSize = outIndex_ + bufferSize - bufferSize_;\r
+ fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;\r
+ int fromOutSize = bufferSize - fromZeroSize;\r
+\r
+ switch( format )\r
+ {\r
+ case RTAUDIO_SINT8:\r
+ memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );\r
+ memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );\r
+ break;\r
+ case RTAUDIO_SINT16:\r
+ memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );\r
+ memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );\r
+ break;\r
+ case RTAUDIO_SINT24:\r
+ memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );\r
+ memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );\r
+ break;\r
+ case RTAUDIO_SINT32:\r
+ memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );\r
+ memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );\r
+ break;\r
+ case RTAUDIO_FLOAT32:\r
+ memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );\r
+ memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );\r
+ break;\r
+ case RTAUDIO_FLOAT64:\r
+ memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );\r
+ memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );\r
+ break;\r
+ }\r
+\r
+ // update "out" index\r
+ outIndex_ += bufferSize;\r
+ outIndex_ %= bufferSize_;\r
+\r
+ return true;\r
+ }\r
+\r
+private:\r
+ char* buffer_;\r
+ unsigned int bufferSize_;\r
+ unsigned int inIndex_;\r
+ unsigned int outIndex_;\r
+};\r
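+\r
+// Usage sketch (illustrative only, not part of the library): moving one\r
+// hypothetical 512-frame stereo float block through the ring buffer. Note\r
+// that sizes are given in samples (frames * channels), matching setBufferSize.\r
+//\r
+//   WasapiBuffer ring;\r
+//   ring.setBufferSize( 4096 * 2, sizeof( float ) );  // capacity in samples\r
+//   float userOut[512 * 2];                           // filled by the user callback\r
+//   ring.pushBuffer( ( char* ) userOut, 512 * 2, RTAUDIO_FLOAT32 );\r
+//   float hwOut[512 * 2];                             // handed to the render client\r
+//   ring.pullBuffer( ( char* ) hwOut, 512 * 2, RTAUDIO_FLOAT32 );\r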
+\r
+//-----------------------------------------------------------------------------\r
+\r
+// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate\r
+// between HW and the user. The convertBufferWasapi function is used to perform this conversion\r
+// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.\r
+// This sample rate converter favors speed over quality, and works best with conversions between\r
+// one rate and its multiple.\r
+void convertBufferWasapi( char* outBuffer,\r
+ const char* inBuffer,\r
+ const unsigned int& channelCount,\r
+ const unsigned int& inSampleRate,\r
+ const unsigned int& outSampleRate,\r
+ const unsigned int& inSampleCount,\r
+ unsigned int& outSampleCount,\r
+ const RtAudioFormat& format )\r
+{\r
+ // calculate the new outSampleCount and relative sampleStep\r
+ float sampleRatio = ( float ) outSampleRate / inSampleRate;\r
+ float sampleStep = 1.0f / sampleRatio;\r
+ float inSampleFraction = 0.0f;\r
+\r
+ outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );\r
+\r
+ // frame-by-frame, copy each relative input sample into its corresponding output sample\r
+ for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )\r
+ {\r
+ unsigned int inSample = ( unsigned int ) inSampleFraction;\r
+\r
+ switch ( format )\r
+ {\r
+ case RTAUDIO_SINT8:\r
+ memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );\r
+ break;\r
+ case RTAUDIO_SINT16:\r
+ memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );\r
+ break;\r
+ case RTAUDIO_SINT24:\r
+ memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );\r
+ break;\r
+ case RTAUDIO_SINT32:\r
+ memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );\r
+ break;\r
+ case RTAUDIO_FLOAT32:\r
+ memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );\r
+ break;\r
+ case RTAUDIO_FLOAT64:\r
+ memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );\r
+ break;\r
+ }\r
+\r
+ // jump to next in sample\r
+ inSampleFraction += sampleStep;\r
+ }\r
+}\r
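+\r
+// Worked example (illustrative): converting a 441-frame block from 44100 Hz to\r
+// 48000 Hz gives outSampleCount = roundf( 441 * 48000.0f / 44100.0f ) = 480;\r
+// output frame n copies input frame (unsigned int)( n * 44100.0f / 48000.0f ),\r
+// i.e. nearest-neighbor (zero-order hold) resampling with no interpolation.\r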
+\r
+//-----------------------------------------------------------------------------\r
+\r
+// A structure to hold various information related to the WASAPI implementation.\r
+struct WasapiHandle\r
+{\r
+ IAudioClient* captureAudioClient;\r
+ IAudioClient* renderAudioClient;\r
+ IAudioCaptureClient* captureClient;\r
+ IAudioRenderClient* renderClient;\r
+ HANDLE captureEvent;\r
+ HANDLE renderEvent;\r
+\r
+ WasapiHandle()\r
+ : captureAudioClient( NULL ),\r
+ renderAudioClient( NULL ),\r
+ captureClient( NULL ),\r
+ renderClient( NULL ),\r
+ captureEvent( NULL ),\r
+ renderEvent( NULL ) {}\r
+};\r
+\r
+//=============================================================================\r
+\r
+RtApiWasapi::RtApiWasapi()\r
+ : coInitialized_( false ), deviceEnumerator_( NULL )\r
+{\r
+ // WASAPI can run in either apartment-threaded or multi-threaded mode\r
+ HRESULT hr = CoInitialize( NULL );\r
+ if ( !FAILED( hr ) )\r
+ coInitialized_ = true;\r
+\r
+ // Instantiate device enumerator\r
+ hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,\r
+ CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),\r
+ ( void** ) &deviceEnumerator_ );\r
+\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ }\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+RtApiWasapi::~RtApiWasapi()\r
+{\r
+ if ( stream_.state != STREAM_CLOSED )\r
+ closeStream();\r
+\r
+ SAFE_RELEASE( deviceEnumerator_ );\r
+\r
+ // If this object previously called CoInitialize()\r
+ if ( coInitialized_ )\r
+ CoUninitialize();\r
+}\r
+\r
+//=============================================================================\r
+\r
+unsigned int RtApiWasapi::getDeviceCount( void )\r
+{\r
+ unsigned int captureDeviceCount = 0;\r
+ unsigned int renderDeviceCount = 0;\r
+\r
+ IMMDeviceCollection* captureDevices = NULL;\r
+ IMMDeviceCollection* renderDevices = NULL;\r
+\r
+ // Count capture devices\r
+ errorText_.clear();\r
+ HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureDevices->GetCount( &captureDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";\r
+ goto Exit;\r
+ }\r
+\r
+ // Count render devices\r
+ hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderDevices->GetCount( &renderDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";\r
+ goto Exit;\r
+ }\r
+\r
+Exit:\r
+ // release all references\r
+ SAFE_RELEASE( captureDevices );\r
+ SAFE_RELEASE( renderDevices );\r
+\r
+ if ( errorText_.empty() )\r
+ return captureDeviceCount + renderDeviceCount;\r
+\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )\r
+{\r
+ RtAudio::DeviceInfo info;\r
+ unsigned int captureDeviceCount = 0;\r
+ unsigned int renderDeviceCount = 0;\r
+ std::string defaultDeviceName;\r
+ bool isCaptureDevice = false;\r
+\r
+ PROPVARIANT deviceNameProp;\r
+ PROPVARIANT defaultDeviceNameProp;\r
+\r
+ IMMDeviceCollection* captureDevices = NULL;\r
+ IMMDeviceCollection* renderDevices = NULL;\r
+ IMMDevice* devicePtr = NULL;\r
+ IMMDevice* defaultDevicePtr = NULL;\r
+ IAudioClient* audioClient = NULL;\r
+ IPropertyStore* devicePropStore = NULL;\r
+ IPropertyStore* defaultDevicePropStore = NULL;\r
+\r
+ WAVEFORMATEX* deviceFormat = NULL;\r
+ WAVEFORMATEX* closestMatchFormat = NULL;\r
+\r
+ // probed\r
+ info.probed = false;\r
+\r
+ // Count capture devices\r
+ errorText_.clear();\r
+ RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;\r
+ HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureDevices->GetCount( &captureDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";\r
+ goto Exit;\r
+ }\r
+\r
+ // Count render devices\r
+ hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderDevices->GetCount( &renderDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";\r
+ goto Exit;\r
+ }\r
+\r
+ // validate device index\r
+ if ( device >= captureDeviceCount + renderDeviceCount ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";\r
+ errorType = RtAudioError::INVALID_USE;\r
+ goto Exit;\r
+ }\r
+\r
+ // determine whether index falls within capture or render devices\r
+ if ( device >= renderDeviceCount ) {\r
+ hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";\r
+ goto Exit;\r
+ }\r
+ isCaptureDevice = true;\r
+ }\r
+ else {\r
+ hr = renderDevices->Item( device, &devicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";\r
+ goto Exit;\r
+ }\r
+ isCaptureDevice = false;\r
+ }\r
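+\r
+ // Example (illustrative): with 3 render and 2 capture endpoints, device\r
+ // indices 0-2 map to render devices and indices 3-4 map to capture\r
+ // devices 0-1 (render devices are enumerated first).\r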
+\r
+ // get default device name\r
+ if ( isCaptureDevice ) {\r
+ hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";\r
+ goto Exit;\r
+ }\r
+ }\r
+ else {\r
+ hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";\r
+ goto Exit;\r
+ }\r
+ }\r
+\r
+ hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";\r
+ goto Exit;\r
+ }\r
+ PropVariantInit( &defaultDeviceNameProp );\r
+\r
+ hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";\r
+ goto Exit;\r
+ }\r
+\r
+ defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);\r
+\r
+ // name\r
+ hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";\r
+ goto Exit;\r
+ }\r
+\r
+ PropVariantInit( &deviceNameProp );\r
+\r
+ hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";\r
+ goto Exit;\r
+ }\r
+\r
+ info.name = convertCharPointerToStdString(deviceNameProp.pwszVal);\r
+\r
+ // is default\r
+ if ( isCaptureDevice ) {\r
+ info.isDefaultInput = info.name == defaultDeviceName;\r
+ info.isDefaultOutput = false;\r
+ }\r
+ else {\r
+ info.isDefaultInput = false;\r
+ info.isDefaultOutput = info.name == defaultDeviceName;\r
+ }\r
+\r
+ // channel count\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = audioClient->GetMixFormat( &deviceFormat );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";\r
+ goto Exit;\r
+ }\r
+\r
+ if ( isCaptureDevice ) {\r
+ info.inputChannels = deviceFormat->nChannels;\r
+ info.outputChannels = 0;\r
+ info.duplexChannels = 0;\r
+ }\r
+ else {\r
+ info.inputChannels = 0;\r
+ info.outputChannels = deviceFormat->nChannels;\r
+ info.duplexChannels = 0;\r
+ }\r
+\r
+ // sample rates\r
+ info.sampleRates.clear();\r
+\r
+ // allow support for all sample rates as we have a built-in sample rate converter\r
+ for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {\r
+ info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+ }\r
+ info.preferredSampleRate = deviceFormat->nSamplesPerSec;\r
+\r
+ // native format\r
+ info.nativeFormats = 0;\r
+\r
+ if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||\r
+ ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&\r
+ ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )\r
+ {\r
+ if ( deviceFormat->wBitsPerSample == 32 ) {\r
+ info.nativeFormats |= RTAUDIO_FLOAT32;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 64 ) {\r
+ info.nativeFormats |= RTAUDIO_FLOAT64;\r
+ }\r
+ }\r
+ else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||\r
+ ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&\r
+ ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )\r
+ {\r
+ if ( deviceFormat->wBitsPerSample == 8 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT8;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 16 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT16;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 24 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT24;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 32 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT32;\r
+ }\r
+ }\r
+\r
+ // probed\r
+ info.probed = true;\r
+\r
+Exit:\r
+ // release all references\r
+ PropVariantClear( &deviceNameProp );\r
+ PropVariantClear( &defaultDeviceNameProp );\r
+\r
+ SAFE_RELEASE( captureDevices );\r
+ SAFE_RELEASE( renderDevices );\r
+ SAFE_RELEASE( devicePtr );\r
+ SAFE_RELEASE( defaultDevicePtr );\r
+ SAFE_RELEASE( audioClient );\r
+ SAFE_RELEASE( devicePropStore );\r
+ SAFE_RELEASE( defaultDevicePropStore );\r
+\r
+ CoTaskMemFree( deviceFormat );\r
+ CoTaskMemFree( closestMatchFormat );\r
+\r
+ if ( !errorText_.empty() )\r
+ error( errorType );\r
+ return info;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+unsigned int RtApiWasapi::getDefaultOutputDevice( void )\r
+{\r
+ for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {\r
+ if ( getDeviceInfo( i ).isDefaultOutput ) {\r
+ return i;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+unsigned int RtApiWasapi::getDefaultInputDevice( void )\r
+{\r
+ for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {\r
+ if ( getDeviceInfo( i ).isDefaultInput ) {\r
+ return i;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::closeStream( void )\r
+{\r
+ if ( stream_.state == STREAM_CLOSED ) {\r
+ errorText_ = "RtApiWasapi::closeStream: No open stream to close.";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ if ( stream_.state != STREAM_STOPPED )\r
+ stopStream();\r
+\r
+ // clean up stream memory\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )\r
+\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )\r
+\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )\r
+ CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );\r
+\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )\r
+ CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );\r
+\r
+ delete ( WasapiHandle* ) stream_.apiHandle;\r
+ stream_.apiHandle = NULL;\r
+\r
+ for ( int i = 0; i < 2; i++ ) {\r
+ if ( stream_.userBuffer[i] ) {\r
+ free( stream_.userBuffer[i] );\r
+ stream_.userBuffer[i] = 0;\r
+ }\r
+ }\r
+\r
+ if ( stream_.deviceBuffer ) {\r
+ free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = 0;\r
+ }\r
+\r
+ // update stream state\r
+ stream_.state = STREAM_CLOSED;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::startStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_RUNNING ) {\r
+ errorText_ = "RtApiWasapi::startStream: The stream is already running.";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // update stream state\r
+ stream_.state = STREAM_RUNNING;\r
+\r
+  // create the WASAPI stream thread, suspended so its priority can be set before it starts running\r
+ stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );\r
+\r
+ if ( !stream_.callbackInfo.thread ) {\r
+ errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ }\r
+ else {\r
+ SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );\r
+ ResumeThread( ( void* ) stream_.callbackInfo.thread );\r
+ }\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::stopStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // inform stream thread by setting stream state to STREAM_STOPPING\r
+ stream_.state = STREAM_STOPPING;\r
+\r
+ // wait until stream thread is stopped\r
+  while ( stream_.state != STREAM_STOPPED ) {\r
+ Sleep( 1 );\r
+ }\r
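+  // ( the stream thread sets STREAM_STOPPED when it exits its processing loop )\r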
+\r
+ // Wait for the last buffer to play before stopping.\r
+ Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );\r
+\r
+ // stop capture client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ return;\r
+ }\r
+ }\r
+\r
+ // stop render client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ return;\r
+ }\r
+ }\r
+\r
+ // close thread handle\r
+ if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ return;\r
+ }\r
+\r
+ stream_.callbackInfo.thread = (ThreadHandle) NULL;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::abortStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // inform stream thread by setting stream state to STREAM_STOPPING\r
+ stream_.state = STREAM_STOPPING;\r
+\r
+ // wait until stream thread is stopped\r
+ while ( stream_.state != STREAM_STOPPED ) {\r
+ Sleep( 1 );\r
+ }\r
+\r
+ // stop capture client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ return;\r
+ }\r
+ }\r
+\r
+ // stop render client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ return;\r
+ }\r
+ }\r
+\r
+ // close thread handle\r
+ if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {\r
+ errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ return;\r
+ }\r
+\r
+ stream_.callbackInfo.thread = (ThreadHandle) NULL;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,\r
+ unsigned int firstChannel, unsigned int sampleRate,\r
+ RtAudioFormat format, unsigned int* bufferSize,\r
+ RtAudio::StreamOptions* options )\r
+{\r
+ bool methodResult = FAILURE;\r
+ unsigned int captureDeviceCount = 0;\r
+ unsigned int renderDeviceCount = 0;\r
+\r
+ IMMDeviceCollection* captureDevices = NULL;\r
+ IMMDeviceCollection* renderDevices = NULL;\r
+ IMMDevice* devicePtr = NULL;\r
+ WAVEFORMATEX* deviceFormat = NULL;\r
+ unsigned int bufferBytes;\r
+ stream_.state = STREAM_STOPPED;\r
+\r
+ // create API Handle if not already created\r
+ if ( !stream_.apiHandle )\r
+ stream_.apiHandle = ( void* ) new WasapiHandle();\r
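+  // the WasapiHandle bundles the stream's audio clients, capture/render service interfaces and event handles\r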
+\r
+ // Count capture devices\r
+ errorText_.clear();\r
+ RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;\r
+ HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureDevices->GetCount( &captureDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";\r
+ goto Exit;\r
+ }\r
+\r
+ // Count render devices\r
+ hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderDevices->GetCount( &renderDeviceCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";\r
+ goto Exit;\r
+ }\r
+\r
+ // validate device index\r
+ if ( device >= captureDeviceCount + renderDeviceCount ) {\r
+ errorType = RtAudioError::INVALID_USE;\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";\r
+ goto Exit;\r
+ }\r
+\r
+ // determine whether index falls within capture or render devices\r
+ if ( device >= renderDeviceCount ) {\r
+ if ( mode != INPUT ) {\r
+ errorType = RtAudioError::INVALID_USE;\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";\r
+ goto Exit;\r
+ }\r
+\r
+ // retrieve captureAudioClient from devicePtr\r
+ IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;\r
+\r
+ hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,\r
+ NULL, ( void** ) &captureAudioClient );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureAudioClient->GetMixFormat( &deviceFormat );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";\r
+ goto Exit;\r
+ }\r
+\r
+ stream_.nDeviceChannels[mode] = deviceFormat->nChannels;\r
+ captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );\r
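+    // note: GetStreamLatency reports a REFERENCE_TIME value (100-nanosecond units)\r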
+ }\r
+ else {\r
+ if ( mode != OUTPUT ) {\r
+ errorType = RtAudioError::INVALID_USE;\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";\r
+ goto Exit;\r
+ }\r
+\r
+ // retrieve renderAudioClient from devicePtr\r
+ IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;\r
+\r
+ hr = renderDevices->Item( device, &devicePtr );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,\r
+ NULL, ( void** ) &renderAudioClient );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderAudioClient->GetMixFormat( &deviceFormat );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";\r
+ goto Exit;\r
+ }\r
+\r
+ stream_.nDeviceChannels[mode] = deviceFormat->nChannels;\r
+ renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );\r
+ }\r
+\r
+ // fill stream data\r
+ if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||\r
+ ( stream_.mode == INPUT && mode == OUTPUT ) ) {\r
+ stream_.mode = DUPLEX;\r
+ }\r
+ else {\r
+ stream_.mode = mode;\r
+ }\r
+\r
+ stream_.device[mode] = device;\r
+ stream_.doByteSwap[mode] = false;\r
+ stream_.sampleRate = sampleRate;\r
+ stream_.bufferSize = *bufferSize;\r
+ stream_.nBuffers = 1;\r
+ stream_.nUserChannels[mode] = channels;\r
+ stream_.channelOffset[mode] = firstChannel;\r
+ stream_.userFormat = format;\r
+ stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;\r
+\r
+ if ( options && options->flags & RTAUDIO_NONINTERLEAVED )\r
+ stream_.userInterleaved = false;\r
+ else\r
+ stream_.userInterleaved = true;\r
+ stream_.deviceInterleaved[mode] = true;\r
+\r
+ // Set flags for buffer conversion.\r
+ stream_.doConvertBuffer[mode] = false;\r
+ if ( stream_.userFormat != stream_.deviceFormat[mode] ||\r
+       stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode] ) // compare per-mode counts, not the array addresses\r
+ stream_.doConvertBuffer[mode] = true;\r
+ else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&\r
+ stream_.nUserChannels[mode] > 1 )\r
+ stream_.doConvertBuffer[mode] = true;\r
+\r
+ if ( stream_.doConvertBuffer[mode] )\r
+ setConvertInfo( mode, 0 );\r
+\r
+ // Allocate necessary internal buffers\r
+ bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
+\r
+ stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );\r
+ if ( !stream_.userBuffer[mode] ) {\r
+ errorType = RtAudioError::MEMORY_ERROR;\r
+ errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";\r
+ goto Exit;\r
+ }\r
+\r
+ if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )\r
+ stream_.callbackInfo.priority = 15;\r
+ else\r
+ stream_.callbackInfo.priority = 0;\r
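+  // ( a priority of 15 maps to THREAD_PRIORITY_TIME_CRITICAL; 0 to THREAD_PRIORITY_NORMAL )\r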
+\r
+ ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback\r
+ ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode\r
+\r
+ methodResult = SUCCESS;\r
+\r
+Exit:\r
+  // clean up\r
+ SAFE_RELEASE( captureDevices );\r
+ SAFE_RELEASE( renderDevices );\r
+ SAFE_RELEASE( devicePtr );\r
+ CoTaskMemFree( deviceFormat );\r
+\r
+ // if method failed, close the stream\r
+ if ( methodResult == FAILURE )\r
+ closeStream();\r
+\r
+ if ( !errorText_.empty() )\r
+ error( errorType );\r
+ return methodResult;\r
+}\r
+\r
+//=============================================================================\r
+\r
+DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();\r
+\r
+ return 0;\r
+}\r
+\r
+DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->stopStream();\r
+\r
+ return 0;\r
+}\r
+\r
+DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->abortStream();\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::wasapiThread()\r
+{\r
+ // as this is a new thread, we must CoInitialize it\r
+ CoInitialize( NULL );\r
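+  // ( balanced by the CoUninitialize call in the Exit cleanup below )\r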
+\r
+ HRESULT hr;\r
+\r
+ IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;\r
+ IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;\r
+ IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;\r
+ IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;\r
+ HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;\r
+ HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;\r
+\r
+ WAVEFORMATEX* captureFormat = NULL;\r
+ WAVEFORMATEX* renderFormat = NULL;\r
+ float captureSrRatio = 0.0f;\r
+ float renderSrRatio = 0.0f;\r
+ WasapiBuffer captureBuffer;\r
+ WasapiBuffer renderBuffer;\r
+\r
+ // declare local stream variables\r
+ RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;\r
+ BYTE* streamBuffer = NULL;\r
+ unsigned long captureFlags = 0;\r
+ unsigned int bufferFrameCount = 0;\r
+ unsigned int numFramesPadding = 0;\r
+ unsigned int convBufferSize = 0;\r
+ bool callbackPushed = false;\r
+ bool callbackPulled = false;\r
+ bool callbackStopped = false;\r
+ int callbackResult = 0;\r
+\r
+ // convBuffer is used to store converted buffers between WASAPI and the user\r
+ char* convBuffer = NULL;\r
+ unsigned int convBuffSize = 0;\r
+ unsigned int deviceBuffSize = 0;\r
+\r
+ errorText_.clear();\r
+ RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;\r
+\r
+ // Attempt to assign "Pro Audio" characteristic to thread\r
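+  // ( MMCSS registration gives the thread elevated, glitch-resistant scheduling )\r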
+  HMODULE AvrtDll = LoadLibraryW( L"AVRT.dll" ); // wide-character call works in both ANSI and UNICODE builds\r
+ if ( AvrtDll ) {\r
+ DWORD taskIndex = 0;\r
+ TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );\r
+ AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );\r
+ FreeLibrary( AvrtDll );\r
+ }\r
+\r
+ // start capture stream if applicable\r
+ if ( captureAudioClient ) {\r
+ hr = captureAudioClient->GetMixFormat( &captureFormat );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";\r
+ goto Exit;\r
+ }\r
+\r
+ captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );\r
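+    // ratio of the device mix rate to the user rate, used to scale buffer sizes for rate conversion\r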
+\r
+    // initialize capture stream according to desired buffer size\r
+ float desiredBufferSize = stream_.bufferSize * captureSrRatio;\r
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );\r
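+    // ( REFERENCE_TIME is expressed in 100-nanosecond units, hence the factor of 10,000,000 )\r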
+\r
+ if ( !captureClient ) {\r
+ hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,\r
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,\r
+ desiredBufferPeriod,\r
+ desiredBufferPeriod,\r
+ captureFormat,\r
+ NULL );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),\r
+ ( void** ) &captureClient );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ // configure captureEvent to trigger on every available capture buffer\r
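+      // ( auto-reset, initially non-signaled; WASAPI signals it each time a buffer period completes )\r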
+ captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );\r
+ if ( !captureEvent ) {\r
+ errorType = RtAudioError::SYSTEM_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = captureAudioClient->SetEventHandle( captureEvent );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;\r
+ }\r
+\r
+ unsigned int inBufferSize = 0;\r
+ hr = captureAudioClient->GetBufferSize( &inBufferSize );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";\r
+ goto Exit;\r
+ }\r
+\r
+ // scale outBufferSize according to stream->user sample rate ratio\r
+ unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];\r
+ inBufferSize *= stream_.nDeviceChannels[INPUT];\r
+\r
+    // size the capture ring buffer to hold a full device buffer plus one rate-converted user buffer\r
+ captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );\r
+\r
+ // reset the capture stream\r
+ hr = captureAudioClient->Reset();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";\r
+ goto Exit;\r
+ }\r
+\r
+ // start the capture stream\r
+ hr = captureAudioClient->Start();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";\r
+ goto Exit;\r
+ }\r
+ }\r
+\r
+ // start render stream if applicable\r
+ if ( renderAudioClient ) {\r
+ hr = renderAudioClient->GetMixFormat( &renderFormat );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";\r
+ goto Exit;\r
+ }\r
+\r
+ renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );\r
+\r
+    // initialize render stream according to desired buffer size\r
+ float desiredBufferSize = stream_.bufferSize * renderSrRatio;\r
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );\r
+\r
+ if ( !renderClient ) {\r
+ hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,\r
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,\r
+ desiredBufferPeriod,\r
+ desiredBufferPeriod,\r
+ renderFormat,\r
+ NULL );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),\r
+ ( void** ) &renderClient );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ // configure renderEvent to trigger on every available render buffer\r
+ renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );\r
+ if ( !renderEvent ) {\r
+ errorType = RtAudioError::SYSTEM_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";\r
+ goto Exit;\r
+ }\r
+\r
+ hr = renderAudioClient->SetEventHandle( renderEvent );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;\r
+ }\r
+\r
+ unsigned int outBufferSize = 0;\r
+ hr = renderAudioClient->GetBufferSize( &outBufferSize );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";\r
+ goto Exit;\r
+ }\r
+\r
+ // scale inBufferSize according to user->stream sample rate ratio\r
+ unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];\r
+ outBufferSize *= stream_.nDeviceChannels[OUTPUT];\r
+\r
+ // set renderBuffer size\r
+ renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+\r
+ // reset the render stream\r
+ hr = renderAudioClient->Reset();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";\r
+ goto Exit;\r
+ }\r
+\r
+ // start the render stream\r
+ hr = renderAudioClient->Start();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";\r
+ goto Exit;\r
+ }\r
+ }\r
+\r
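+  // size the conversion and device buffers to cover the larger of the capture and render requirements\r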
+ if ( stream_.mode == INPUT ) {\r
+ convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );\r
+ deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );\r
+ }\r
+ else if ( stream_.mode == OUTPUT ) {\r
+ convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );\r
+ deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );\r
+ }\r
+ else if ( stream_.mode == DUPLEX ) {\r
+ convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),\r
+ ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+ deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),\r
+ stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+ }\r
+\r
+ convBuffer = ( char* ) malloc( convBuffSize );\r
+ stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );\r
+ if ( !convBuffer || !stream_.deviceBuffer ) {\r
+ errorType = RtAudioError::MEMORY_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";\r
+ goto Exit;\r
+ }\r
+\r
+ // stream process loop\r
+ while ( stream_.state != STREAM_STOPPING ) {\r
+ if ( !callbackPulled ) {\r
+ // Callback Input\r
+ // ==============\r
+ // 1. Pull callback buffer from inputBuffer\r
+ // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count\r
+ // Convert callback buffer to user format\r
+\r
+ if ( captureAudioClient ) {\r
+ // Pull callback buffer from inputBuffer\r
+ callbackPulled = captureBuffer.pullBuffer( convBuffer,\r
+ ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],\r
+ stream_.deviceFormat[INPUT] );\r
+\r
+ if ( callbackPulled ) {\r
+ // Convert callback buffer to user sample rate\r
+ convertBufferWasapi( stream_.deviceBuffer,\r
+ convBuffer,\r
+ stream_.nDeviceChannels[INPUT],\r
+ captureFormat->nSamplesPerSec,\r
+ stream_.sampleRate,\r
+ ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),\r
+ convBufferSize,\r
+ stream_.deviceFormat[INPUT] );\r
+\r
+ if ( stream_.doConvertBuffer[INPUT] ) {\r
+ // Convert callback buffer to user format\r
+ convertBuffer( stream_.userBuffer[INPUT],\r
+ stream_.deviceBuffer,\r
+ stream_.convertInfo[INPUT] );\r
+ }\r
+ else {\r
+            // no further conversion; simply copy deviceBuffer to userBuffer\r
+ memcpy( stream_.userBuffer[INPUT],\r
+ stream_.deviceBuffer,\r
+ stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );\r
+ }\r
+ }\r
+ }\r
+ else {\r
+ // if there is no capture stream, set callbackPulled flag\r
+ callbackPulled = true;\r
+ }\r
+\r
+ // Execute Callback\r
+ // ================\r
+ // 1. Execute user callback method\r
+ // 2. Handle return value from callback\r
+\r
+ // if callback has not requested the stream to stop\r
+ if ( callbackPulled && !callbackStopped ) {\r
+ // Execute user callback method\r
+ callbackResult = callback( stream_.userBuffer[OUTPUT],\r
+ stream_.userBuffer[INPUT],\r
+ stream_.bufferSize,\r
+ getStreamTime(),\r
+ captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,\r
+ stream_.callbackInfo.userData );\r
+\r
+ // Handle return value from callback\r
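+        // ( 0 = continue streaming; 1 = stop and drain; 2 = abort immediately )\r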
+ if ( callbackResult == 1 ) {\r
+          // stopStream() waits for this thread to exit, so it must run on a separate thread to avoid deadlock\r
+ HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );\r
+ if ( !threadHandle ) {\r
+ errorType = RtAudioError::THREAD_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";\r
+ goto Exit;\r
+ }\r
+ else if ( !CloseHandle( threadHandle ) ) {\r
+ errorType = RtAudioError::THREAD_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ callbackStopped = true;\r
+ }\r
+ else if ( callbackResult == 2 ) {\r
+          // as above, abortStream() must run on its own thread to avoid deadlocking this one\r
+ HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );\r
+ if ( !threadHandle ) {\r
+ errorType = RtAudioError::THREAD_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";\r
+ goto Exit;\r
+ }\r
+ else if ( !CloseHandle( threadHandle ) ) {\r
+ errorType = RtAudioError::THREAD_ERROR;\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";\r
+ goto Exit;\r
+ }\r
+\r
+ callbackStopped = true;\r
+ }\r
}\r
-\r
}\r
- else if ( stream_.doConvertBuffer[0] ) {\r
\r
- convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );\r
- if ( stream_.doByteSwap[0] )\r
- byteSwapBuffer( stream_.deviceBuffer,\r
- stream_.bufferSize * stream_.nDeviceChannels[0],\r
- stream_.deviceFormat[0] );\r
+ // Callback Output\r
+ // ===============\r
+ // 1. Convert callback buffer to stream format\r
+ // 2. Convert callback buffer to stream sample rate and channel count\r
+ // 3. Push callback buffer into outputBuffer\r
+\r
+ if ( renderAudioClient && callbackPulled ) {\r
+ if ( stream_.doConvertBuffer[OUTPUT] ) {\r
+ // Convert callback buffer to stream format\r
+ convertBuffer( stream_.deviceBuffer,\r
+ stream_.userBuffer[OUTPUT],\r
+ stream_.convertInfo[OUTPUT] );\r
\r
- for ( i=0, j=0; i<nChannels; i++ ) {\r
- if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
- memcpy( handle->bufferInfos[i].buffers[bufferIndex],\r
- &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );\r
}\r
\r
+ // Convert callback buffer to stream sample rate\r
+ convertBufferWasapi( convBuffer,\r
+ stream_.deviceBuffer,\r
+ stream_.nDeviceChannels[OUTPUT],\r
+ stream_.sampleRate,\r
+ renderFormat->nSamplesPerSec,\r
+ stream_.bufferSize,\r
+ convBufferSize,\r
+ stream_.deviceFormat[OUTPUT] );\r
+\r
+ // Push callback buffer into outputBuffer\r
+ callbackPushed = renderBuffer.pushBuffer( convBuffer,\r
+ convBufferSize * stream_.nDeviceChannels[OUTPUT],\r
+ stream_.deviceFormat[OUTPUT] );\r
}\r
else {\r
+ // if there is no render stream, set callbackPushed flag\r
+ callbackPushed = true;\r
+ }\r
\r
- if ( stream_.doByteSwap[0] )\r
- byteSwapBuffer( stream_.userBuffer[0],\r
- stream_.bufferSize * stream_.nUserChannels[0],\r
- stream_.userFormat );\r
+ // Stream Capture\r
+ // ==============\r
+ // 1. Get capture buffer from stream\r
+ // 2. Push capture buffer into inputBuffer\r
+ // 3. If 2. was successful: Release capture buffer\r
\r
- for ( i=0, j=0; i<nChannels; i++ ) {\r
- if ( handle->bufferInfos[i].isInput != ASIOTrue )\r
- memcpy( handle->bufferInfos[i].buffers[bufferIndex],\r
- &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );\r
+ if ( captureAudioClient ) {\r
+ // if the callback input buffer was not pulled from captureBuffer, wait for next capture event\r
+ if ( !callbackPulled ) {\r
+ WaitForSingleObject( captureEvent, INFINITE );\r
}\r
\r
- }\r
+ // Get capture buffer from stream\r
+ hr = captureClient->GetBuffer( &streamBuffer,\r
+ &bufferFrameCount,\r
+ &captureFlags, NULL, NULL );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";\r
+ goto Exit;\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
+ if ( bufferFrameCount != 0 ) {\r
+ // Push capture buffer into inputBuffer\r
+ if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,\r
+ bufferFrameCount * stream_.nDeviceChannels[INPUT],\r
+ stream_.deviceFormat[INPUT] ) )\r
+ {\r
+ // Release capture buffer\r
+ hr = captureClient->ReleaseBuffer( bufferFrameCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that capture was unsuccessful\r
+ hr = captureClient->ReleaseBuffer( 0 );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that capture was unsuccessful\r
+ hr = captureClient->ReleaseBuffer( 0 );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
}\r
- }\r
\r
- if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
+ // Stream Render\r
+ // =============\r
+ // 1. Get render buffer from stream\r
+ // 2. Pull next buffer from outputBuffer\r
+ // 3. If 2. was successful: Fill render buffer with next buffer\r
+ // Release render buffer\r
\r
- bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);\r
+ if ( renderAudioClient ) {\r
+ // if the callback output buffer was not pushed to renderBuffer, wait for next render event\r
+ if ( callbackPulled && !callbackPushed ) {\r
+ WaitForSingleObject( renderEvent, INFINITE );\r
+ }\r
\r
- if (stream_.doConvertBuffer[1]) {\r
+ // Get render buffer from stream\r
+ hr = renderAudioClient->GetBufferSize( &bufferFrameCount );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";\r
+ goto Exit;\r
+ }\r
\r
- // Always interleave ASIO input data.\r
- for ( i=0, j=0; i<nChannels; i++ ) {\r
- if ( handle->bufferInfos[i].isInput == ASIOTrue )\r
- memcpy( &stream_.deviceBuffer[j++*bufferBytes],\r
- handle->bufferInfos[i].buffers[bufferIndex],\r
- bufferBytes );\r
+ hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";\r
+ goto Exit;\r
}\r
\r
- if ( stream_.doByteSwap[1] )\r
- byteSwapBuffer( stream_.deviceBuffer,\r
- stream_.bufferSize * stream_.nDeviceChannels[1],\r
- stream_.deviceFormat[1] );\r
- convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );\r
+ bufferFrameCount -= numFramesPadding;\r
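+        // writable frames = total buffer size minus frames still queued ( padding ) for playback\r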
\r
- }\r
- else {\r
- for ( i=0, j=0; i<nChannels; i++ ) {\r
- if ( handle->bufferInfos[i].isInput == ASIOTrue ) {\r
- memcpy( &stream_.userBuffer[1][bufferBytes*j++],\r
- handle->bufferInfos[i].buffers[bufferIndex],\r
- bufferBytes );\r
+ if ( bufferFrameCount != 0 ) {\r
+ hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";\r
+ goto Exit;\r
}\r
- }\r
\r
- if ( stream_.doByteSwap[1] )\r
- byteSwapBuffer( stream_.userBuffer[1],\r
- stream_.bufferSize * stream_.nUserChannels[1],\r
- stream_.userFormat );\r
+ // Pull next buffer from outputBuffer\r
+ // Fill render buffer with next buffer\r
+ if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,\r
+ bufferFrameCount * stream_.nDeviceChannels[OUTPUT],\r
+ stream_.deviceFormat[OUTPUT] ) )\r
+ {\r
+ // Release render buffer\r
+ hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that render was unsuccessful\r
+ hr = renderClient->ReleaseBuffer( 0, 0 );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that render was unsuccessful\r
+ hr = renderClient->ReleaseBuffer( 0, 0 );\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";\r
+ goto Exit;\r
+ }\r
+ }\r
}\r
- }\r
-\r
- unlock:\r
- // The following call was suggested by Malte Clasen. While the API\r
- // documentation indicates it should not be required, some device\r
- // drivers apparently do not function correctly without it.\r
- ASIOOutputReady();\r
-\r
- RtApi::tickStreamTime();\r
- return SUCCESS;\r
-}\r
\r
-static void sampleRateChanged( ASIOSampleRate sRate )\r
-{\r
- // The ASIO documentation says that this usually only happens during\r
- // external sync. Audio processing is not stopped by the driver,\r
- // actual sample rate might not have even changed, maybe only the\r
- // sample rate status of an AES/EBU or S/PDIF digital input at the\r
- // audio device.\r
+    // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag\r
+ if ( callbackPushed ) {\r
+ callbackPulled = false;\r
+ // tick stream time\r
+ RtApi::tickStreamTime();\r
+ }\r
\r
- RtApi *object = (RtApi *) asioCallbackInfo->object;\r
- try {\r
- object->stopStream();\r
- }\r
- catch ( RtAudioError &exception ) {\r
- std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;\r
- return;\r
}\r
\r
- std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;\r
-}\r
-\r
-static long asioMessages( long selector, long value, void* message, double* opt )\r
-{\r
- long ret = 0;\r
-\r
- switch( selector ) {\r
- case kAsioSelectorSupported:\r
- if ( value == kAsioResetRequest\r
- || value == kAsioEngineVersion\r
- || value == kAsioResyncRequest\r
- || value == kAsioLatenciesChanged\r
- // The following three were added for ASIO 2.0, you don't\r
- // necessarily have to support them.\r
- || value == kAsioSupportsTimeInfo\r
- || value == kAsioSupportsTimeCode\r
- || value == kAsioSupportsInputMonitor)\r
- ret = 1L;\r
- break;\r
- case kAsioResetRequest:\r
- // Defer the task and perform the reset of the driver during the\r
- // next "safe" situation. You cannot reset the driver right now,\r
- // as this code is called from the driver. Reset the driver is\r
- // done by completely destruct is. I.e. ASIOStop(),\r
- // ASIODisposeBuffers(), Destruction Afterwards you initialize the\r
- // driver again.\r
- std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;\r
- ret = 1L;\r
- break;\r
- case kAsioResyncRequest:\r
- // This informs the application that the driver encountered some\r
- // non-fatal data loss. It is used for synchronization purposes\r
- // of different media. Added mainly to work around the Win16Mutex\r
- // problems in Windows 95/98 with the Windows Multimedia system,\r
- // which could lose data because the Mutex was held too long by\r
- // another thread. However a driver can issue it in other\r
- // situations, too.\r
- // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;\r
- asioXRun = true;\r
- ret = 1L;\r
- break;\r
- case kAsioLatenciesChanged:\r
- // This will inform the host application that the drivers were\r
- // latencies changed. Beware, it this does not mean that the\r
- // buffer sizes have changed! You might need to update internal\r
- // delay data.\r
- std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;\r
- ret = 1L;\r
- break;\r
- case kAsioEngineVersion:\r
- // Return the supported ASIO version of the host application. If\r
- // a host application does not implement this selector, ASIO 1.0\r
- // is assumed by the driver.\r
- ret = 2L;\r
- break;\r
- case kAsioSupportsTimeInfo:\r
- // Informs the driver whether the\r
- // asioCallbacks.bufferSwitchTimeInfo() callback is supported.\r
- // For compatibility with ASIO 1.0 drivers the host application\r
- // should always support the "old" bufferSwitch method, too.\r
- ret = 0;\r
- break;\r
- case kAsioSupportsTimeCode:\r
- // Informs the driver whether application is interested in time\r
- // code info. If an application does not need to know about time\r
- // code, the driver has less work to do.\r
- ret = 0;\r
- break;\r
- }\r
- return ret;\r
-}\r
+Exit:\r
+ // clean up\r
+ CoTaskMemFree( captureFormat );\r
+ CoTaskMemFree( renderFormat );\r
\r
-static const char* getAsioErrorString( ASIOError result )\r
-{\r
- struct Messages \r
- {\r
- ASIOError value;\r
- const char*message;\r
- };\r
+ free ( convBuffer );\r
\r
- static const Messages m[] = \r
- {\r
- { ASE_NotPresent, "Hardware input or output is not present or available." },\r
- { ASE_HWMalfunction, "Hardware is malfunctioning." },\r
- { ASE_InvalidParameter, "Invalid input parameter." },\r
- { ASE_InvalidMode, "Invalid mode." },\r
- { ASE_SPNotAdvancing, "Sample position not advancing." },\r
- { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },\r
- { ASE_NoMemory, "Not enough memory to complete the request." }\r
- };\r
+ CoUninitialize();\r
\r
- for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )\r
- if ( m[i].value == result ) return m[i].message;\r
+ // update stream state\r
+ stream_.state = STREAM_STOPPED;\r
\r
- return "Unknown error.";\r
+ if ( errorText_.empty() )\r
+ return;\r
+ else\r
+ error( errorType );\r
}\r
-//******************** End of __WINDOWS_ASIO__ *********************//\r
+\r
+//******************** End of __WINDOWS_WASAPI__ *********************//\r
#endif\r
\r
\r
error( RtAudioError::WARNING );\r
}\r
\r
- // Clean out any devices that may have disappeared.\r
- std::vector< int > indices;\r
- for ( unsigned int i=0; i<dsDevices.size(); i++ )\r
- if ( dsDevices[i].found == false ) indices.push_back( i );\r
- unsigned int nErased = 0;\r
- for ( unsigned int i=0; i<indices.size(); i++ )\r
- dsDevices.erase( dsDevices.begin()-nErased++ );\r
+ // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).\r
+ for ( unsigned int i=0; i<dsDevices.size(); ) {\r
+ if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );\r
+ else i++;\r
+ }\r
\r
- return dsDevices.size();\r
+ return static_cast<unsigned int>(dsDevices.size());\r
}\r
\r
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )\r
info.sampleRates.clear();\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&\r
- SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )\r
+ SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
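+      // track the highest supported rate up to 48 kHz as the preferred default\r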
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
}\r
\r
// Get format information.\r
return FAILURE;\r
}\r
\r
- unsigned int nDevices = dsDevices.size();\r
+ size_t nDevices = dsDevices.size();\r
if ( nDevices == 0 ) {\r
// This should not happen because a check is made before this function is called.\r
errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";\r
\r
stream_.state = STREAM_STOPPED;\r
\r
+ MUTEX_LOCK( &stream_.mutex );\r
+\r
// Stop the buffer and clear memory\r
LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];\r
result = buffer->Stop();\r
\r
stream_.state = STREAM_STOPPED;\r
\r
+ if ( stream_.mode != DUPLEX )\r
+ MUTEX_LOCK( &stream_.mutex );\r
+\r
result = buffer->Stop();\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";\r
\r
unlock:\r
timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
+\r
if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );\r
}\r
\r
char *buffer;\r
long bufferBytes;\r
\r
+ MUTEX_LOCK( &stream_.mutex );\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
+ return;\r
+ }\r
+\r
if ( buffersRolling == false ) {\r
if ( stream_.mode == DUPLEX ) {\r
//assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;\r
handle->bufferPointer[0] = nextWritePointer;\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
}\r
\r
unlock:\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
RtApi::tickStreamTime();\r
}\r
\r
return 0;\r
}\r
\r
-#include "tchar.h"\r
-\r
-static std::string convertTChar( LPCTSTR name )\r
-{\r
-#if defined( UNICODE ) || defined( _UNICODE )\r
- int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);\r
- std::string s( length-1, '\0' );\r
- WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);\r
-#else\r
- std::string s( name );\r
-#endif\r
-\r
- return s;\r
-}\r
-\r
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,\r
LPCTSTR description,\r
- LPCTSTR module,\r
+ LPCTSTR /*module*/,\r
LPVOID lpContext )\r
{\r
struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;\r
}\r
\r
// If good device, then save its name and guid.\r
- std::string name = convertTChar( description );\r
+ std::string name = convertCharPointerToStdString( description );\r
//if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )\r
if ( lpguid == NULL )\r
name = "Default Device";\r
\r
// Count cards and devices\r
card = -1;\r
+ subdevice = -1;\r
snd_card_next( &card );\r
while ( card >= 0 ) {\r
sprintf( name, "hw:%d", card );\r
goto probeParameters;\r
}\r
}\r
+ else\r
+ snd_ctl_close( chandle );\r
\r
result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);\r
if ( result < 0 ) {\r
// Test our discrete set of sample rate values.\r
info.sampleRates.clear();\r
for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {\r
- if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )\r
+ if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {\r
info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[i];\r
+ }\r
}\r
if ( info.sampleRates.size() == 0 ) {\r
snd_pcm_close( phandle );\r
\r
// Check that we have at least one supported format\r
if ( info.nativeFormats == 0 ) {\r
+ snd_pcm_close( phandle );\r
errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";\r
errorText_ = errorStream_.str();\r
error( RtAudioError::WARNING );\r
// Get the device name\r
char *cardname;\r
result = snd_card_get_name( card, &cardname );\r
- if ( result >= 0 )\r
+ if ( result >= 0 ) {\r
sprintf( name, "hw:%s,%d", cardname, subdevice );\r
+ free( cardname );\r
+ }\r
info.name = name;\r
\r
// That's all ... close the device and return\r
}\r
\r
if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {\r
+    result = snd_pcm_drop( handle[1] ); // drop any stale data captured while the device has been open\r
state = snd_pcm_state( handle[1] );\r
if ( state != SND_PCM_STATE_PREPARED ) {\r
result = snd_pcm_prepare( handle[1] );\r
}\r
\r
unlock:\r
+ apiInfo->runnable = false; // fixes high CPU usage when stopped\r
MUTEX_UNLOCK( &stream_.mutex );\r
\r
if ( result >= 0 ) return;\r
}\r
\r
unlock:\r
+ apiInfo->runnable = false; // fixes high CPU usage when stopped\r
MUTEX_UNLOCK( &stream_.mutex );\r
\r
if ( result >= 0 ) return;\r
errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";\r
errorText_ = errorStream_.str();\r
}\r
+ else\r
+ errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";\r
}\r
else {\r
errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";\r
bool *isRunning = &info->isRunning;\r
\r
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)\r
- if ( &info->doRealtime ) {\r
+ if ( info->doRealtime ) {\r
pthread_t tID = pthread_self(); // ID of this thread\r
sched_param prio = { info->priority }; // scheduling priority of thread\r
pthread_setschedparam( tID, SCHED_RR, &prio );\r
return 1;\r
}\r
\r
-RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )\r
+RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )\r
{\r
RtAudio::DeviceInfo info;\r
info.probed = true;\r
for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )\r
info.sampleRates.push_back( *sr );\r
\r
+ info.preferredSampleRate = 48000;\r
info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;\r
\r
return info;\r
if ( format == sf->rtaudio_format ) {\r
sf_found = true;\r
stream_.userFormat = sf->rtaudio_format;\r
+ stream_.deviceFormat[mode] = stream_.userFormat;\r
ss.format = sf->pa_format;\r
break;\r
}\r
}\r
- if ( !sf_found ) {\r
- errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";\r
- return false;\r
+ if ( !sf_found ) { // Use internal data format conversion.\r
+ stream_.userFormat = format;\r
+ stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;\r
+ ss.format = PA_SAMPLE_FLOAT32LE;\r
}\r
\r
- // Set interleaving parameters.\r
+ // Set other stream parameters.\r
if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;\r
else stream_.userInterleaved = true;\r
stream_.deviceInterleaved[mode] = true;\r
stream_.nBuffers = 1;\r
stream_.doByteSwap[mode] = false;\r
- stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;\r
- stream_.deviceFormat[mode] = stream_.userFormat;\r
stream_.nUserChannels[mode] = channels;\r
stream_.nDeviceChannels[mode] = channels + firstChannel;\r
stream_.channelOffset[mode] = 0;\r
+ std::string streamName = "RtAudio";\r
+\r
+ // Set flags for buffer conversion.\r
+ stream_.doConvertBuffer[mode] = false;\r
+ if ( stream_.userFormat != stream_.deviceFormat[mode] )\r
+ stream_.doConvertBuffer[mode] = true;\r
+ if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )\r
+ stream_.doConvertBuffer[mode] = true;\r
\r
// Allocate necessary internal buffers.\r
bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );\r
pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );\r
\r
int error;\r
+ if ( options && !options->streamName.empty() ) streamName = options->streamName;\r
switch ( mode ) {\r
case INPUT:\r
- pah->s_rec = pa_simple_new( NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error );\r
+ pa_buffer_attr buffer_attr;\r
+ buffer_attr.fragsize = bufferBytes;\r
+ buffer_attr.maxlength = -1;\r
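+      // fragsize bounds the capture fragment size ( and hence latency ); maxlength of -1 requests the server default\r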
+\r
+ pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );\r
if ( !pah->s_rec ) {\r
errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";\r
goto error;\r
}\r
break;\r
case OUTPUT:\r
- pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );\r
+ pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );\r
if ( !pah->s_play ) {\r
errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";\r
goto error;\r
#include <sys/ioctl.h>\r
#include <unistd.h>\r
#include <fcntl.h>\r
-#include "soundcard.h"\r
+#include <sys/soundcard.h>\r
#include <errno.h>\r
#include <math.h>\r
\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+\r
break;\r
}\r
}\r
else {\r
// Check min and max rate values;\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
- if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )\r
+ if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
}\r
}\r
\r
RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;\r
if ( errorCallback ) {\r
// abortStream() can generate new error messages. Ignore them. Just keep original one.\r
- static bool firstErrorOccured = false;\r
\r
- if ( firstErrorOccured )\r
+ if ( firstErrorOccurred_ )\r
return;\r
\r
- firstErrorOccured = true;\r
+ firstErrorOccurred_ = true;\r
const std::string errorMessage = errorText_;\r
\r
if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {\r
}\r
\r
errorCallback( type, errorMessage );\r
- firstErrorOccured = false;\r
+ firstErrorOccurred_ = false;\r
return;\r
}\r
\r
\r
void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )\r
{\r
- register char val;\r
- register char *ptr;\r
+ char val;\r
+ char *ptr;\r
\r
ptr = buffer;\r
if ( format == RTAUDIO_SINT16 ) {\r