RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/\r
\r
RtAudio: realtime audio i/o C++ classes\r
- Copyright (c) 2001-2012 Gary P. Scavone\r
+ Copyright (c) 2001-2013 Gary P. Scavone\r
\r
Permission is hereby granted, free of charge, to any person\r
obtaining a copy of this software and associated documentation files\r
*/\r
/************************************************************************/\r
\r
-// RtAudio: Version 4.0.11\r
+// RtAudio: Version 4.0.12\r
\r
#include "RtAudio.h"\r
#include <iostream>\r
RtAudioFormat format, unsigned int sampleRate,\r
unsigned int *bufferFrames,\r
RtAudioCallback callback, void *userData,\r
- RtAudio::StreamOptions *options )\r
+ RtAudio::StreamOptions *options,\r
+ RtAudioErrorCallback errorCallback )\r
{\r
return rtapi_->openStream( outputParameters, inputParameters, format,\r
sampleRate, bufferFrames, callback,\r
- userData, options );\r
+ userData, options, errorCallback );\r
}\r
\r
// *************************************************** //\r
RtAudioFormat format, unsigned int sampleRate,\r
unsigned int *bufferFrames,\r
RtAudioCallback callback, void *userData,\r
- RtAudio::StreamOptions *options )\r
+ RtAudio::StreamOptions *options,\r
+ RtAudioErrorCallback errorCallback )\r
{\r
if ( stream_.state != STREAM_CLOSED ) {\r
errorText_ = "RtApi::openStream: a stream is already open!";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
\r
if ( oParams && oParams->nChannels < 1 ) {\r
errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
\r
if ( iParams && iParams->nChannels < 1 ) {\r
errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
\r
if ( oParams == NULL && iParams == NULL ) {\r
errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
\r
if ( formatBytes(format) == 0 ) {\r
errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
\r
unsigned int nDevices = getDeviceCount();\r
if ( oParams->deviceId >= nDevices ) {\r
errorText_ = "RtApi::openStream: output device parameter value is invalid.";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
}\r
\r
if ( iParams->deviceId >= nDevices ) {\r
errorText_ = "RtApi::openStream: input device parameter value is invalid.";\r
error( RtError::INVALID_USE );\r
+ return;\r
}\r
}\r
\r
\r
result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,\r
sampleRate, format, bufferFrames, options );\r
- if ( result == false ) error( RtError::SYSTEM_ERROR );\r
+ if ( result == false ) {\r
+ error( RtError::SYSTEM_ERROR );\r
+ return;\r
+ }\r
}\r
\r
if ( iChannels > 0 ) {\r
if ( result == false ) {\r
if ( oChannels > 0 ) closeStream();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
}\r
\r
stream_.callbackInfo.callback = (void *) callback;\r
stream_.callbackInfo.userData = userData;\r
+ stream_.callbackInfo.errorCallback = (void *) errorCallback;\r
\r
if ( options ) options->numberOfBuffers = stream_.nBuffers;\r
stream_.state = STREAM_STOPPED;\r
return;\r
}\r
\r
-bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,\r
- unsigned int firstChannel, unsigned int sampleRate,\r
- RtAudioFormat format, unsigned int *bufferSize,\r
- RtAudio::StreamOptions *options )\r
+bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,\r
+ unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,\r
+ RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,\r
+ RtAudio::StreamOptions * /*options*/ )\r
{\r
// MUST be implemented in subclasses!\r
return FAILURE;\r
:deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }\r
};\r
\r
-ThreadHandle threadId;\r
-\r
RtApiCore:: RtApiCore()\r
{\r
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )\r
if ( nDevices == 0 ) {\r
errorText_ = "RtApiCore::getDeviceInfo: no devices found!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
if ( device >= nDevices ) {\r
errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
AudioDeviceID deviceList[ nDevices ];\r
return info;\r
}\r
\r
-OSStatus callbackHandler( AudioDeviceID inDevice,\r
- const AudioTimeStamp* inNow,\r
- const AudioBufferList* inInputData,\r
- const AudioTimeStamp* inInputTime,\r
- AudioBufferList* outOutputData,\r
- const AudioTimeStamp* inOutputTime, \r
- void* infoPointer )\r
+static OSStatus callbackHandler( AudioDeviceID inDevice,\r
+ const AudioTimeStamp* /*inNow*/,\r
+ const AudioBufferList* inInputData,\r
+ const AudioTimeStamp* /*inInputTime*/,\r
+ AudioBufferList* outOutputData,\r
+ const AudioTimeStamp* /*inOutputTime*/,\r
+ void* infoPointer )\r
{\r
CallbackInfo *info = (CallbackInfo *) infoPointer;\r
\r
return kAudioHardwareNoError;\r
}\r
\r
-OSStatus xrunListener( AudioObjectID inDevice,\r
- UInt32 nAddresses,\r
- const AudioObjectPropertyAddress properties[],\r
- void* handlePointer )\r
+static OSStatus xrunListener( AudioObjectID /*inDevice*/,\r
+ UInt32 nAddresses,\r
+ const AudioObjectPropertyAddress properties[],\r
+ void* handlePointer )\r
{\r
CoreHandle *handle = (CoreHandle *) handlePointer;\r
for ( UInt32 i=0; i<nAddresses; i++ ) {\r
return kAudioHardwareNoError;\r
}\r
\r
-OSStatus rateListener( AudioObjectID inDevice,\r
- UInt32 nAddresses,\r
- const AudioObjectPropertyAddress properties[],\r
- void* ratePointer )\r
+static OSStatus rateListener( AudioObjectID inDevice,\r
+ UInt32 /*nAddresses*/,\r
+ const AudioObjectPropertyAddress /*properties*/[],\r
+ void* ratePointer )\r
{\r
\r
Float64 *rate = (Float64 *) ratePointer;\r
stream_.deviceBuffer = 0;\r
}\r
\r
+ stream_.state = STREAM_CLOSED;\r
return FAILURE;\r
}\r
\r
// aborted. It is better to handle it this way because the\r
// callbackEvent() function probably should return before the AudioDeviceStop()\r
// function is called.\r
-extern "C" void *coreStopStream( void *ptr )\r
+static void *coreStopStream( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiCore *object = (RtApiCore *) info->object;\r
\r
// Check if we were draining the stream and signal is finished.\r
if ( handle->drainCounter > 3 ) {\r
+ ThreadHandle threadId;\r
\r
stream_.state = STREAM_STOPPING;\r
if ( handle->internalDrain == true )\r
:client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }\r
};\r
\r
-ThreadHandle threadId;\r
-void jackSilentError( const char * ) {};\r
+static void jackSilentError( const char * ) {};\r
\r
RtApiJack :: RtApiJack()\r
{\r
jack_client_close( client );\r
errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
// Get the current jack server sample rate.\r
return info;\r
}\r
\r
-int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )\r
+static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )\r
{\r
CallbackInfo *info = (CallbackInfo *) infoPointer;\r
\r
// server signals that it is shutting down. It is necessary to handle\r
// it this way because the jackShutdown() function must return before\r
// the jack_deactivate() function (in closeStream()) will return.\r
-extern "C" void *jackCloseStream( void *ptr )\r
+static void *jackCloseStream( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiJack *object = (RtApiJack *) info->object;\r
\r
pthread_exit( NULL );\r
}\r
-void jackShutdown( void *infoPointer )\r
+static void jackShutdown( void *infoPointer )\r
{\r
CallbackInfo *info = (CallbackInfo *) infoPointer;\r
RtApiJack *object = (RtApiJack *) info->object;\r
// other problem occurred and we should close the stream.\r
if ( object->isStreamRunning() == false ) return;\r
\r
+ ThreadHandle threadId;\r
pthread_create( &threadId, NULL, jackCloseStream, info );\r
std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;\r
}\r
\r
-int jackXrun( void *infoPointer )\r
+static int jackXrun( void *infoPointer )\r
{\r
JackHandle *handle = (JackHandle *) infoPointer;\r
\r
\r
// Get the latency of the JACK port.\r
ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );\r
- if ( ports[ firstChannel ] )\r
- stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );\r
+ if ( ports[ firstChannel ] ) {\r
+ // Added by Ge Wang\r
+ jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);\r
+ // the range (usually the min and max are equal)\r
+ jack_latency_range_t latrange; latrange.min = latrange.max = 0;\r
+ // get the latency range\r
+ jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );\r
+ // be optimistic, use the min!\r
+ stream_.latency[mode] = latrange.min;\r
+ //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );\r
+ }\r
free( ports );\r
\r
// The jack server always uses 32-bit floating-point data.\r
// aborted. It is necessary to handle it this way because the\r
// callbackEvent() function must return before the jack_deactivate()\r
// function will return.\r
-extern "C" void *jackStopStream( void *ptr )\r
+static void *jackStopStream( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiJack *object = (RtApiJack *) info->object;\r
\r
// Check if we were draining the stream and signal is finished.\r
if ( handle->drainCounter > 3 ) {\r
+ ThreadHandle threadId;\r
\r
stream_.state = STREAM_STOPPING;\r
if ( handle->internalDrain == true )\r
#include "asiodrivers.h"\r
#include <cmath>\r
\r
-AsioDrivers drivers;\r
-ASIOCallbacks asioCallbacks;\r
-ASIODriverInfo driverInfo;\r
-CallbackInfo *asioCallbackInfo;\r
-bool asioXRun;\r
+static AsioDrivers drivers;\r
+static ASIOCallbacks asioCallbacks;\r
+static ASIODriverInfo driverInfo;\r
+static CallbackInfo *asioCallbackInfo;\r
+static bool asioXRun;\r
\r
struct AsioHandle {\r
int drainCounter; // Tracks callback counts when draining\r
\r
// Function declarations (definitions at end of section)\r
static const char* getAsioErrorString( ASIOError result );\r
-void sampleRateChanged( ASIOSampleRate sRate );\r
-long asioMessages( long selector, long value, void* message, double* opt );\r
+static void sampleRateChanged( ASIOSampleRate sRate );\r
+static long asioMessages( long selector, long value, void* message, double* opt );\r
\r
RtApiAsio :: RtApiAsio()\r
{\r
if ( nDevices == 0 ) {\r
errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
if ( device >= nDevices ) {\r
errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
// If a stream is already open, we cannot probe other devices. Thus, use the saved results.\r
info.nativeFormats |= RTAUDIO_FLOAT32;\r
else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )\r
info.nativeFormats |= RTAUDIO_FLOAT64;\r
+ else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )\r
+ info.nativeFormats |= RTAUDIO_SINT24;\r
\r
if ( info.outputChannels > 0 )\r
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;\r
return info;\r
}\r
\r
-void bufferSwitch( long index, ASIOBool processNow )\r
+static void bufferSwitch( long index, ASIOBool processNow )\r
{\r
RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;\r
object->callbackEvent( index );\r
stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;\r
if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;\r
}\r
+ else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {\r
+ stream_.deviceFormat[mode] = RTAUDIO_SINT24;\r
+ if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;\r
+ }\r
\r
if ( stream_.deviceFormat[mode] == 0 ) {\r
drivers.removeCurrentDriver();\r
// aborted. It is necessary to handle it this way because the\r
// callbackEvent() function must return before the ASIOStop()\r
// function will return.\r
-extern "C" unsigned __stdcall asioStopStream( void *ptr )\r
+static unsigned __stdcall asioStopStream( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiAsio *object = (RtApiAsio *) info->object;\r
return SUCCESS;\r
}\r
\r
-void sampleRateChanged( ASIOSampleRate sRate )\r
+static void sampleRateChanged( ASIOSampleRate sRate )\r
{\r
// The ASIO documentation says that this usually only happens during\r
// external sync. Audio processing is not stopped by the driver,\r
std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;\r
}\r
\r
-long asioMessages( long selector, long value, void* message, double* opt )\r
+static long asioMessages( long selector, long value, void* message, double* opt )\r
{\r
long ret = 0;\r
\r
const char*message;\r
};\r
\r
- static Messages m[] = \r
+ static const Messages m[] = \r
{\r
{ ASE_NotPresent, "Hardware input or output is not present or available." },\r
{ ASE_HWMalfunction, "Hardware is malfunctioning." },\r
\r
static const char* getErrorString( int code );\r
\r
-extern "C" unsigned __stdcall callbackHandler( void *ptr );\r
+static unsigned __stdcall callbackHandler( void *ptr );\r
\r
struct DsDevice {\r
LPGUID id[2];\r
: found(false) { validId[0] = false; validId[1] = false; }\r
};\r
\r
-std::vector< DsDevice > dsDevices;\r
+struct DsProbeData {\r
+ bool isInput;\r
+ std::vector<struct DsDevice>* dsDevices;\r
+};\r
\r
RtApiDs :: RtApiDs()\r
{\r
dsDevices[i].found = false;\r
\r
// Query DirectSound devices.\r
- bool isInput = false;\r
- HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );\r
+ struct DsProbeData probeInfo;\r
+ probeInfo.isInput = false;\r
+ probeInfo.dsDevices = &dsDevices;\r
+ HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";\r
errorText_ = errorStream_.str();\r
}\r
\r
// Query DirectSoundCapture devices.\r
- isInput = true;\r
- result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );\r
+ probeInfo.isInput = true;\r
+ result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";\r
errorText_ = errorStream_.str();\r
if ( dsDevices.size() == 0 ) {\r
errorText_ = "RtApiDs::getDeviceInfo: no devices found!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
}\r
\r
if ( device >= dsDevices.size() ) {\r
errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
HRESULT result;\r
// Determine the device buffer size. By default, we'll use the value\r
// defined above (32K), but we will grow it to make allowances for\r
// very large software buffer sizes.\r
- DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;\r
+ DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;\r
DWORD dsPointerLeadTime = 0;\r
\r
void *ohandle = 0, *bhandle = 0;\r
stream_.deviceBuffer = 0;\r
}\r
\r
+ stream_.state = STREAM_CLOSED;\r
return FAILURE;\r
}\r
\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
while ( true ) {\r
result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;\r
Sleep( 1 );\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];\r
if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
\r
// We will copy our output buffer into the region between\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
\r
// Copy our buffer into the DS buffer\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;\r
handle->bufferPointer[0] = nextWritePointer;\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
\r
if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
\r
if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
\r
if ( duplexPrerollBytes <= 0 ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";\r
errorText_ = errorStream_.str();\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
handle->bufferPointer[1] = nextReadPointer;\r
\r
// Definitions for utility functions and callbacks\r
// specific to the DirectSound implementation.\r
\r
-extern "C" unsigned __stdcall callbackHandler( void *ptr )\r
+static unsigned __stdcall callbackHandler( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiDs *object = (RtApiDs *) info->object;\r
\r
#include "tchar.h"\r
\r
-std::string convertTChar( LPCTSTR name )\r
+static std::string convertTChar( LPCTSTR name )\r
{\r
#if defined( UNICODE ) || defined( _UNICODE )\r
int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);\r
- std::string s( length, 0 );\r
- length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);\r
+ std::string s( length-1, '\0' );\r
+ WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);\r
#else\r
std::string s( name );\r
#endif\r
LPCTSTR module,\r
LPVOID lpContext )\r
{\r
- bool *isInput = (bool *) lpContext;\r
+ struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;\r
+ std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;\r
\r
HRESULT hr;\r
bool validDevice = false;\r
- if ( *isInput == true ) {\r
+ if ( probeInfo.isInput == true ) {\r
DSCCAPS caps;\r
LPDIRECTSOUNDCAPTURE object;\r
\r
\r
// If good device, then save its name and guid.\r
std::string name = convertTChar( description );\r
- if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )\r
+ //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )\r
+ if ( lpguid == NULL )\r
name = "Default Device";\r
if ( validDevice ) {\r
for ( unsigned int i=0; i<dsDevices.size(); i++ ) {\r
if ( dsDevices[i].name == name ) {\r
dsDevices[i].found = true;\r
- if ( *isInput ) {\r
+ if ( probeInfo.isInput ) {\r
dsDevices[i].id[1] = lpguid;\r
dsDevices[i].validId[1] = true;\r
}\r
DsDevice device;\r
device.name = name;\r
device.found = true;\r
- if ( *isInput ) {\r
+ if ( probeInfo.isInput ) {\r
device.id[1] = lpguid;\r
device.validId[1] = true;\r
}\r
:synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }\r
};\r
\r
-extern "C" void *alsaCallbackHandler( void * ptr );\r
+static void *alsaCallbackHandler( void * ptr );\r
\r
RtApiAlsa :: RtApiAlsa()\r
{\r
snd_card_next( &card );\r
}\r
\r
+ result = snd_ctl_open( &handle, "default", 0 );\r
+ if (result == 0) {\r
+ nDevices++;\r
+ snd_ctl_close( handle );\r
+ }\r
+\r
return nDevices;\r
}\r
\r
snd_card_next( &card );\r
}\r
\r
+ result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );\r
+ if ( result == 0 ) {\r
+ if ( nDevices == device ) {\r
+ strcpy( name, "default" );\r
+ goto foundDevice;\r
+ }\r
+ nDevices++;\r
+ }\r
+\r
if ( nDevices == 0 ) {\r
errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
if ( device >= nDevices ) {\r
errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
foundDevice:\r
snd_pcm_hw_params_t *params;\r
snd_pcm_hw_params_alloca( ¶ms );\r
\r
- // First try for playback\r
+ // First try for playback unless default device (which has subdev -1)\r
stream = SND_PCM_STREAM_PLAYBACK;\r
- snd_pcm_info_set_device( pcminfo, subdevice );\r
- snd_pcm_info_set_subdevice( pcminfo, 0 );\r
snd_pcm_info_set_stream( pcminfo, stream );\r
+ if ( subdevice != -1 ) {\r
+ snd_pcm_info_set_device( pcminfo, subdevice );\r
+ snd_pcm_info_set_subdevice( pcminfo, 0 );\r
\r
- result = snd_ctl_pcm_info( chandle, pcminfo );\r
- if ( result < 0 ) {\r
- // Device probably doesn't support playback.\r
- goto captureProbe;\r
+ result = snd_ctl_pcm_info( chandle, pcminfo );\r
+ if ( result < 0 ) {\r
+ // Device probably doesn't support playback.\r
+ goto captureProbe;\r
+ }\r
}\r
\r
result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );\r
snd_pcm_close( phandle );\r
\r
captureProbe:\r
- // Now try for capture\r
stream = SND_PCM_STREAM_CAPTURE;\r
snd_pcm_info_set_stream( pcminfo, stream );\r
\r
- result = snd_ctl_pcm_info( chandle, pcminfo );\r
- snd_ctl_close( chandle );\r
- if ( result < 0 ) {\r
- // Device probably doesn't support capture.\r
- if ( info.outputChannels == 0 ) return info;\r
- goto probeParameters;\r
+ // Now try for capture unless default device (with subdev = -1)\r
+ if ( subdevice != -1 ) {\r
+ result = snd_ctl_pcm_info( chandle, pcminfo );\r
+ snd_ctl_close( chandle );\r
+ if ( result < 0 ) {\r
+ // Device probably doesn't support capture.\r
+ if ( info.outputChannels == 0 ) return info;\r
+ goto probeParameters;\r
+ }\r
}\r
\r
result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);\r
snd_card_next( &card );\r
}\r
\r
+ result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );\r
+ if ( result == 0 ) {\r
+ if ( nDevices == device ) {\r
+ strcpy( name, "default" );\r
+ goto foundDevice;\r
+ }\r
+ nDevices++;\r
+ }\r
+\r
if ( nDevices == 0 ) {\r
// This should not happen because a check is made before this function is called.\r
errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";\r
pthread_attr_t attr;\r
pthread_attr_init( &attr );\r
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );\r
+\r
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)\r
if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {\r
- struct sched_param param;\r
+ // We previously attempted to increase the audio callback priority\r
+ // to SCHED_RR here via the attributes. However, while no errors\r
+ // were reported in doing so, it did not work. So, now this is\r
+ // done in the alsaCallbackHandler function.\r
+ stream_.callbackInfo.doRealtime = true;\r
int priority = options->priority;\r
int min = sched_get_priority_min( SCHED_RR );\r
int max = sched_get_priority_max( SCHED_RR );\r
if ( priority < min ) priority = min;\r
else if ( priority > max ) priority = max;\r
- param.sched_priority = priority;\r
- pthread_attr_setschedparam( &attr, ¶m );\r
- pthread_attr_setschedpolicy( &attr, SCHED_RR );\r
+ stream_.callbackInfo.priority = priority;\r
}\r
- else\r
- pthread_attr_setschedpolicy( &attr, SCHED_OTHER );\r
-#else\r
- pthread_attr_setschedpolicy( &attr, SCHED_OTHER );\r
#endif\r
\r
stream_.callbackInfo.isRunning = true;\r
stream_.deviceBuffer = 0;\r
}\r
\r
+ stream_.state = STREAM_CLOSED;\r
return FAILURE;\r
}\r
\r
if ( doStopStream == 1 ) this->stopStream();\r
}\r
\r
-extern "C" void *alsaCallbackHandler( void *ptr )\r
+static void *alsaCallbackHandler( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiAlsa *object = (RtApiAlsa *) info->object;\r
bool *isRunning = &info->isRunning;\r
\r
+#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)\r
+ if ( &info->doRealtime ) {\r
+ pthread_t tID = pthread_self(); // ID of this thread\r
+ sched_param prio = { info->priority }; // scheduling priority of thread\r
+ pthread_setschedparam( tID, SCHED_RR, &prio );\r
+ }\r
+#endif\r
+\r
while ( *isRunning == true ) {\r
pthread_testcancel();\r
object->callbackEvent();\r
#include <pulse/simple.h>\r
#include <cstdio>\r
\r
-namespace {\r
-const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,\r
- 44100, 48000, 96000, 0}; }\r
+static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,\r
+ 44100, 48000, 96000, 0};\r
\r
struct rtaudio_pa_format_mapping_t {\r
RtAudioFormat rtaudio_format;\r
return info;\r
}\r
\r
-extern "C" void *pulseaudio_callback( void * user )\r
+static void *pulseaudio_callback( void * user )\r
{\r
CallbackInfo *cbi = static_cast<CallbackInfo *>( user );\r
RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );\r
RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;\r
double streamTime = getStreamTime();\r
RtAudioStreamStatus status = 0;\r
- int doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],\r
+ int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],\r
stream_.bufferSize, streamTime, status,\r
stream_.callbackInfo.userData );\r
\r
}\r
\r
MUTEX_LOCK( &stream_.mutex );\r
+ void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];\r
+ void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];\r
\r
if ( stream_.state != STREAM_RUNNING )\r
goto unlock;\r
\r
int pa_error;\r
size_t bytes;\r
- switch ( stream_.mode ) {\r
- case INPUT:\r
- bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
- if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {\r
- errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<\r
- pa_strerror( pa_error ) << ".";\r
- errorText_ = errorStream_.str();\r
- error( RtError::WARNING );\r
- }\r
- break;\r
- case OUTPUT:\r
- bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
- if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0 ) {\r
+ if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {\r
+ if ( stream_.doConvertBuffer[OUTPUT] ) {\r
+ convertBuffer( stream_.deviceBuffer,\r
+ stream_.userBuffer[OUTPUT],\r
+ stream_.convertInfo[OUTPUT] );\r
+ bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *\r
+ formatBytes( stream_.deviceFormat[OUTPUT] );\r
+ } else\r
+ bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *\r
+ formatBytes( stream_.userFormat );\r
+\r
+ if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {\r
errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<\r
pa_strerror( pa_error ) << ".";\r
errorText_ = errorStream_.str();\r
error( RtError::WARNING );\r
}\r
- break;\r
- case DUPLEX:\r
- bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
- if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {\r
+ }\r
+\r
+ if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {\r
+ if ( stream_.doConvertBuffer[INPUT] )\r
+ bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *\r
+ formatBytes( stream_.deviceFormat[INPUT] );\r
+ else\r
+ bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *\r
+ formatBytes( stream_.userFormat );\r
+ \r
+ if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {\r
errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<\r
pa_strerror( pa_error ) << ".";\r
errorText_ = errorStream_.str();\r
error( RtError::WARNING );\r
}\r
- bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
- if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0) {\r
- errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<\r
- pa_strerror( pa_error ) << ".";\r
- errorText_ = errorStream_.str();\r
- error( RtError::WARNING );\r
+ if ( stream_.doConvertBuffer[INPUT] ) {\r
+ convertBuffer( stream_.userBuffer[INPUT],\r
+ stream_.deviceBuffer,\r
+ stream_.convertInfo[INPUT] );\r
}\r
- break;\r
- default:\r
- // ERROR\r
- break;\r
}\r
\r
unlock:\r
errorText_ = errorStream_.str();\r
MUTEX_UNLOCK( &stream_.mutex );\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
}\r
\r
errorText_ = errorStream_.str();\r
MUTEX_UNLOCK( &stream_.mutex );\r
error( RtError::SYSTEM_ERROR );\r
+ return;\r
}\r
}\r
\r
return false;\r
}\r
\r
- if ( options && ( options->flags & RTAUDIO_NONINTERLEAVED ) ) {\r
- errorText_ = "RtApiPulse::probeDeviceOpen: only interleaved audio data supported.";\r
- return false;\r
- }\r
-\r
- stream_.userInterleaved = true;\r
- stream_.nBuffers = 1;\r
-\r
+ // Set interleaving parameters.\r
+ if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;\r
+ else stream_.userInterleaved = true;\r
stream_.deviceInterleaved[mode] = true;\r
+ stream_.nBuffers = 1;\r
stream_.doByteSwap[mode] = false;\r
- stream_.doConvertBuffer[mode] = false;\r
+ stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;\r
stream_.deviceFormat[mode] = stream_.userFormat;\r
stream_.nUserChannels[mode] = channels;\r
- stream_.nDeviceChannels[mode] = channels;\r
+ stream_.nDeviceChannels[mode] = channels + firstChannel;\r
stream_.channelOffset[mode] = 0;\r
\r
// Allocate necessary internal buffers.\r
}\r
stream_.bufferSize = *bufferSize;\r
\r
+ if ( stream_.doConvertBuffer[mode] ) {\r
+\r
+ bool makeBuffer = true;\r
+ bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );\r
+ if ( mode == INPUT ) {\r
+ if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {\r
+ unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );\r
+ if ( bufferBytes <= bytesOut ) makeBuffer = false;\r
+ }\r
+ }\r
+\r
+ if ( makeBuffer ) {\r
+ bufferBytes *= *bufferSize;\r
+ if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );\r
+ if ( stream_.deviceBuffer == NULL ) {\r
+ errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";\r
+ goto error;\r
+ }\r
+ }\r
+ }\r
+\r
+ stream_.device[mode] = device;\r
+\r
+ // Setup the buffer conversion information structure.\r
+ if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );\r
+\r
if ( !stream_.apiHandle ) {\r
PulseAudioHandle *pah = new PulseAudioHandle;\r
if ( !pah ) {\r
else\r
stream_.mode = DUPLEX;\r
\r
- stream_.state = STREAM_STOPPED;\r
-\r
if ( !stream_.callbackInfo.isRunning ) {\r
stream_.callbackInfo.object = this;\r
stream_.callbackInfo.isRunning = true;\r
goto error;\r
}\r
}\r
+\r
+ stream_.state = STREAM_STOPPED;\r
return true;\r
\r
error:\r
- closeStream();\r
- return false;\r
+ if ( pah && stream_.callbackInfo.isRunning ) {\r
+ pthread_cond_destroy( &pah->runnable_cv );\r
+ delete pah;\r
+ stream_.apiHandle = 0;\r
+ }\r
+\r
+ for ( int i=0; i<2; i++ ) {\r
+ if ( stream_.userBuffer[i] ) {\r
+ free( stream_.userBuffer[i] );\r
+ stream_.userBuffer[i] = 0;\r
+ }\r
+ }\r
+\r
+ if ( stream_.deviceBuffer ) {\r
+ free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = 0;\r
+ }\r
+\r
+ return FAILURE;\r
}\r
\r
//******************** End of __LINUX_PULSE__ *********************//\r
#include <errno.h>\r
#include <math.h>\r
\r
-extern "C" void *ossCallbackHandler(void * ptr);\r
+static void *ossCallbackHandler(void * ptr);\r
\r
// A structure to hold various information related to the OSS API\r
// implementation.\r
close( mixerfd );\r
errorText_ = "RtApiOss::getDeviceInfo: no devices found!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
if ( device >= nDevices ) {\r
close( mixerfd );\r
errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";\r
error( RtError::INVALID_USE );\r
+ return info;\r
}\r
\r
oss_audioinfo ainfo;\r
if ( doStopStream == 1 ) this->stopStream();\r
}\r
\r
-extern "C" void *ossCallbackHandler( void *ptr )\r
+static void *ossCallbackHandler( void *ptr )\r
{\r
CallbackInfo *info = (CallbackInfo *) ptr;\r
RtApiOss *object = (RtApiOss *) info->object;\r
void RtApi :: error( RtError::Type type )\r
{\r
errorStream_.str(""); // clear the ostringstream\r
+\r
+ RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;\r
+ if ( errorCallback ) {\r
+ // abortStream() can generate new error messages. Ignore them. Just keep the original one.\r
+ static bool firstErrorOccurred = false;\r
+\r
+ if ( firstErrorOccurred )\r
+ return;\r
+\r
+ firstErrorOccurred = true;\r
+ const std::string errorMessage = errorText_;\r
+\r
+ if ( type != RtError::WARNING && stream_.state != STREAM_STOPPED ) {\r
+ stream_.callbackInfo.isRunning = false; // exit from the thread\r
+ abortStream();\r
+ }\r
+\r
+ errorCallback( type, errorMessage );\r
+ firstErrorOccurred = false;\r
+ return;\r
+ }\r
+\r
if ( type == RtError::WARNING && showWarnings_ == true )\r
std::cerr << '\n' << errorText_ << "\n\n";\r
else if ( type != RtError::WARNING )\r
stream_.callbackInfo.callback = 0;\r
stream_.callbackInfo.userData = 0;\r
stream_.callbackInfo.isRunning = false;\r
+ stream_.callbackInfo.errorCallback = 0;\r
for ( int i=0; i<2; i++ ) {\r
stream_.device[i] = 11111;\r
stream_.doConvertBuffer[i] = false;\r
{\r
if ( format == RTAUDIO_SINT16 )\r
return 2;\r
- else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||\r
- format == RTAUDIO_FLOAT32 )\r
+ else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )\r
return 4;\r
else if ( format == RTAUDIO_FLOAT64 )\r
return 8;\r
+ else if ( format == RTAUDIO_SINT24 )\r
+ return 3;\r
else if ( format == RTAUDIO_SINT8 )\r
return 1;\r
\r
}\r
}\r
else if (info.inFormat == RTAUDIO_SINT24) {\r
- Int32 *in = (Int32 *)inBuffer;\r
+ Int24 *in = (Int24 *)inBuffer;\r
scale = 1.0 / 8388607.5;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);\r
+ out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());\r
out[info.outOffset[j]] += 0.5;\r
out[info.outOffset[j]] *= scale;\r
}\r
}\r
}\r
else if (info.inFormat == RTAUDIO_SINT24) {\r
- Int32 *in = (Int32 *)inBuffer;\r
+ Int24 *in = (Int24 *)inBuffer;\r
scale = (Float32) ( 1.0 / 8388607.5 );\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);\r
+ out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());\r
out[info.outOffset[j]] += 0.5;\r
out[info.outOffset[j]] *= scale;\r
}\r
out += info.outJump;\r
}\r
}\r
- else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes\r
- Int32 *in = (Int32 *)inBuffer;\r
+ else if (info.inFormat == RTAUDIO_SINT24) {\r
+ Int24 *in = (Int24 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];\r
+ out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();\r
out[info.outOffset[j]] <<= 8;\r
}\r
in += info.inJump;\r
}\r
}\r
else if (info.outFormat == RTAUDIO_SINT24) {\r
- Int32 *out = (Int32 *)outBuffer;\r
+ Int24 *out = (Int24 *)outBuffer;\r
if (info.inFormat == RTAUDIO_SINT8) {\r
signed char *in = (signed char *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];\r
- out[info.outOffset[j]] <<= 16;\r
+ out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);\r
+ //out[info.outOffset[j]] <<= 16;\r
}\r
in += info.inJump;\r
out += info.outJump;\r
Int16 *in = (Int16 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];\r
- out[info.outOffset[j]] <<= 8;\r
+ out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);\r
+ //out[info.outOffset[j]] <<= 8;\r
}\r
in += info.inJump;\r
out += info.outJump;\r
}\r
else if (info.inFormat == RTAUDIO_SINT24) {\r
// Channel compensation and/or (de)interleaving only.\r
- Int32 *in = (Int32 *)inBuffer;\r
+ Int24 *in = (Int24 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
out[info.outOffset[j]] = in[info.inOffset[j]];\r
Int32 *in = (Int32 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];\r
- out[info.outOffset[j]] >>= 8;\r
+ out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);\r
+ //out[info.outOffset[j]] >>= 8;\r
}\r
in += info.inJump;\r
out += info.outJump;\r
}\r
}\r
else if (info.inFormat == RTAUDIO_SINT24) {\r
- Int32 *in = (Int32 *)inBuffer;\r
+ Int24 *in = (Int24 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);\r
+ out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);\r
}\r
in += info.inJump;\r
out += info.outJump;\r
}\r
}\r
else if (info.inFormat == RTAUDIO_SINT24) {\r
- Int32 *in = (Int32 *)inBuffer;\r
+ Int24 *in = (Int24 *)inBuffer;\r
for (unsigned int i=0; i<stream_.bufferSize; i++) {\r
for (j=0; j<info.channels; j++) {\r
- out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);\r
+ out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);\r
}\r
in += info.inJump;\r
out += info.outJump;\r
}\r
}\r
\r
- //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }\r
- //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }\r
- //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }\r
+//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }\r
+//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }\r
+//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }\r
\r
void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )\r
{\r
ptr += 2;\r
}\r
}\r
- else if ( format == RTAUDIO_SINT24 ||\r
- format == RTAUDIO_SINT32 ||\r
+ else if ( format == RTAUDIO_SINT32 ||\r
format == RTAUDIO_FLOAT32 ) {\r
for ( unsigned int i=0; i<samples; i++ ) {\r
// Swap 1st and 4th bytes.\r
ptr += 3;\r
}\r
}\r
+ else if ( format == RTAUDIO_SINT24 ) {\r
+ for ( unsigned int i=0; i<samples; i++ ) {\r
+ // Swap 1st and 3rd bytes.\r
+ val = *(ptr);\r
+ *(ptr) = *(ptr+2);\r
+ *(ptr+2) = val;\r
+\r
+ // Increment 3 more bytes (each 24-bit sample occupies 3 bytes).\r
+ ptr += 3;\r
+ }\r
+ }\r
else if ( format == RTAUDIO_FLOAT64 ) {\r
for ( unsigned int i=0; i<samples; i++ ) {\r
// Swap 1st and 8th bytes\r