#ifdef HAVE_SWRESAMPLE
, _swr_context (0)
#endif
+ , _audio_frames_written (0)
, _process_end (false)
{
/* Create sound output files with .tmp suffixes; we will rename
}
void
-J2KWAVEncoder::process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format)
+J2KWAVEncoder::process_begin (int64_t audio_channel_layout)
{
- if (_fs->audio_sample_rate() != _fs->target_sample_rate()) {
+ if (_fs->audio_sample_rate() != _fs->target_audio_sample_rate()) {
#ifdef HAVE_SWRESAMPLE
stringstream s;
- s << "Will resample audio from " << _fs->audio_sample_rate() << " to " << _fs->target_sample_rate();
+ s << "Will resample audio from " << _fs->audio_sample_rate() << " to " << _fs->target_audio_sample_rate();
_log->log (s.str ());
/* We will be using planar float data when we call the resampler */
0,
audio_channel_layout,
AV_SAMPLE_FMT_FLTP,
- _fs->target_sample_rate(),
+ _fs->target_audio_sample_rate(),
audio_channel_layout,
AV_SAMPLE_FMT_FLTP,
_fs->audio_sample_rate(),
#if HAVE_SWRESAMPLE
if (_swr_context) {
- float* out[_fs->audio_channels()];
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- out[i] = new float[256];
- }
+ shared_ptr<AudioBuffers> out (new AudioBuffers (_fs->audio_channels(), 256));
while (1) {
- int const frames = swr_convert (_swr_context, (uint8_t **) out, 256, 0, 0);
+ int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
if (frames < 0) {
throw EncodeError ("could not run sample-rate converter");
break;
}
- write_audio (out, frames);
- }
-
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- delete[] out[i];
+ /* swr_convert returns 0 once the resampler has no residue left;
+    without this check the flush loop would never terminate */
+ if (frames == 0) {
+ break;
+ }
+ /* Record how many frames we actually got back so that write_audio
+    does not emit the whole 256-frame buffer every time */
+ out->set_frames (frames);
+ write_audio (out);
}
swr_free (&_swr_context);
}
-#endif
+#endif
+
+ /* Pad the written audio with silence up to a whole second of DCP audio */
+ int const dcp_sr = dcp_audio_sample_rate (_fs->audio_sample_rate ());
+ /* Second modulo so that we add nothing at all when _audio_frames_written
+    is already an exact multiple of dcp_sr, rather than a full extra second */
+ int64_t const extra_audio_frames = (dcp_sr - (_audio_frames_written % dcp_sr)) % dcp_sr;
+ if (extra_audio_frames > 0) {
+ shared_ptr<AudioBuffers> silence (new AudioBuffers (_fs->audio_channels(), extra_audio_frames));
+ silence->make_silent ();
+ write_audio (silence);
+ }
close_sound_files ();
}
void
-J2KWAVEncoder::process_audio (float** data, int frames)
+J2KWAVEncoder::process_audio (shared_ptr<const AudioBuffers> audio)
{
- float* resampled[_fs->audio_channels()];
+ shared_ptr<AudioBuffers> resampled;
-#if HAVE_SWRESAMPLE
+#if HAVE_SWRESAMPLE
/* Maybe sample-rate convert */
if (_swr_context) {
/* Compute the resampled frames count and add 32 for luck */
- int const resampled_frames = ceil (frames * _fs->target_sample_rate() / _fs->audio_sample_rate()) + 32;
+ /* cast to double so the rate ratio is not truncated by integer division
+    before ceil() gets to see it */
+ int const max_resampled_frames = ceil (audio->frames() * double (_fs->target_audio_sample_rate()) / _fs->audio_sample_rate()) + 32;
- /* Make a buffer to put the result in */
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- resampled[i] = new float[resampled_frames];
- }
+ resampled.reset (new AudioBuffers (_fs->audio_channels(), max_resampled_frames));
/* Resample audio */
- int out_frames = swr_convert (_swr_context, (uint8_t **) resampled, resampled_frames, (uint8_t const **) data, frames);
- if (out_frames < 0) {
+ int const resampled_frames = swr_convert (
+ _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) audio->data(), audio->frames()
+ );
+
+ if (resampled_frames < 0) {
throw EncodeError ("could not run sample-rate converter");
}
+ resampled->set_frames (resampled_frames);
+
/* And point our variables at the resampled audio */
- data = resampled;
- frames = resampled_frames;
+ audio = resampled;
}
#endif
- write_audio (data, frames);
-
-#if HAVE_SWRESAMPLE
- if (_swr_context) {
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- delete[] resampled[i];
- }
- }
-#endif
+ write_audio (audio);
}
void
-J2KWAVEncoder::write_audio (float** data, int frames)
+J2KWAVEncoder::write_audio (shared_ptr<const AudioBuffers> audio)
{
for (int i = 0; i < _fs->audio_channels(); ++i) {
- sf_write_float (_sound_files[i], data[i], frames);
+ sf_write_float (_sound_files[i], audio->data(i), audio->frames());
}
+
+ _audio_frames_written += audio->frames ();
}