#include "audio_buffers.h"
#include "exceptions.h"
#include "film.h"
#include "log.h"
#include "resampler.h"
#include "util.h"
#include "i18n.h"

using std::cout;
using std::list;
using std::pair;
using std::stringstream;
using boost::optional;
using boost::shared_ptr;
-AudioDecoder::AudioDecoder (shared_ptr<const Film> f, shared_ptr<const AudioContent> c)
- : Decoder (f)
- , _next_audio (0)
- , _audio_content (c)
+AudioDecoder::AudioDecoder (shared_ptr<const Film> film, shared_ptr<const AudioContent> content)
+ : Decoder (film)
+ , _audio_content (content)
+ , _audio_position (0)
{
- if (_audio_content->content_audio_frame_rate() != _audio_content->output_audio_frame_rate()) {
-
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
- stringstream s;
- s << String::compose (
- "Will resample audio from %1 to %2",
- _audio_content->content_audio_frame_rate(), _audio_content->output_audio_frame_rate()
- );
-
- film->log()->log (s.str ());
-
- /* We will be using planar float data when we call the
- resampler. As far as I can see, the audio channel
- layout is not necessary for our purposes; it seems
- only to be used get the number of channels and
- decide if rematrixing is needed. It won't be, since
- input and output layouts are the same.
- */
-
- _swr_context = swr_alloc_set_opts (
- 0,
- av_get_default_channel_layout (MAX_AUDIO_CHANNELS),
- AV_SAMPLE_FMT_FLTP,
- _audio_content->output_audio_frame_rate(),
- av_get_default_channel_layout (MAX_AUDIO_CHANNELS),
- AV_SAMPLE_FMT_FLTP,
- _audio_content->content_audio_frame_rate(),
- 0, 0
- );
-
- swr_init (_swr_context);
- } else {
- _swr_context = 0;
+ if (content->output_audio_frame_rate() != content->content_audio_frame_rate() && content->audio_channels ()) {
+ _resampler.reset (new Resampler (content->content_audio_frame_rate(), content->output_audio_frame_rate(), content->audio_channels ()));
}
}
-AudioDecoder::~AudioDecoder ()
+/** Audio timestamping is made hard by many factors, but the final nail in the coffin is resampling.
+ * We have to assume that we are feeding continuous data into the resampler, and so we get continuous
+ * data out. Hence we do the timestamping here, post-resampler, just by counting samples.
+ */
+void
+AudioDecoder::audio (shared_ptr<const AudioBuffers> data)
{
- if (_swr_context) {
- swr_free (&_swr_context);
+ if (_resampler) {
+ data = _resampler->run (data);
}
-}
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (data, _audio_position)));
+ _audio_position += data->frames ();
+}
-#if 0
void
-AudioDecoder::process_end ()
+AudioDecoder::flush ()
{
- if (_swr_context) {
-
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
- shared_ptr<AudioBuffers> out (new AudioBuffers (film->audio_mapping().dcp_channels(), 256));
-
- while (1) {
- int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
- if (frames < 0) {
- throw EncodeError (_("could not run sample-rate converter"));
- }
-
- if (frames == 0) {
- break;
- }
-
- out->set_frames (frames);
- _writer->write (out);
- }
+ if (!_resampler) {
+ return;
+ }
+ shared_ptr<const AudioBuffers> b = _resampler->flush ();
+ if (b) {
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position)));
+ _audio_position += b->frames ();
}
}
-#endif
void
-AudioDecoder::audio (shared_ptr<const AudioBuffers> data, Time time)
+AudioDecoder::seek (ContentTime t, bool)
{
- /* Maybe resample */
- if (_swr_context) {
-
- /* Compute the resampled frames count and add 32 for luck */
- int const max_resampled_frames = ceil (
- (int64_t) data->frames() * _audio_content->output_audio_frame_rate() / _audio_content->content_audio_frame_rate()
- ) + 32;
-
- shared_ptr<AudioBuffers> resampled (new AudioBuffers (data->channels(), max_resampled_frames));
-
- /* Resample audio */
- int const resampled_frames = swr_convert (
- _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
- );
-
- if (resampled_frames < 0) {
- throw EncodeError (_("could not run sample-rate converter"));
- }
-
- resampled->set_frames (resampled_frames);
-
- /* And point our variables at the resampled audio */
- data = resampled;
- }
-
shared_ptr<const Film> film = _film.lock ();
assert (film);
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (film->dcp_audio_channels(), data->frames()));
- dcp_mapped->make_silent ();
- list<pair<int, libdcp::Channel> > map = _audio_content->audio_mapping().content_to_dcp ();
- for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
- dcp_mapped->accumulate_channel (data.get(), i->first, i->second);
- }
-
- Audio (dcp_mapped, time);
- _next_audio = time + film->audio_frames_to_time (data->frames());
+ FrameRateChange frc = film->active_frame_rate_change (_audio_content->position ());
+ _audio_position = (t + first_audio()) / frc.speed_up;
}
-
-