in a similar way to the V1 patch.
a bit ridiculous when most of the decoders have slightly clunky seek
and pass methods.
+
\section{Multiple streams}
Another thing unique to FFmpeg is multiple audio streams, possibly at
These disadvantages suggest that the first approach is better.
+One might think that the logical conclusion is to take streams all the
+way back to the player and resample them there, but the resampling
+must occur on the other side of the get-stuff-at-time API.
+
\end{document}
shift;
valgrind --tool="memcheck" --leak-check=full build/test/unit-tests $*
else
- build/test/unit-tests --catch_system_errors=no $*
+ build/test/unit-tests --catch_system_errors=no --log_level=test_suite $*
fi
/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "exceptions.h"
#include "config.h"
#include "frame_rate_change.h"
-#include "audio_processor.h"
#include "raw_convert.h"
#include <libcxml/cxml.h>
+#include <boost/foreach.hpp>
#include "i18n.h"
using std::setprecision;
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
+using boost::optional;
-int const AudioContentProperty::AUDIO_CHANNELS = 200;
-int const AudioContentProperty::AUDIO_FRAME_RATE = 201;
-int const AudioContentProperty::AUDIO_GAIN = 202;
-int const AudioContentProperty::AUDIO_DELAY = 203;
-int const AudioContentProperty::AUDIO_MAPPING = 204;
-int const AudioContentProperty::AUDIO_PROCESSOR = 205;
+/** Something stream-related has changed */
+int const AudioContentProperty::AUDIO_STREAMS = 200;
+int const AudioContentProperty::AUDIO_GAIN = 201;
+int const AudioContentProperty::AUDIO_DELAY = 202;
AudioContent::AudioContent (shared_ptr<const Film> f)
: Content (f)
, _audio_gain (0)
, _audio_delay (Config::instance()->default_audio_delay ())
- , _audio_processor (0)
{
}
: Content (f, s)
, _audio_gain (0)
, _audio_delay (Config::instance()->default_audio_delay ())
- , _audio_processor (0)
{
}
: Content (f, p)
, _audio_gain (0)
, _audio_delay (Config::instance()->default_audio_delay ())
- , _audio_processor (0)
{
}
AudioContent::AudioContent (shared_ptr<const Film> f, cxml::ConstNodePtr node)
: Content (f, node)
- , _audio_processor (0)
{
_audio_gain = node->number_child<float> ("AudioGain");
_audio_delay = node->number_child<int> ("AudioDelay");
- if (node->optional_string_child ("AudioProcessor")) {
- _audio_processor = AudioProcessor::from_id (node->string_child ("AudioProcessor"));
- }
}
AudioContent::AudioContent (shared_ptr<const Film> f, vector<shared_ptr<Content> > c)
_audio_gain = ref->audio_gain ();
_audio_delay = ref->audio_delay ();
- _audio_processor = ref->audio_processor ();
}
void
boost::mutex::scoped_lock lm (_mutex);
node->add_child("AudioGain")->add_child_text (raw_convert<string> (_audio_gain));
node->add_child("AudioDelay")->add_child_text (raw_convert<string> (_audio_delay));
- if (_audio_processor) {
- node->add_child("AudioProcessor")->add_child_text (_audio_processor->id ());
- }
}
signal_changed (AudioContentProperty::AUDIO_DELAY);
}
-void
-AudioContent::set_audio_processor (AudioProcessor const * p)
-{
- {
- boost::mutex::scoped_lock lm (_mutex);
- _audio_processor = p;
- }
-
- /* The channel count might have changed, so reset the mapping */
- AudioMapping m (processed_audio_channels ());
- m.make_default ();
- set_audio_mapping (m);
-
- signal_changed (AudioContentProperty::AUDIO_PROCESSOR);
-}
-
boost::signals2::connection
AudioContent::analyse_audio (boost::function<void()> finished)
{
string
AudioContent::technical_summary () const
{
- return String::compose (
- "audio: channels %1, content rate %2, resampled rate %3",
- audio_channels(),
- audio_frame_rate(),
- resampled_audio_frame_rate()
- );
+ string s = "audio :";
+ BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+ s += String::compose ("stream channels %1 rate %2", i->channels(), i->frame_rate());
+ }
+
+ return s;
}
void
-AudioContent::set_audio_mapping (AudioMapping)
+AudioContent::set_audio_mapping (AudioMapping mapping)
+{
+ int c = 0;
+ BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+ AudioMapping stream_mapping (i->channels ());
+ for (int j = 0; j < i->channels(); ++j) {
+ for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
+ stream_mapping.set (j, static_cast<dcp::Channel> (k), mapping.get (c, static_cast<dcp::Channel> (k)));
+ }
+ ++c;
+ }
+ i->set_mapping (stream_mapping);
+ }
+
+ signal_changed (AudioContentProperty::AUDIO_STREAMS);
+}
+
+AudioMapping
+AudioContent::audio_mapping () const
{
- signal_changed (AudioContentProperty::AUDIO_MAPPING);
+ int channels = 0;
+ BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+ channels += i->channels ();
+ }
+
+ AudioMapping merged (channels);
+
+ int c = 0;
+ int s = 0;
+ BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+ AudioMapping mapping = i->mapping ();
+ for (int j = 0; j < mapping.content_channels(); ++j) {
+ merged.set_name (c, String::compose ("%1:%2", s + 1, j + 1));
+ for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
+ merged.set (c, static_cast<dcp::Channel> (k), mapping.get (j, static_cast<dcp::Channel> (k)));
+ }
+ ++c;
+ }
+ ++s;
+ }
+
+ return merged;
}
/** @return the frame rate that this content should be resampled to in order
DCPOMATIC_ASSERT (film);
/* Resample to a DCI-approved sample rate */
- double t = dcp_audio_frame_rate (audio_frame_rate ());
+ double t = has_rate_above_48k() ? 96000 : 48000;
FrameRateChange frc = film->active_frame_rate_change (position ());
return rint (t);
}
-int
-AudioContent::processed_audio_channels () const
+string
+AudioContent::processing_description () const
{
- if (!audio_processor ()) {
- return audio_channels ();
+ vector<AudioStreamPtr> streams = audio_streams ();
+ if (streams.empty ()) {
+ return "";
+ }
+
+ /* Possible answers are:
+ 1. all audio will be resampled from x to y.
+ 2. all audio will be resampled to y (from a variety of rates)
+ 3. some audio will be resampled to y (from a variety of rates)
+ 4. nothing will be resampled.
+ */
+
+ bool not_resampled = false;
+ bool resampled = false;
+ bool same = true;
+
+ optional<int> common_frame_rate;
+ BOOST_FOREACH (AudioStreamPtr i, streams) {
+ if (i->frame_rate() != resampled_audio_frame_rate()) {
+ resampled = true;
+ } else {
+ not_resampled = true;
+ }
+
+ if (common_frame_rate && common_frame_rate != i->frame_rate ()) {
+ same = false;
+ }
+ common_frame_rate = i->frame_rate ();
}
- return audio_processor()->out_channels (audio_channels ());
+ if (not_resampled && !resampled) {
+ return _("Audio will not be resampled");
+ }
+
+ if (not_resampled && resampled) {
+ return String::compose (_("Some audio will be resampled to %1kHz"), resampled_audio_frame_rate ());
+ }
+
+ if (!not_resampled && resampled) {
+ if (same) {
+ return String::compose (_("Audio will be resampled from %1kHz to %2kHz"), common_frame_rate.get(), resampled_audio_frame_rate ());
+ } else {
+ return String::compose (_("Audio will be resampled to %1kHz"), resampled_audio_frame_rate ());
+ }
+ }
+
+ return "";
}
-string
-AudioContent::processing_description () const
+bool
+AudioContent::has_rate_above_48k () const
{
- stringstream d;
-
- if (audio_frame_rate() != resampled_audio_frame_rate ()) {
- stringstream from;
- from << fixed << setprecision(3) << (audio_frame_rate() / 1000.0);
- stringstream to;
- to << fixed << setprecision(3) << (resampled_audio_frame_rate() / 1000.0);
-
- d << String::compose (_("Audio will be resampled from %1kHz to %2kHz."), from.str(), to.str());
- } else {
- d << _("Audio will not be resampled.");
+ BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+ if (i->frame_rate() > 48000) {
+ return true;
+ }
}
- return d.str ();
+ return false;
}
-
/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#define DCPOMATIC_AUDIO_CONTENT_H
#include "content.h"
+#include "audio_stream.h"
#include "audio_mapping.h"
namespace cxml {
class Node;
}
-class AudioProcessor;
-
/** @class AudioContentProperty
* @brief Names for properties of AudioContent.
*/
class AudioContentProperty
{
public:
- static int const AUDIO_CHANNELS;
- static int const AUDIO_FRAME_RATE;
+ static int const AUDIO_STREAMS;
static int const AUDIO_GAIN;
static int const AUDIO_DELAY;
- static int const AUDIO_MAPPING;
- static int const AUDIO_PROCESSOR;
};
/** @class AudioContent
void as_xml (xmlpp::Node *) const;
std::string technical_summary () const;
- /** @return number of audio channels in the content */
- virtual int audio_channels () const = 0;
- /** @return the frame rate of the content */
- virtual int audio_frame_rate () const = 0;
- virtual AudioMapping audio_mapping () const = 0;
- virtual void set_audio_mapping (AudioMapping);
- virtual boost::filesystem::path audio_analysis_path () const;
+ virtual std::vector<AudioStreamPtr> audio_streams () const = 0;
+ AudioMapping audio_mapping () const;
+ void set_audio_mapping (AudioMapping);
+ boost::filesystem::path audio_analysis_path () const;
int resampled_audio_frame_rate () const;
- int processed_audio_channels () const;
+ bool has_rate_above_48k () const;
boost::signals2::connection analyse_audio (boost::function<void()>);
void set_audio_gain (double);
void set_audio_delay (int);
- void set_audio_processor (AudioProcessor const *);
double audio_gain () const {
boost::mutex::scoped_lock lm (_mutex);
return _audio_delay;
}
- AudioProcessor const * audio_processor () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_processor;
- }
-
std::string processing_description () const;
private:
double _audio_gain;
/** Delay to apply to audio (positive moves audio later) in milliseconds */
int _audio_delay;
- AudioProcessor const * _audio_processor;
};
#endif
#include "audio_decoder.h"
#include "audio_buffers.h"
-#include "audio_processor.h"
-#include "resampler.h"
-#include "util.h"
+#include "audio_decoder_stream.h"
+#include <boost/foreach.hpp>
#include <iostream>
#include "i18n.h"
-using std::list;
-using std::pair;
using std::cout;
-using std::min;
-using std::max;
-using boost::optional;
+using std::map;
using boost::shared_ptr;
AudioDecoder::AudioDecoder (shared_ptr<const AudioContent> content)
: _audio_content (content)
{
- if (content->resampled_audio_frame_rate() != content->audio_frame_rate() && content->audio_channels ()) {
- _resampler.reset (new Resampler (content->audio_frame_rate(), content->resampled_audio_frame_rate(), content->audio_channels ()));
+ BOOST_FOREACH (AudioStreamPtr i, content->audio_streams ()) {
+ _streams[i] = shared_ptr<AudioDecoderStream> (new AudioDecoderStream (_audio_content, i, this));
}
-
- if (content->audio_processor ()) {
- _processor = content->audio_processor()->clone (content->resampled_audio_frame_rate ());
- }
-
- reset_decoded_audio ();
-}
-
-void
-AudioDecoder::reset_decoded_audio ()
-{
- _decoded_audio = ContentAudio (shared_ptr<AudioBuffers> (new AudioBuffers (_audio_content->processed_audio_channels(), 0)), 0);
}
-shared_ptr<ContentAudio>
-AudioDecoder::get_audio (Frame frame, Frame length, bool accurate)
+ContentAudio
+AudioDecoder::get_audio (AudioStreamPtr stream, Frame frame, Frame length, bool accurate)
{
- shared_ptr<ContentAudio> dec;
-
- Frame const end = frame + length - 1;
-
- if (frame < _decoded_audio.frame || end > (_decoded_audio.frame + length * 4)) {
- /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
- seek (ContentTime::from_frames (frame, _audio_content->resampled_audio_frame_rate()), accurate);
- }
-
- /* Offset of the data that we want from the start of _decoded_audio.audio
- (to be set up shortly)
- */
- Frame decoded_offset = 0;
-
- /* Now enough pass() calls will either:
- * (a) give us what we want, or
- * (b) hit the end of the decoder.
- *
- * If we are being accurate, we want the right frames,
- * otherwise any frames will do.
- */
- if (accurate) {
- /* Keep stuffing data into _decoded_audio until we have enough data, or the subclass does not want to give us any more */
- while ((_decoded_audio.frame > frame || (_decoded_audio.frame + _decoded_audio.audio->frames()) < end) && !pass (PASS_REASON_AUDIO)) {}
- decoded_offset = frame - _decoded_audio.frame;
- } else {
- while (_decoded_audio.audio->frames() < length && !pass (PASS_REASON_AUDIO)) {}
- /* Use decoded_offset of 0, as we don't really care what frames we return */
- }
-
- /* The amount of data available in _decoded_audio.audio starting from `frame'. This could be -ve
- if pass() returned true before we got enough data.
- */
- Frame const available = _decoded_audio.audio->frames() - decoded_offset;
-
- /* We will return either that, or the requested amount, whichever is smaller */
- Frame const to_return = max ((Frame) 0, min (available, length));
-
- /* Copy our data to the output */
- shared_ptr<AudioBuffers> out (new AudioBuffers (_decoded_audio.audio->channels(), to_return));
- out->copy_from (_decoded_audio.audio.get(), to_return, decoded_offset, 0);
-
- Frame const remaining = max ((Frame) 0, available - to_return);
-
- /* Clean up decoded; first, move the data after what we just returned to the start of the buffer */
- _decoded_audio.audio->move (decoded_offset + to_return, 0, remaining);
- /* And set up the number of frames we have left */
- _decoded_audio.audio->set_frames (remaining);
- /* Also bump where those frames are in terms of the content */
- _decoded_audio.frame += decoded_offset + to_return;
-
- return shared_ptr<ContentAudio> (new ContentAudio (out, frame));
+ return _streams[stream]->get (frame, length, accurate);
}
-/** Called by subclasses when audio data is ready.
- *
- * Audio timestamping is made hard by many factors, but perhaps the most entertaining is resampling.
- * We have to assume that we are feeding continuous data into the resampler, and so we get continuous
- * data out. Hence we do the timestamping here, post-resampler, just by counting samples.
- *
- * The time is passed in here so that after a seek we can set up our _audio_position. The
- * time is ignored once this has been done.
- */
void
-AudioDecoder::audio (shared_ptr<const AudioBuffers> data, ContentTime time)
+AudioDecoder::audio (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time)
{
- if (_resampler) {
- data = _resampler->run (data);
- }
-
- if (_processor) {
- data = _processor->run (data);
- }
-
- Frame const frame_rate = _audio_content->resampled_audio_frame_rate ();
-
- if (_seek_reference) {
- /* We've had an accurate seek and now we're seeing some data */
- ContentTime const delta = time - _seek_reference.get ();
- Frame const delta_frames = delta.frames (frame_rate);
- if (delta_frames > 0) {
- /* This data comes after the seek time. Pad the data with some silence. */
- shared_ptr<AudioBuffers> padded (new AudioBuffers (data->channels(), data->frames() + delta_frames));
- padded->make_silent ();
- padded->copy_from (data.get(), data->frames(), 0, delta_frames);
- data = padded;
- time -= delta;
- } else if (delta_frames < 0) {
- /* This data comes before the seek time. Throw some data away */
- Frame const to_discard = min (-delta_frames, static_cast<Frame> (data->frames()));
- Frame const to_keep = data->frames() - to_discard;
- if (to_keep == 0) {
- /* We have to throw all this data away, so keep _seek_reference and
- try again next time some data arrives.
- */
- return;
- }
- shared_ptr<AudioBuffers> trimmed (new AudioBuffers (data->channels(), to_keep));
- trimmed->copy_from (data.get(), to_keep, to_discard, 0);
- data = trimmed;
- time += ContentTime::from_frames (to_discard, frame_rate);
- }
- _seek_reference = optional<ContentTime> ();
- }
-
- if (!_audio_position) {
- _audio_position = time.frames (frame_rate);
- }
-
- DCPOMATIC_ASSERT (_audio_position.get() >= (_decoded_audio.frame + _decoded_audio.audio->frames()));
-
- add (data);
-}
-
-void
-AudioDecoder::add (shared_ptr<const AudioBuffers> data)
-{
- if (!_audio_position) {
- /* This should only happen when there is a seek followed by a flush, but
- we need to cope with it.
- */
- return;
- }
-
- /* Resize _decoded_audio to fit the new data */
- int new_size = 0;
- if (_decoded_audio.audio->frames() == 0) {
- /* There's nothing in there, so just store the new data */
- new_size = data->frames ();
- _decoded_audio.frame = _audio_position.get ();
- } else {
- /* Otherwise we need to extend _decoded_audio to include the new stuff */
- new_size = _audio_position.get() + data->frames() - _decoded_audio.frame;
- }
-
- _decoded_audio.audio->ensure_size (new_size);
- _decoded_audio.audio->set_frames (new_size);
-
- /* Copy new data in */
- _decoded_audio.audio->copy_from (data.get(), data->frames(), 0, _audio_position.get() - _decoded_audio.frame);
- _audio_position = _audio_position.get() + data->frames ();
-
- /* Limit the amount of data we keep in case nobody is asking for it */
- int const max_frames = _audio_content->resampled_audio_frame_rate () * 10;
- if (_decoded_audio.audio->frames() > max_frames) {
- int const to_remove = _decoded_audio.audio->frames() - max_frames;
- _decoded_audio.frame += to_remove;
- _decoded_audio.audio->move (to_remove, 0, max_frames);
- _decoded_audio.audio->set_frames (max_frames);
- }
+ _streams[stream]->audio (data, time);
}
void
AudioDecoder::flush ()
{
- if (!_resampler) {
- return;
- }
-
- shared_ptr<const AudioBuffers> b = _resampler->flush ();
- if (b) {
- add (b);
+ for (map<AudioStreamPtr, shared_ptr<AudioDecoderStream> >::const_iterator i = _streams.begin(); i != _streams.end(); ++i) {
+ i->second->flush ();
}
}
void
AudioDecoder::seek (ContentTime t, bool accurate)
{
- _audio_position.reset ();
- reset_decoded_audio ();
- if (accurate) {
- _seek_reference = t;
- }
- if (_processor) {
- _processor->flush ();
+ for (map<AudioStreamPtr, shared_ptr<AudioDecoderStream> >::const_iterator i = _streams.begin(); i != _streams.end(); ++i) {
+ i->second->seek (t, accurate);
}
}
#include "content.h"
#include "audio_content.h"
#include "content_audio.h"
+#include <boost/enable_shared_from_this.hpp>
class AudioBuffers;
-class Resampler;
+class AudioDecoderStream;
/** @class AudioDecoder.
* @brief Parent class for audio decoders.
*/
-class AudioDecoder : public virtual Decoder
+class AudioDecoder : public virtual Decoder, public boost::enable_shared_from_this<AudioDecoder>
{
public:
AudioDecoder (boost::shared_ptr<const AudioContent>);
* @param accurate true to try hard to return frames from exactly `frame', false if we don't mind nearby frames.
* @return Time-stamped audio data which may or may not be from the location (and of the length) requested.
*/
- boost::shared_ptr<ContentAudio> get_audio (Frame time, Frame length, bool accurate);
-
-protected:
+ ContentAudio get_audio (AudioStreamPtr stream, Frame time, Frame length, bool accurate);
- void seek (ContentTime time, bool accurate);
- void audio (boost::shared_ptr<const AudioBuffers>, ContentTime);
+protected:
+ void audio (AudioStreamPtr stream, boost::shared_ptr<const AudioBuffers>, ContentTime);
void flush ();
- void reset_decoded_audio ();
- void add (boost::shared_ptr<const AudioBuffers>);
-
+ void seek (ContentTime t, bool accurate);
+
+private:
boost::shared_ptr<const AudioContent> _audio_content;
- boost::shared_ptr<Resampler> _resampler;
- boost::shared_ptr<AudioProcessor> _processor;
- boost::optional<Frame> _audio_position;
- /** Currently-available decoded audio data */
- ContentAudio _decoded_audio;
- /** The time of an accurate seek after which we have not yet received any actual
- data at the seek time.
- */
- boost::optional<ContentTime> _seek_reference;
+ /** An AudioDecoderStream object to manage each stream in _audio_content */
+ std::map<AudioStreamPtr, boost::shared_ptr<AudioDecoderStream> > _streams;
};
#endif
--- /dev/null
+/*
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "audio_decoder_stream.h"
+#include "audio_buffers.h"
+#include "audio_processor.h"
+#include "audio_decoder.h"
+#include "resampler.h"
+#include "util.h"
+#include <iostream>
+
+#include "i18n.h"
+
+using std::list;
+using std::pair;
+using std::cout;
+using std::min;
+using std::max;
+using boost::optional;
+using boost::shared_ptr;
+
+AudioDecoderStream::AudioDecoderStream (shared_ptr<const AudioContent> content, AudioStreamPtr stream, AudioDecoder* decoder)
+ : _content (content)
+ , _stream (stream)
+ , _decoder (decoder)
+{
+ if (content->resampled_audio_frame_rate() != _stream->frame_rate()) {
+ _resampler.reset (new Resampler (_stream->frame_rate(), content->resampled_audio_frame_rate(), _stream->channels ()));
+ }
+
+ reset_decoded ();
+}
+
+void
+AudioDecoderStream::reset_decoded ()
+{
+ _decoded = ContentAudio (shared_ptr<AudioBuffers> (new AudioBuffers (_stream->channels(), 0)), 0);
+}
+
+ContentAudio
+AudioDecoderStream::get (Frame frame, Frame length, bool accurate)
+{
+ shared_ptr<ContentAudio> dec;
+
+ Frame const end = frame + length - 1;
+
+ if (frame < _decoded.frame || end > (_decoded.frame + length * 4)) {
+ /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+ seek (ContentTime::from_frames (frame, _content->resampled_audio_frame_rate()), accurate);
+ }
+
+ /* Offset of the data that we want from the start of _decoded.audio
+ (to be set up shortly)
+ */
+ Frame decoded_offset = 0;
+
+ /* Now enough pass() calls will either:
+ * (a) give us what we want, or
+ * (b) hit the end of the decoder.
+ *
+ * If we are being accurate, we want the right frames,
+ * otherwise any frames will do.
+ */
+ if (accurate) {
+ /* Keep stuffing data into _decoded until we have enough data, or the subclass does not want to give us any more */
+ while (
+ (_decoded.frame > frame || (_decoded.frame + _decoded.audio->frames()) < end) &&
+ !_decoder->pass (Decoder::PASS_REASON_AUDIO)
+ )
+ {}
+
+ decoded_offset = frame - _decoded.frame;
+ } else {
+ while (
+ _decoded.audio->frames() < length &&
+ !_decoder->pass (Decoder::PASS_REASON_AUDIO)
+ )
+ {}
+
+ /* Use decoded_offset of 0, as we don't really care what frames we return */
+ }
+
+ /* The amount of data available in _decoded.audio starting from `frame'. This could be -ve
+ if pass() returned true before we got enough data.
+ */
+ Frame const available = _decoded.audio->frames() - decoded_offset;
+
+ /* We will return either that, or the requested amount, whichever is smaller */
+ Frame const to_return = max ((Frame) 0, min (available, length));
+
+ /* Copy our data to the output */
+ shared_ptr<AudioBuffers> out (new AudioBuffers (_decoded.audio->channels(), to_return));
+ out->copy_from (_decoded.audio.get(), to_return, decoded_offset, 0);
+
+ Frame const remaining = max ((Frame) 0, available - to_return);
+
+ /* Clean up decoded; first, move the data after what we just returned to the start of the buffer */
+ _decoded.audio->move (decoded_offset + to_return, 0, remaining);
+ /* And set up the number of frames we have left */
+ _decoded.audio->set_frames (remaining);
+ /* Also bump where those frames are in terms of the content */
+ _decoded.frame += decoded_offset + to_return;
+
+ return ContentAudio (out, frame);
+}
+
+/** Audio timestamping is made hard by many factors, but perhaps the most entertaining is resampling.
+ * We have to assume that we are feeding continuous data into the resampler, and so we get continuous
+ * data out. Hence we do the timestamping here, post-resampler, just by counting samples.
+ *
+ * The time is passed in here so that after a seek we can set up our _position. The
+ * time is ignored once this has been done.
+ */
+void
+AudioDecoderStream::audio (shared_ptr<const AudioBuffers> data, ContentTime time)
+{
+ if (_resampler) {
+ data = _resampler->run (data);
+ }
+
+ Frame const frame_rate = _content->resampled_audio_frame_rate ();
+
+ if (_seek_reference) {
+ /* We've had an accurate seek and now we're seeing some data */
+ ContentTime const delta = time - _seek_reference.get ();
+ Frame const delta_frames = delta.frames (frame_rate);
+ if (delta_frames > 0) {
+ /* This data comes after the seek time. Pad the data with some silence. */
+ shared_ptr<AudioBuffers> padded (new AudioBuffers (data->channels(), data->frames() + delta_frames));
+ padded->make_silent ();
+ padded->copy_from (data.get(), data->frames(), 0, delta_frames);
+ data = padded;
+ time -= delta;
+ } else if (delta_frames < 0) {
+ /* This data comes before the seek time. Throw some data away */
+ Frame const to_discard = min (-delta_frames, static_cast<Frame> (data->frames()));
+ Frame const to_keep = data->frames() - to_discard;
+ if (to_keep == 0) {
+ /* We have to throw all this data away, so keep _seek_reference and
+ try again next time some data arrives.
+ */
+ return;
+ }
+ shared_ptr<AudioBuffers> trimmed (new AudioBuffers (data->channels(), to_keep));
+ trimmed->copy_from (data.get(), to_keep, to_discard, 0);
+ data = trimmed;
+ time += ContentTime::from_frames (to_discard, frame_rate);
+ }
+ _seek_reference = optional<ContentTime> ();
+ }
+
+ if (!_position) {
+ _position = time.frames (frame_rate);
+ }
+
+ DCPOMATIC_ASSERT (_position.get() >= (_decoded.frame + _decoded.audio->frames()));
+
+ add (data);
+}
+
+void
+AudioDecoderStream::add (shared_ptr<const AudioBuffers> data)
+{
+ if (!_position) {
+ /* This should only happen when there is a seek followed by a flush, but
+ we need to cope with it.
+ */
+ return;
+ }
+
+ /* Resize _decoded to fit the new data */
+ int new_size = 0;
+ if (_decoded.audio->frames() == 0) {
+ /* There's nothing in there, so just store the new data */
+ new_size = data->frames ();
+ _decoded.frame = _position.get ();
+ } else {
+ /* Otherwise we need to extend _decoded to include the new stuff */
+ new_size = _position.get() + data->frames() - _decoded.frame;
+ }
+
+ _decoded.audio->ensure_size (new_size);
+ _decoded.audio->set_frames (new_size);
+
+ /* Copy new data in */
+ _decoded.audio->copy_from (data.get(), data->frames(), 0, _position.get() - _decoded.frame);
+ _position = _position.get() + data->frames ();
+
+ /* Limit the amount of data we keep in case nobody is asking for it */
+ int const max_frames = _content->resampled_audio_frame_rate () * 10;
+ if (_decoded.audio->frames() > max_frames) {
+ int const to_remove = _decoded.audio->frames() - max_frames;
+ _decoded.frame += to_remove;
+ _decoded.audio->move (to_remove, 0, max_frames);
+ _decoded.audio->set_frames (max_frames);
+ }
+}
+
+void
+AudioDecoderStream::flush ()
+{
+ if (!_resampler) {
+ return;
+ }
+
+ shared_ptr<const AudioBuffers> b = _resampler->flush ();
+ if (b) {
+ add (b);
+ }
+}
+
+void
+AudioDecoderStream::seek (ContentTime t, bool accurate)
+{
+ _position.reset ();
+ reset_decoded ();
+ if (accurate) {
+ _seek_reference = t;
+ }
+}
--- /dev/null
+/*
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_AUDIO_DECODER_STREAM_H
+#define DCPOMATIC_AUDIO_DECODER_STREAM_H
+
+#include "audio_stream.h"
+#include "content_audio.h"
+#include <boost/shared_ptr.hpp>
+
+class AudioContent;
+class AudioDecoder;
+class Resampler;
+
+class AudioDecoderStream
+{
+public:
+ AudioDecoderStream (boost::shared_ptr<const AudioContent>, AudioStreamPtr, AudioDecoder* decoder);
+
+ ContentAudio get (Frame time, Frame length, bool accurate);
+ void audio (boost::shared_ptr<const AudioBuffers>, ContentTime);
+ void flush ();
+ void seek (ContentTime time, bool accurate);
+
+private:
+
+ void reset_decoded ();
+ void add (boost::shared_ptr<const AudioBuffers>);
+
+ boost::shared_ptr<const AudioContent> _content;
+ AudioStreamPtr _stream;
+ AudioDecoder* _decoder;
+ boost::shared_ptr<Resampler> _resampler;
+ boost::optional<Frame> _position;
+ /** Currently-available decoded audio data */
+ ContentAudio _decoded;
+ /** The time of an accurate seek after which we have not yet received any actual
+ data at the seek time.
+ */
+ boost::optional<ContentTime> _seek_reference;
+};
+
+#endif
*/
+#ifndef DCPOMATIC_AUDIO_EXAMINER_H
+#define DCPOMATIC_AUDIO_EXAMINER_H
+
/** @file src/lib/audio_examiner.h
* @brief AudioExaminer class.
*/
+#include "types.h"
+
/** @class AudioExaminer
* @brief Parent for classes which examine AudioContent for their pertinent details.
*/
virtual Frame audio_length () const = 0;
virtual int audio_frame_rate () const = 0;
};
+
+#endif
for (int i = 0; i < _content_channels; ++i) {
_gain[i].resize (MAX_DCP_AUDIO_CHANNELS);
}
+
+ _name.resize (_content_channels);
+
+ make_zero ();
}
void
-AudioMapping::make_default ()
+AudioMapping::make_zero ()
{
for (int i = 0; i < _content_channels; ++i) {
for (int j = 0; j < MAX_DCP_AUDIO_CHANNELS; ++j) {
_gain[i][j] = 0;
}
}
+}
+
+void
+AudioMapping::make_default ()
+{
+ make_zero ();
if (_content_channels == 1) {
/* Mono -> Centre */
}
}
+void
+AudioMapping::set_name (int channel, string name)
+{
+ _name[channel] = name;
+}
return _content_channels;
}
+ void set_name (int channel, std::string name);
+ std::string name (int channel) const {
+ return _name[channel];
+ }
+
std::string digest () const;
std::list<dcp::Channel> mapped_dcp_channels () const;
private:
void setup (int);
+ void make_zero ();
int _content_channels;
std::vector<std::vector<float> > _gain;
+ std::vector<std::string> _name;
};
#endif
--- /dev/null
+/*
+ Copyright (C) 2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "audio_stream.h"
+#include "audio_mapping.h"
+
+AudioStream::AudioStream (int frame_rate, int channels)
+ : _frame_rate (frame_rate)
+{
+ _mapping = AudioMapping (channels);
+}
+
+AudioStream::AudioStream (int frame_rate, AudioMapping mapping)
+ : _frame_rate (frame_rate)
+ , _mapping (mapping)
+{
+
+}
+
+void
+AudioStream::set_mapping (AudioMapping mapping)
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _mapping = mapping;
+}
+
+void
+AudioStream::set_frame_rate (int frame_rate)
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _frame_rate = frame_rate;
+}
+
+int
+AudioStream::channels () const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ return _mapping.content_channels ();
+}
--- /dev/null
+/*
+ Copyright (C) 2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_AUDIO_STREAM_H
+#define DCPOMATIC_AUDIO_STREAM_H
+
+#include "audio_mapping.h"
+#include <boost/thread/mutex.hpp>
+
+class audio_sampling_rate_test;
+
+class AudioStream
+{
+public:
+ AudioStream (int frame_rate, int channels);
+ AudioStream (int frame_rate, AudioMapping mapping);
+
+ void set_mapping (AudioMapping mapping);
+ void set_frame_rate (int frame_rate);
+
+ AudioMapping const & mapping () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _mapping;
+ }
+
+ AudioMapping & mapping () {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _mapping;
+ }
+
+ int frame_rate () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _frame_rate;
+ }
+
+ int channels () const;
+
+protected:
+ mutable boost::mutex _mutex;
+
+private:
+ friend class audio_sampling_rate_test;
+
+ int _frame_rate;
+ AudioMapping _mapping;
+};
+
+typedef boost::shared_ptr<AudioStream> AudioStreamPtr;
+
+#endif
vector<boost::filesystem::path> p = _paths;
lm.unlock ();
- /* Some content files are very big, so we use a poor's
+ /* Some content files are very big, so we use a poor man's
digest here: a MD5 of the first and last 1e6 bytes with the
size of the first file tacked on the end as a string.
*/
*/
+#ifndef DCPOMATIC_CONTENT_AUDIO_H
+#define DCPOMATIC_CONTENT_AUDIO_H
+
/** @file src/lib/content_audio.h
* @brief ContentAudio class.
*/
#include "audio_buffers.h"
+#include "types.h"
/** @class ContentAudio
* @brief A block of audio from a piece of content, with a timestamp as a frame within that content.
boost::shared_ptr<AudioBuffers> audio;
Frame frame;
};
+
+#endif
take_from_video_examiner (examiner);
take_from_audio_examiner (examiner);
- boost::mutex::scoped_lock lm (_mutex);
- _name = examiner->name ();
- _has_subtitles = examiner->has_subtitles ();
- _encrypted = examiner->encrypted ();
- _kdm_valid = examiner->kdm_valid ();
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _name = examiner->name ();
+ _has_subtitles = examiner->has_subtitles ();
+ _encrypted = examiner->encrypted ();
+ _kdm_valid = examiner->kdm_valid ();
+ }
if (could_be_played != can_be_played ()) {
signal_changed (DCPContentProperty::CAN_BE_PLAYED);
bool
DCPContent::can_be_played () const
{
+ boost::mutex::scoped_lock lm (_mutex);
return !_encrypted || _kdm_valid;
}
shared_ptr<const dcp::SoundFrame> sf = (*_reel)->main_sound()->mxf()->get_frame (entry_point + frame);
uint8_t const * from = sf->data ();
- int const channels = _dcp_content->audio_channels ();
+ int const channels = _dcp_content->audio_stream()->channels ();
int const frames = sf->size() / (3 * channels);
shared_ptr<AudioBuffers> data (new AudioBuffers (channels, frames));
for (int i = 0; i < frames; ++i) {
}
}
- audio (data, _next);
+ audio (_dcp_content->audio_stream(), data, _next);
}
/* XXX: subtitle */
return Time<S, O> (1);
}
+ static Time<S, O> min () {
+ return Time<S, O> (-INT64_MAX);
+ }
+
static Time<S, O> max () {
return Time<S, O> (INT64_MAX);
}
virtual ~Decoder () {}
protected:
-
+ friend class AudioDecoderStream;
+
/** Seek so that the next pass() will yield the next thing
* (video/sound frame, subtitle etc.) at or after the requested
* time. Pass accurate = true to try harder to ensure that, at worst,
return _format_context->streams[_video_stream]->codec;
}
-AVCodecContext *
-FFmpeg::audio_codec_context () const
-{
- if (!_ffmpeg_content->audio_stream ()) {
- return 0;
- }
-
- return _ffmpeg_content->audio_stream()->stream(_format_context)->codec;
-}
-
AVCodecContext *
FFmpeg::subtitle_codec_context () const
{
protected:
AVCodecContext* video_codec_context () const;
- AVCodecContext* audio_codec_context () const;
AVCodecContext* subtitle_codec_context () const;
boost::shared_ptr<const FFmpegContent> _ffmpeg_content;
FFmpegAudioStream::FFmpegAudioStream (cxml::ConstNodePtr node, int version)
: FFmpegStream (node)
- , _frame_rate (node->number_child<int> ("FrameRate"))
- , _channels (node->number_child<int64_t> ("Channels"))
- , _mapping (node->node_child ("Mapping"), version)
+ , AudioStream (node->number_child<int> ("FrameRate"), AudioMapping (node->node_child ("Mapping"), version))
{
first_audio = node->optional_number_child<double> ("FirstAudio");
}
FFmpegAudioStream::as_xml (xmlpp::Node* root) const
{
FFmpegStream::as_xml (root);
- root->add_child("FrameRate")->add_child_text (raw_convert<string> (_frame_rate));
- root->add_child("Channels")->add_child_text (raw_convert<string> (_channels));
+ root->add_child("FrameRate")->add_child_text (raw_convert<string> (frame_rate ()));
+ mapping().as_xml (root->add_child("Mapping"));
if (first_audio) {
- root->add_child("FirstAudio")->add_child_text (raw_convert<string> (first_audio.get().get()));
+ root->add_child("FirstAudio")->add_child_text (raw_convert<string> (first_audio.get ()));
}
- _mapping.as_xml (root->add_child("Mapping"));
}
#include "ffmpeg_stream.h"
#include "audio_mapping.h"
+#include "audio_stream.h"
#include "dcpomatic_time.h"
struct ffmpeg_pts_offset_test;
-class FFmpegAudioStream : public FFmpegStream
+class FFmpegAudioStream : public FFmpegStream, public AudioStream
{
public:
- FFmpegAudioStream (std::string n, int i, int f, int c)
- : FFmpegStream (n, i)
- , _frame_rate (f)
- , _channels (c)
- , _mapping (c)
- {
- _mapping.make_default ();
- }
+ FFmpegAudioStream (std::string name, int id, int frame_rate, int channels)
+ : FFmpegStream (name, id)
+ , AudioStream (frame_rate, channels)
+ {}
FFmpegAudioStream (cxml::ConstNodePtr, int);
void as_xml (xmlpp::Node *) const;
- int frame_rate () const {
- return _frame_rate;
- }
-
- int channels () const {
- return _channels;
- }
-
- AudioMapping mapping () const {
- return _mapping;
- }
-
- void set_mapping (AudioMapping m) {
- _mapping = m;
- }
+ /* XXX: should probably be locked */
boost::optional<ContentTime> first_audio;
/* Constructor for tests */
FFmpegAudioStream ()
: FFmpegStream ("", 0)
- , _frame_rate (0)
- , _channels (0)
- , _mapping (1)
+ , AudioStream (0, 0)
{}
-
- int _frame_rate;
- int _channels;
- AudioMapping _mapping;
};
extern "C" {
#include <libavformat/avformat.h>
}
+#include <boost/foreach.hpp>
#include "i18n.h"
int const FFmpegContentProperty::SUBTITLE_STREAMS = 100;
int const FFmpegContentProperty::SUBTITLE_STREAM = 101;
int const FFmpegContentProperty::AUDIO_STREAMS = 102;
-int const FFmpegContentProperty::AUDIO_STREAM = 103;
-int const FFmpegContentProperty::FILTERS = 104;
+int const FFmpegContentProperty::FILTERS = 103;
FFmpegContent::FFmpegContent (shared_ptr<const Film> f, boost::filesystem::path p)
: Content (f, p)
c = node->node_children ("AudioStream");
for (list<cxml::NodePtr>::const_iterator i = c.begin(); i != c.end(); ++i) {
_audio_streams.push_back (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (*i, version)));
- if ((*i)->optional_number_child<int> ("Selected")) {
- _audio_stream = _audio_streams.back ();
- }
}
c = node->node_children ("Filter");
if (fc->use_subtitles() && *(fc->_subtitle_stream.get()) != *(ref->_subtitle_stream.get())) {
throw JoinError (_("Content to be joined must use the same subtitle stream."));
}
-
- if (*(fc->_audio_stream.get()) != *(ref->_audio_stream.get())) {
- throw JoinError (_("Content to be joined must use the same audio stream."));
- }
}
_subtitle_streams = ref->subtitle_streams ();
_subtitle_stream = ref->subtitle_stream ();
- _audio_streams = ref->audio_streams ();
- _audio_stream = ref->audio_stream ();
+ _audio_streams = ref->ffmpeg_audio_streams ();
_first_video = ref->_first_video;
}
}
for (vector<shared_ptr<FFmpegAudioStream> >::const_iterator i = _audio_streams.begin(); i != _audio_streams.end(); ++i) {
- xmlpp::Node* t = node->add_child("AudioStream");
- if (_audio_stream && *i == _audio_stream) {
- t->add_child("Selected")->add_child_text("1");
- }
- (*i)->as_xml (t);
+ (*i)->as_xml (node->add_child("AudioStream"));
}
for (vector<Filter const *>::const_iterator i = _filters.begin(); i != _filters.end(); ++i) {
}
_audio_streams = examiner->audio_streams ();
+
if (!_audio_streams.empty ()) {
- _audio_stream = _audio_streams.front ();
+ _audio_streams.front()->mapping().make_default ();
}
_first_video = examiner->first_video ();
signal_changed (FFmpegContentProperty::SUBTITLE_STREAMS);
signal_changed (FFmpegContentProperty::SUBTITLE_STREAM);
signal_changed (FFmpegContentProperty::AUDIO_STREAMS);
- signal_changed (FFmpegContentProperty::AUDIO_STREAM);
- signal_changed (AudioContentProperty::AUDIO_CHANNELS);
}
string
string
FFmpegContent::technical_summary () const
{
- string as = "none";
- if (_audio_stream) {
- as = _audio_stream->technical_summary ();
+ string as = "";
+ BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_audio_streams ()) {
+ as += i->technical_summary () + " " ;
+ }
+
+ if (as.empty ()) {
+ as = "none";
}
string ss = "none";
+ VideoContent::technical_summary() + " - "
+ AudioContent::technical_summary() + " - "
+ String::compose (
- "ffmpeg: audio %1, subtitle %2, filters %3", as, ss, filt
+ "ffmpeg: audio %1 subtitle %2 filters %3", as, ss, filt
);
}
signal_changed (FFmpegContentProperty::SUBTITLE_STREAM);
}
-void
-FFmpegContent::set_audio_stream (shared_ptr<FFmpegAudioStream> s)
-{
- {
- boost::mutex::scoped_lock lm (_mutex);
- _audio_stream = s;
- }
-
- signal_changed (FFmpegContentProperty::AUDIO_STREAM);
-}
-
-int
-FFmpegContent::audio_channels () const
-{
- boost::mutex::scoped_lock lm (_mutex);
-
- if (!_audio_stream) {
- return 0;
- }
-
- return _audio_stream->channels ();
-}
-
-int
-FFmpegContent::audio_frame_rate () const
-{
- boost::mutex::scoped_lock lm (_mutex);
-
- if (!_audio_stream) {
- return 0;
- }
-
- return _audio_stream->frame_rate ();
-}
-
bool
operator== (FFmpegStream const & a, FFmpegStream const & b)
{
return DCPTime::from_frames (rint (video_length_after_3d_combine() * frc.factor()), film->video_frame_rate());
}
-AudioMapping
-FFmpegContent::audio_mapping () const
-{
- boost::mutex::scoped_lock lm (_mutex);
-
- if (!_audio_stream) {
- return AudioMapping ();
- }
-
- return _audio_stream->mapping ();
-}
-
void
FFmpegContent::set_filters (vector<Filter const *> const & filters)
{
signal_changed (FFmpegContentProperty::FILTERS);
}
-void
-FFmpegContent::set_audio_mapping (AudioMapping m)
-{
- audio_stream()->set_mapping (m);
- AudioContent::set_audio_mapping (m);
-}
-
string
FFmpegContent::identifier () const
{
return s.str ();
}
-boost::filesystem::path
-FFmpegContent::audio_analysis_path () const
-{
- shared_ptr<const Film> film = _film.lock ();
- if (!film) {
- return boost::filesystem::path ();
- }
-
- /* We need to include the stream ID in this path so that we get different
- analyses for each stream.
- */
-
- boost::filesystem::path p = AudioContent::audio_analysis_path ();
- if (audio_stream ()) {
- p = p.string() + "_" + audio_stream()->identifier ();
- }
- return p;
-}
-
list<ContentTimePeriod>
FFmpegContent::subtitles_during (ContentTimePeriod period, bool starting) const
{
}
}
-
+vector<AudioStreamPtr>
+FFmpegContent::audio_streams () const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+
+ vector<AudioStreamPtr> s;
+ copy (_audio_streams.begin(), _audio_streams.end(), back_inserter (s));
+ return s;
+}
class FFmpegSubtitleStream;
class FFmpegAudioStream;
struct ffmpeg_pts_offset_test;
+struct audio_sampling_rate_test;
class FFmpegContentProperty : public VideoContentProperty
{
static int const SUBTITLE_STREAMS;
static int const SUBTITLE_STREAM;
static int const AUDIO_STREAMS;
- static int const AUDIO_STREAM;
static int const FILTERS;
};
void set_default_colour_conversion ();
/* AudioContent */
- int audio_channels () const;
- int audio_frame_rate () const;
- AudioMapping audio_mapping () const;
- void set_audio_mapping (AudioMapping);
- boost::filesystem::path audio_analysis_path () const;
+ std::vector<AudioStreamPtr> audio_streams () const;
/* SubtitleContent */
bool has_subtitles () const;
return _subtitle_stream;
}
- std::vector<boost::shared_ptr<FFmpegAudioStream> > audio_streams () const {
+ std::vector<boost::shared_ptr<FFmpegAudioStream> > ffmpeg_audio_streams () const {
boost::mutex::scoped_lock lm (_mutex);
return _audio_streams;
}
-
- boost::shared_ptr<FFmpegAudioStream> audio_stream () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_stream;
- }
std::vector<Filter const *> filters () const {
boost::mutex::scoped_lock lm (_mutex);
}
void set_subtitle_stream (boost::shared_ptr<FFmpegSubtitleStream>);
- void set_audio_stream (boost::shared_ptr<FFmpegAudioStream>);
boost::optional<ContentTime> first_video () const {
boost::mutex::scoped_lock lm (_mutex);
private:
friend struct ffmpeg_pts_offset_test;
+ friend struct audio_sampling_rate_test;
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > _subtitle_streams;
boost::shared_ptr<FFmpegSubtitleStream> _subtitle_stream;
std::vector<boost::shared_ptr<FFmpegAudioStream> > _audio_streams;
- boost::shared_ptr<FFmpegAudioStream> _audio_stream;
boost::optional<ContentTime> _first_video;
/** Video filters that should be used when generating DCPs */
std::vector<Filter const *> _filters;
/*
- Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
* @brief A decoder using FFmpeg to decode content.
*/
-#include <stdexcept>
-#include <vector>
-#include <iomanip>
-#include <iostream>
-#include <stdint.h>
-#include <sndfile.h>
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-}
#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "film.h"
#include "timer.h"
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+}
+#include <boost/foreach.hpp>
+#include <stdexcept>
+#include <vector>
+#include <iomanip>
+#include <iostream>
+#include <stdint.h>
+#include <sndfile.h>
#include "i18n.h"
using std::min;
using std::pair;
using std::make_pair;
+using std::max;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
Then we remove big initial gaps in PTS and we allow our
insertion of black frames to work.
- We will do pts_to_use = pts_from_ffmpeg + pts_offset;
+ We will do:
+ audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
+ video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
*/
- bool const have_video = c->first_video();
- bool const have_audio = c->audio_stream () && c->audio_stream()->first_audio;
-
/* First, make one of them start at 0 */
- if (have_audio && have_video) {
- _pts_offset = - min (c->first_video().get(), c->audio_stream()->first_audio.get());
- } else if (have_video) {
- _pts_offset = - c->first_video().get();
- } else if (have_audio) {
- _pts_offset = - c->audio_stream()->first_audio.get();
+ vector<shared_ptr<FFmpegAudioStream> > streams = c->ffmpeg_audio_streams ();
+
+ _pts_offset = ContentTime::min ();
+
+ if (c->first_video ()) {
+ _pts_offset = - c->first_video().get ();
+ }
+
+ BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
+ if (i->first_audio) {
+ _pts_offset = max (_pts_offset, - i->first_audio.get ());
+ }
}
/* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
_pts_offset = ContentTime ();
}
- /* Now adjust both so that the video pts starts on a frame */
- if (have_video && have_audio) {
+ /* Now adjust so that the video pts starts on a frame */
+ if (c->first_video ()) {
ContentTime first_video = c->first_video().get() + _pts_offset;
ContentTime const old_first_video = first_video;
_pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
while (decode_video_packet ()) {}
- if (_ffmpeg_content->audio_stream()) {
- decode_audio_packet ();
- AudioDecoder::flush ();
- }
+ decode_audio_packet ();
+ AudioDecoder::flush ();
}
bool
if (si == _video_stream && !_ignore_video && reason != PASS_REASON_SUBTITLE) {
decode_video_packet ();
- } else if (fc->audio_stream() && fc->audio_stream()->uses_index (_format_context, si) && reason != PASS_REASON_SUBTITLE) {
+ } else if (reason != PASS_REASON_SUBTITLE) {
decode_audio_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
* Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
*/
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
+FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t** data, int size)
{
- DCPOMATIC_ASSERT (_ffmpeg_content->audio_channels());
- DCPOMATIC_ASSERT (bytes_per_audio_sample());
+ DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
/* Deinterleave and convert to float */
/* total_samples and frames will be rounded down here, so if there are stray samples at the end
of the block that do not form a complete sample or frame they will be dropped.
*/
- int const total_samples = size / bytes_per_audio_sample();
- int const frames = total_samples / _ffmpeg_content->audio_channels();
- shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames));
+ int const total_samples = size / bytes_per_audio_sample (stream);
+ int const frames = total_samples / stream->channels();
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
- switch (audio_sample_format()) {
+ switch (audio_sample_format (stream)) {
case AV_SAMPLE_FMT_U8:
{
uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
audio->data(channel)[sample] = float(*p++) / (1 << 23);
++channel;
- if (channel == _ffmpeg_content->audio_channels()) {
+ if (channel == stream->channels()) {
channel = 0;
++sample;
}
audio->data(channel)[sample] = float(*p++) / (1 << 15);
++channel;
- if (channel == _ffmpeg_content->audio_channels()) {
+ if (channel == stream->channels()) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_S16P:
{
int16_t** p = reinterpret_cast<int16_t **> (data);
- for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
+ for (int i = 0; i < stream->channels(); ++i) {
for (int j = 0; j < frames; ++j) {
audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
}
audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
++channel;
- if (channel == _ffmpeg_content->audio_channels()) {
+ if (channel == stream->channels()) {
channel = 0;
++sample;
}
audio->data(channel)[sample] = *p++;
++channel;
- if (channel == _ffmpeg_content->audio_channels()) {
+ if (channel == stream->channels()) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_FLTP:
{
float** p = reinterpret_cast<float**> (data);
- for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
+ for (int i = 0; i < stream->channels(); ++i) {
memcpy (audio->data(i), p[i], frames * sizeof(float));
}
}
break;
default:
- throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format())));
+ throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
}
return audio;
}
AVSampleFormat
-FFmpegDecoder::audio_sample_format () const
+FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
- if (!_ffmpeg_content->audio_stream()) {
- return (AVSampleFormat) 0;
- }
-
- return audio_codec_context()->sample_fmt;
+ return stream->stream (_format_context)->codec->sample_fmt;
}
int
-FFmpegDecoder::bytes_per_audio_sample () const
+FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
- return av_get_bytes_per_sample (audio_sample_format ());
+ return av_get_bytes_per_sample (audio_sample_format (stream));
}
void
av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), 0);
avcodec_flush_buffers (video_codec_context());
- if (audio_codec_context ()) {
- avcodec_flush_buffers (audio_codec_context ());
- }
+
+ /* XXX: should be flushing audio buffers? */
+
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
}
*/
AVPacket copy_packet = _packet;
+
+ /* XXX: inefficient */
+ vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
+ vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
+ while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
+ ++stream;
+ }
+
+ if (stream == streams.end ()) {
+ /* The packet's stream may not be an audio one; just ignore it in this method if so */
+ return;
+ }
while (copy_packet.size > 0) {
int frame_finished;
- int decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, ©_packet);
+ int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, ©_packet);
if (decode_result < 0) {
/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
if (frame_finished) {
ContentTime const ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
- av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base))
+ av_q2d ((*stream)->stream (_format_context)->time_base))
+ _pts_offset;
int const data_size = av_samples_get_buffer_size (
- 0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
+ 0, (*stream)->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (*stream), 1
);
- audio (deinterleave_audio (_frame->data, data_size), ct);
+ audio (*stream, deinterleave_audio (*stream, _frame->data, data_size), ct);
}
copy_packet.data += decode_result;
class Log;
class FilterGraph;
+class FFmpegAudioStream;
struct ffmpeg_pts_offset_test;
/** @class FFmpegDecoder
void seek (ContentTime time, bool);
void flush ();
- AVSampleFormat audio_sample_format () const;
- int bytes_per_audio_sample () const;
+ AVSampleFormat audio_sample_format (boost::shared_ptr<FFmpegAudioStream> stream) const;
+ int bytes_per_audio_sample (boost::shared_ptr<FFmpegAudioStream> stream) const;
bool decode_video_packet ();
void decode_audio_packet ();
void decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period);
void maybe_add_subtitle ();
- boost::shared_ptr<AudioBuffers> deinterleave_audio (uint8_t** data, int size);
+ boost::shared_ptr<AudioBuffers> deinterleave_audio (boost::shared_ptr<FFmpegAudioStream> stream, uint8_t** data, int size);
std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
{
if (p == VideoContentProperty::VIDEO_FRAME_RATE) {
set_video_frame_rate (_playlist->best_dcp_frame_rate ());
- } else if (
- p == AudioContentProperty::AUDIO_MAPPING ||
- p == AudioContentProperty::AUDIO_CHANNELS) {
+ } else if (p == AudioContentProperty::AUDIO_STREAMS) {
signal_changed (NAME);
}
shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
DCPOMATIC_ASSERT (decoder);
- if (content->audio_frame_rate() == 0) {
- /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
- * audio stream).
- */
- continue;
- }
-
/* The time that we should request from the content */
DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
Frame request_frames = length_frames;
Frame const content_frame = dcp_to_content_audio (*i, request);
- /* Audio from this piece's decoder (which might be more or less than what we asked for) */
- shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, request_frames, accurate);
-
- /* Gain */
- if (content->audio_gain() != 0) {
- shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
- gain->apply_gain (content->audio_gain ());
- all->audio = gain;
- }
+ BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {
+
+ /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
+ ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);
+
+ /* Gain */
+ if (content->audio_gain() != 0) {
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
+ gain->apply_gain (content->audio_gain ());
+ all.audio = gain;
+ }
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
- dcp_mapped->make_silent ();
- AudioMapping map = content->audio_mapping ();
- for (int i = 0; i < map.content_channels(); ++i) {
- for (int j = 0; j < _film->audio_channels(); ++j) {
- if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
- dcp_mapped->accumulate_channel (
- all->audio.get(),
- i,
- j,
- map.get (i, static_cast<dcp::Channel> (j))
- );
+ /* Remap channels */
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
+ dcp_mapped->make_silent ();
+ AudioMapping map = j->mapping ();
+ for (int i = 0; i < map.content_channels(); ++i) {
+ for (int j = 0; j < _film->audio_channels(); ++j) {
+ if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
+ dcp_mapped->accumulate_channel (
+ all.audio.get(),
+ i,
+ j,
+ map.get (i, static_cast<dcp::Channel> (j))
+ );
+ }
}
}
- }
- all->audio = dcp_mapped;
+ all.audio = dcp_mapped;
- audio->accumulate_frames (
- all->audio.get(),
- content_frame - all->frame,
- offset.frames (_film->audio_frame_rate()),
- min (Frame (all->audio->frames()), request_frames)
- );
+ audio->accumulate_frames (
+ all.audio.get(),
+ content_frame - all.frame,
+ offset.frames (_film->audio_frame_rate()),
+ min (Frame (all.audio->frames()), request_frames)
+ );
+ }
}
return audio;
/*
- Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
using std::string;
using std::cout;
+using std::vector;
using boost::shared_ptr;
SingleStreamAudioContent::SingleStreamAudioContent (shared_ptr<const Film> f)
: Content (f)
, AudioContent (f)
- , _audio_channels (0)
- , _audio_length (0)
- , _audio_frame_rate (0)
{
}
SingleStreamAudioContent::SingleStreamAudioContent (shared_ptr<const Film> f, boost::filesystem::path p)
: Content (f, p)
, AudioContent (f, p)
- , _audio_channels (0)
- , _audio_length (0)
- , _audio_frame_rate (0)
{
}
SingleStreamAudioContent::SingleStreamAudioContent (shared_ptr<const Film> f, cxml::ConstNodePtr node, int version)
: Content (f, node)
, AudioContent (f, node)
- , _audio_mapping (node->node_child ("AudioMapping"), version)
+ , _audio_stream (new AudioStream (node->number_child<int> ("AudioFrameRate"), AudioMapping (node->node_child ("AudioMapping"), version)))
{
- _audio_channels = node->number_child<int> ("AudioChannels");
- _audio_length = node->number_child<Frame> ("AudioLength");
- _audio_frame_rate = node->number_child<int> ("AudioFrameRate");
-}
-
-void
-SingleStreamAudioContent::set_audio_mapping (AudioMapping m)
-{
- {
- boost::mutex::scoped_lock lm (_mutex);
- _audio_mapping = m;
- }
- AudioContent::set_audio_mapping (m);
}
-
void
SingleStreamAudioContent::as_xml (xmlpp::Node* node) const
{
AudioContent::as_xml (node);
- node->add_child("AudioChannels")->add_child_text (raw_convert<string> (audio_channels ()));
- node->add_child("AudioLength")->add_child_text (raw_convert<string> (audio_length ()));
- node->add_child("AudioFrameRate")->add_child_text (raw_convert<string> (audio_frame_rate ()));
- _audio_mapping.as_xml (node->add_child("AudioMapping"));
+ node->add_child("AudioFrameRate")->add_child_text (raw_convert<string> (audio_stream()->frame_rate ()));
+ audio_stream()->mapping().as_xml (node->add_child("AudioMapping"));
}
void
{
{
boost::mutex::scoped_lock lm (_mutex);
- _audio_channels = examiner->audio_channels ();
- _audio_length = examiner->audio_length ();
- _audio_frame_rate = examiner->audio_frame_rate ();
+ _audio_stream.reset (new AudioStream (examiner->audio_frame_rate(), examiner->audio_channels ()));
+ _audio_stream->mapping().make_default ();
}
- signal_changed (AudioContentProperty::AUDIO_CHANNELS);
- signal_changed (AudioContentProperty::AUDIO_FRAME_RATE);
-
- int const p = processed_audio_channels ();
+ signal_changed (AudioContentProperty::AUDIO_STREAMS);
+}
- {
- boost::mutex::scoped_lock lm (_mutex);
- /* XXX: do this in signal_changed...? */
- _audio_mapping = AudioMapping (p);
- _audio_mapping.make_default ();
- }
-
- signal_changed (AudioContentProperty::AUDIO_MAPPING);
+vector<AudioStreamPtr>
+SingleStreamAudioContent::audio_streams () const
+{
+ vector<AudioStreamPtr> s;
+ s.push_back (_audio_stream);
+ return s;
}
void as_xml (xmlpp::Node* node) const;
- int audio_channels () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_channels;
- }
-
- Frame audio_length () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_length;
- }
-
- int audio_frame_rate () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_frame_rate;
- }
+ std::vector<AudioStreamPtr> audio_streams () const;
- AudioMapping audio_mapping () const {
- boost::mutex::scoped_lock lm (_mutex);
- return _audio_mapping;
+ AudioStreamPtr audio_stream () const {
+ return _audio_stream;
}
-
- void set_audio_mapping (AudioMapping);
-
+
void take_from_audio_examiner (boost::shared_ptr<AudioExaminer>);
protected:
- int _audio_channels;
- Frame _audio_length;
- int _audio_frame_rate;
- AudioMapping _audio_mapping;
+ boost::shared_ptr<AudioStream> _audio_stream;
};
#endif
--- /dev/null
+/*
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "sndfile_base.h"
+#include "sndfile_content.h"
+#include "exceptions.h"
+
+#include "i18n.h"
+
+using boost::shared_ptr;
+
+Sndfile::Sndfile (shared_ptr<const SndfileContent> c)
+ : _sndfile_content (c)
+{
+ _info.format = 0;
+
+ /* Here be monsters. See fopen_boost for similar shenanigans */
+#ifdef DCPOMATIC_WINDOWS
+ _sndfile = sf_wchar_open (_sndfile_content->path(0).c_str(), SFM_READ, &_info);
+#else
+ _sndfile = sf_open (_sndfile_content->path(0).string().c_str(), SFM_READ, &_info);
+#endif
+
+ if (!_sndfile) {
+ throw DecodeError (_("could not open audio file for reading"));
+ }
+}
+
+Sndfile::~Sndfile ()
+{
+ sf_close (_sndfile);
+}
--- /dev/null
+/*
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_SNDFILE_BASE_H
+#define DCPOMATIC_SNDFILE_BASE_H
+
+#include <sndfile.h>
+#include <boost/shared_ptr.hpp>
+
+class SndfileContent;
+
+class Sndfile
+{
+public:
+ Sndfile (boost::shared_ptr<const SndfileContent> content);
+ virtual ~Sndfile ();
+
+protected:
+ boost::shared_ptr<const SndfileContent> _sndfile_content;
+ SNDFILE* _sndfile;
+ SF_INFO _info;
+};
+
+#endif
*/
-#include <libcxml/cxml.h>
#include "sndfile_content.h"
#include "sndfile_decoder.h"
+#include "sndfile_examiner.h"
#include "film.h"
#include "compose.hpp"
#include "job.h"
#include "util.h"
#include "safe_stringstream.h"
+#include "raw_convert.h"
+#include <libcxml/cxml.h>
#include "i18n.h"
SndfileContent::SndfileContent (shared_ptr<const Film> f, cxml::ConstNodePtr node, int version)
: Content (f, node)
, SingleStreamAudioContent (f, node, version)
+ , _audio_length (node->number_child<int64_t> ("AudioLength"))
{
-
+
}
void
node->add_child("Type")->add_child_text ("Sndfile");
Content::as_xml (node);
SingleStreamAudioContent::as_xml (node);
+ node->add_child("AudioLength")->add_child_text (raw_convert<string> (audio_length ()));
}
{
job->set_progress_unknown ();
Content::examine (job);
- shared_ptr<AudioExaminer> dec (new SndfileDecoder (shared_from_this()));
+ shared_ptr<AudioExaminer> dec (new SndfileExaminer (shared_from_this ()));
take_from_audio_examiner (dec);
}
+void
+SndfileContent::take_from_audio_examiner (shared_ptr<AudioExaminer> examiner)
+{
+ SingleStreamAudioContent::take_from_audio_examiner (examiner);
+
+ boost::mutex::scoped_lock lm (_mutex);
+ _audio_length = examiner->audio_length ();
+}
+
DCPTime
SndfileContent::full_length () const
{
shared_ptr<const Film> film = _film.lock ();
DCPOMATIC_ASSERT (film);
FrameRateChange const frc = film->active_frame_rate_change (position ());
- return DCPTime::from_frames (audio_length() / frc.speed_up, audio_frame_rate ());
+ return DCPTime::from_frames (audio_length() / frc.speed_up, audio_stream()->frame_rate ());
}
/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
std::string technical_summary () const;
std::string information () const;
void as_xml (xmlpp::Node *) const;
+
+ void take_from_audio_examiner (boost::shared_ptr<AudioExaminer>);
static bool valid_file (boost::filesystem::path);
+
+private:
+ Frame audio_length () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _audio_length;
+ }
+
+ Frame _audio_length;
};
#endif
using boost::shared_ptr;
SndfileDecoder::SndfileDecoder (shared_ptr<const SndfileContent> c)
- : AudioDecoder (c)
- , _sndfile_content (c)
+ : Sndfile (c)
+ , AudioDecoder (c)
+ , _done (0)
+ , _remaining (_info.frames)
, _deinterleave_buffer (0)
{
- _info.format = 0;
-
- /* Here be monsters. See fopen_boost for similar shenanigans */
-#ifdef DCPOMATIC_WINDOWS
- _sndfile = sf_wchar_open (_sndfile_content->path(0).c_str(), SFM_READ, &_info);
-#else
- _sndfile = sf_open (_sndfile_content->path(0).string().c_str(), SFM_READ, &_info);
-#endif
- if (!_sndfile) {
- throw DecodeError (_("could not open audio file for reading"));
- }
-
- _done = 0;
- _remaining = _info.frames;
}
SndfileDecoder::~SndfileDecoder ()
{
- sf_close (_sndfile);
delete[] _deinterleave_buffer;
}
/* Do things in half second blocks as I think there may be limits
to what FFmpeg (and in particular the resampler) can cope with.
*/
- sf_count_t const block = _sndfile_content->audio_frame_rate() / 2;
+ sf_count_t const block = _sndfile_content->audio_stream()->frame_rate() / 2;
sf_count_t const this_time = min (block, _remaining);
- int const channels = _sndfile_content->audio_channels ();
+ int const channels = _sndfile_content->audio_stream()->channels ();
shared_ptr<AudioBuffers> data (new AudioBuffers (channels, this_time));
- if (_sndfile_content->audio_channels() == 1) {
+ if (_sndfile_content->audio_stream()->channels() == 1) {
/* No de-interleaving required */
sf_read_float (_sndfile, data->data(0), this_time);
} else {
}
data->set_frames (this_time);
- audio (data, ContentTime::from_frames (_done, audio_frame_rate ()));
+ audio (_sndfile_content->audio_stream (), data, ContentTime::from_frames (_done, _info.samplerate));
_done += this_time;
_remaining -= this_time;
return _remaining == 0;
}
-int
-SndfileDecoder::audio_channels () const
-{
- return _info.channels;
-}
-
-Frame
-SndfileDecoder::audio_length () const
-{
- return _info.frames;
-}
-
-int
-SndfileDecoder::audio_frame_rate () const
-{
- return _info.samplerate;
-}
-
void
SndfileDecoder::seek (ContentTime t, bool accurate)
{
AudioDecoder::seek (t, accurate);
- _done = t.frames (audio_frame_rate ());
+ _done = t.frames (_info.samplerate);
_remaining = _info.frames - _done;
}
#include "decoder.h"
#include "audio_decoder.h"
#include "audio_examiner.h"
-#include <sndfile.h>
+#include "sndfile_base.h"
class SndfileContent;
-class SndfileDecoder : public AudioDecoder, public AudioExaminer
+class SndfileDecoder : public Sndfile, public AudioDecoder
{
public:
SndfileDecoder (boost::shared_ptr<const SndfileContent> c);
~SndfileDecoder ();
- int audio_channels () const;
- Frame audio_length () const;
- int audio_frame_rate () const;
-
private:
bool pass (PassReason);
void seek (ContentTime, bool);
- boost::shared_ptr<const SndfileContent> _sndfile_content;
- SNDFILE* _sndfile;
- SF_INFO _info;
int64_t _done;
int64_t _remaining;
float* _deinterleave_buffer;
--- /dev/null
+/*
+ Copyright (C) 2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "sndfile_examiner.h"
+
+using boost::shared_ptr;
+
+SndfileExaminer::SndfileExaminer (shared_ptr<const SndfileContent> content)
+ : Sndfile (content)
+{
+
+}
+
+int
+SndfileExaminer::audio_channels () const
+{
+ return _info.channels;
+}
+
+Frame
+SndfileExaminer::audio_length () const
+{
+ return _info.frames;
+}
+
+int
+SndfileExaminer::audio_frame_rate () const
+{
+ return _info.samplerate;
+}
--- /dev/null
+/*
+ Copyright (C) 2015 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "sndfile_base.h"
+#include "audio_examiner.h"
+
+class SndfileExaminer : public Sndfile, public AudioExaminer
+{
+public:
+ SndfileExaminer (boost::shared_ptr<const SndfileContent> content);
+
+ int audio_channels () const;
+ Frame audio_length () const;
+ int audio_frame_rate () const;
+};
audio_buffers.cc
audio_content.cc
audio_decoder.cc
+ audio_decoder_stream.cc
audio_filter.cc
audio_mapping.cc
audio_processor.cc
+ audio_stream.cc
cinema.cc
cinema_sound_processor.cc
colour_conversion.cc
server.cc
server_finder.cc
single_stream_audio_content.cc
+ sndfile_base.cc
sndfile_content.cc
sndfile_decoder.cc
+ sndfile_examiner.cc
subrip.cc
subrip_content.cc
subrip_decoder.cc
if (p == AudioContentProperty::AUDIO_GAIN) {
_plot->set_gain (_content->audio_gain ());
setup_peak_time ();
- } else if (p == AudioContentProperty::AUDIO_MAPPING) {
+ } else if (p == AudioContentProperty::AUDIO_STREAMS) {
try_to_load_analysis ();
}
}
#include "lib/config.h"
#include "lib/ffmpeg_content.h"
-#include "lib/ffmpeg_audio_stream.h"
-#include "lib/audio_processor.h"
#include "lib/cinema_sound_processor.h"
#include "audio_dialog.h"
#include "audio_panel.h"
add_label_to_grid_bag_sizer (grid, this, _("ms"), false, wxGBPosition (r, 2));
++r;
- add_label_to_grid_bag_sizer (grid, this, _("Stream"), true, wxGBPosition (r, 0));
- _stream = new wxChoice (this, wxID_ANY);
- grid->Add (_stream, wxGBPosition (r, 1), wxGBSpan (1, 3), wxEXPAND);
- ++r;
-
- add_label_to_grid_bag_sizer (grid, this, _("Process with"), true, wxGBPosition (r, 0));
- _processor = new wxChoice (this, wxID_ANY);
- setup_processors ();
- grid->Add (_processor, wxGBPosition (r, 1), wxGBSpan (1, 3), wxEXPAND);
- ++r;
-
_mapping = new AudioMappingView (this);
_sizer->Add (_mapping, 1, wxEXPAND | wxALL, 6);
++r;
_gain->wrapped()->SetIncrement (0.5);
_delay->wrapped()->SetRange (-1000, 1000);
- _stream->Bind (wxEVT_COMMAND_CHOICE_SELECTED, boost::bind (&AudioPanel::stream_changed, this));
_show->Bind (wxEVT_COMMAND_BUTTON_CLICKED, boost::bind (&AudioPanel::show_clicked, this));
_gain_calculate_button->Bind (wxEVT_COMMAND_BUTTON_CLICKED, boost::bind (&AudioPanel::gain_calculate_button_clicked, this));
- _processor->Bind (wxEVT_COMMAND_CHOICE_SELECTED, boost::bind (&AudioPanel::processor_changed, this));
_mapping_connection = _mapping->Changed.connect (boost::bind (&AudioPanel::mapping_changed, this, _1));
}
fcs = dynamic_pointer_cast<FFmpegContent> (acs);
}
- if (property == AudioContentProperty::AUDIO_MAPPING) {
+ if (property == AudioContentProperty::AUDIO_STREAMS) {
_mapping->set (acs ? acs->audio_mapping () : AudioMapping ());
_sizer->Layout ();
- } else if (property == AudioContentProperty::AUDIO_FRAME_RATE) {
setup_description ();
- } else if (property == FFmpegContentProperty::AUDIO_STREAM) {
- _mapping->set (acs ? acs->audio_mapping () : AudioMapping ());
- _sizer->Layout ();
- } else if (property == FFmpegContentProperty::AUDIO_STREAMS) {
- if (fcs) {
- vector<pair<string, string> > data;
- BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, fcs->audio_streams ()) {
- data.push_back (make_pair (i->name, i->identifier ()));
- }
- checked_set (_stream, data);
-
- if (fcs->audio_stream()) {
- checked_set (_stream, fcs->audio_stream()->identifier ());
- }
- } else {
- _stream->Clear ();
- }
- } else if (property == AudioContentProperty::AUDIO_PROCESSOR) {
- if (acs) {
- checked_set (_processor, acs->audio_processor() ? acs->audio_processor()->id() : N_("none"));
- } else {
- checked_set (_processor, N_("none"));
- }
}
}
_audio_dialog->set_content (ac.front ());
}
-void
-AudioPanel::stream_changed ()
-{
- FFmpegContentList fc = _parent->selected_ffmpeg ();
- if (fc.size() != 1) {
- return;
- }
-
- shared_ptr<FFmpegContent> fcs = fc.front ();
-
- if (_stream->GetSelection() == -1) {
- return;
- }
-
- vector<shared_ptr<FFmpegAudioStream> > a = fcs->audio_streams ();
- vector<shared_ptr<FFmpegAudioStream> >::iterator i = a.begin ();
- string const s = string_client_data (_stream->GetClientObject (_stream->GetSelection ()));
- while (i != a.end() && (*i)->identifier () != s) {
- ++i;
- }
-
- if (i != a.end ()) {
- fcs->set_audio_stream (*i);
- }
-}
-
-void
-AudioPanel::processor_changed ()
-{
- string const s = string_client_data (_processor->GetClientObject (_processor->GetSelection ()));
- AudioProcessor const * p = 0;
- if (s != wx_to_std (N_("none"))) {
- p = AudioProcessor::from_id (s);
- }
-
- AudioContentList c = _parent->selected_audio ();
- for (AudioContentList::const_iterator i = c.begin(); i != c.end(); ++i) {
- (*i)->set_audio_processor (p);
- }
-}
-
void
AudioPanel::setup_description ()
{
_gain_calculate_button->Enable (sel.size() == 1);
_show->Enable (sel.size() == 1);
- _stream->Enable (sel.size() == 1);
- _processor->Enable (!sel.empty());
_mapping->Enable (sel.size() == 1);
- setup_processors ();
-
- film_content_changed (AudioContentProperty::AUDIO_MAPPING);
- film_content_changed (AudioContentProperty::AUDIO_PROCESSOR);
- film_content_changed (AudioContentProperty::AUDIO_FRAME_RATE);
- film_content_changed (FFmpegContentProperty::AUDIO_STREAM);
film_content_changed (FFmpegContentProperty::AUDIO_STREAMS);
}
-
-void
-AudioPanel::setup_processors ()
-{
- AudioContentList sel = _parent->selected_audio ();
-
- _processor->Clear ();
- list<AudioProcessor const *> ap = AudioProcessor::all ();
- _processor->Append (_("None"), new wxStringClientData (N_("none")));
- for (list<AudioProcessor const *>::const_iterator i = ap.begin(); i != ap.end(); ++i) {
-
- AudioContentList::const_iterator j = sel.begin();
- while (j != sel.end() && (*i)->in_channels().includes ((*j)->audio_channels ())) {
- ++j;
- }
-
- if (j == sel.end ()) {
- _processor->Append (std_to_wx ((*i)->name ()), new wxStringClientData (std_to_wx ((*i)->id ())));
- }
- }
-}
private:
void gain_calculate_button_clicked ();
void show_clicked ();
- void stream_changed ();
void mapping_changed (AudioMapping);
- void processor_changed ();
- void setup_processors ();
void setup_description ();
ContentSpinCtrlDouble<AudioContent>* _gain;
wxButton* _gain_calculate_button;
wxButton* _show;
ContentSpinCtrl<AudioContent>* _delay;
- wxChoice* _stream;
- wxChoice* _processor;
AudioMappingView* _mapping;
wxStaticText* _description;
AudioDialog* _audio_dialog;
);
}
- shared_ptr<AudioContent> audio = dynamic_pointer_cast<AudioContent> (content);
- if (audio) {
- add_property (
- _("Audio channels"),
- std_to_wx (raw_convert<string> (audio->audio_channels ()))
- );
- }
-
+ /* XXX: this could be better wrt audio streams */
+
shared_ptr<SingleStreamAudioContent> single = dynamic_pointer_cast<SingleStreamAudioContent> (content);
if (single) {
add_property (
- _("Audio length"),
- std_to_wx (raw_convert<string> (single->audio_length())) + " " + _("audio frames")
+ _("Audio channels"),
+ std_to_wx (raw_convert<string> (single->audio_stream()->channels ()))
);
}
void
DCPPanel::film_content_changed (int property)
{
- if (property == FFmpegContentProperty::AUDIO_STREAM ||
+ if (property == FFmpegContentProperty::AUDIO_STREAMS ||
property == SubtitleContentProperty::USE_SUBTITLES ||
property == VideoContentProperty::VIDEO_SCALE) {
setup_dcp_name ();
setup_pixels_per_second ();
}
Refresh ();
- } else if (property == AudioContentProperty::AUDIO_MAPPING) {
+ } else if (property == AudioContentProperty::AUDIO_STREAMS) {
recreate_views ();
}
}
#include <boost/test/unit_test.hpp>
#include "test.h"
#include "lib/audio_decoder.h"
-#include "lib/audio_content.h"
+#include "lib/single_stream_audio_content.h"
using std::string;
using std::cout;
using std::min;
using boost::shared_ptr;
-class TestAudioContent : public AudioContent
+class TestAudioContent : public SingleStreamAudioContent
{
public:
- TestAudioContent (shared_ptr<Film> film)
+ TestAudioContent (shared_ptr<const Film> film)
: Content (film)
- , AudioContent (film, DCPTime ())
- {}
-
- string summary () const {
- return "";
+ , SingleStreamAudioContent (film)
+ {
+ _audio_stream.reset (new AudioStream (48000, 2));
}
- string information () const {
+ std::string summary () const {
return "";
}
DCPTime full_length () const {
- return DCPTime::from_seconds (float (audio_length()) / audio_frame_rate ());
+ return DCPTime::from_seconds (float (audio_length()) / audio_stream()->frame_rate ());
}
-
- int audio_channels () const {
- return 2;
- }
-
+
Frame audio_length () const {
- return rint (61.2942 * audio_frame_rate ());
- }
-
- int audio_frame_rate () const {
- return 48000;
+ return rint (61.2942 * audio_stream()->frame_rate ());
}
-
- AudioMapping audio_mapping () const {
- return AudioMapping (audio_channels ());
- }
-
- void set_audio_mapping (AudioMapping) {}
};
class TestAudioDecoder : public AudioDecoder
_test_audio_content->audio_length() - _position
);
- shared_ptr<AudioBuffers> buffers (new AudioBuffers (_audio_content->audio_channels(), N));
- for (int i = 0; i < _audio_content->audio_channels(); ++i) {
+ shared_ptr<AudioBuffers> buffers (new AudioBuffers (_test_audio_content->audio_stream()->channels(), N));
+ for (int i = 0; i < _test_audio_content->audio_stream()->channels(); ++i) {
for (int j = 0; j < N; ++j) {
buffers->data(i)[j] = j + _position;
}
}
- audio (buffers, ContentTime::from_frames (_position, _audio_content->resampled_audio_frame_rate ()));
+ audio (_test_audio_content->audio_stream(), buffers, ContentTime::from_frames (_position, 48000));
_position += N;
return N < 2000;
void seek (ContentTime t, bool accurate)
{
AudioDecoder::seek (t, accurate);
- _position = t.frames (_audio_content->resampled_audio_frame_rate ());
+ _position = t.frames (_test_audio_content->resampled_audio_frame_rate ());
}
private:
shared_ptr<TestAudioContent> content;
shared_ptr<TestAudioDecoder> decoder;
-static shared_ptr<ContentAudio>
+static ContentAudio
get (Frame from, Frame length)
{
decoder->seek (ContentTime::from_frames (from, content->resampled_audio_frame_rate ()), true);
- shared_ptr<ContentAudio> ca = decoder->get_audio (from, length, true);
- BOOST_CHECK_EQUAL (ca->frame, from);
+ ContentAudio ca = decoder->get_audio (content->audio_stream(), from, length, true);
+ BOOST_CHECK_EQUAL (ca.frame, from);
return ca;
}
static void
check (Frame from, Frame length)
{
- shared_ptr<ContentAudio> ca = get (from, length);
- for (int i = 0; i < content->audio_channels(); ++i) {
+ ContentAudio ca = get (from, length);
+ for (int i = 0; i < content->audio_stream()->channels(); ++i) {
for (int j = 0; j < length; ++j) {
- BOOST_CHECK_EQUAL (ca->audio->data(i)[j], j + from);
- assert (ca->audio->data(i)[j] == j + from);
+ BOOST_REQUIRE_EQUAL (ca.audio->data(i)[j], j + from);
}
}
}
Frame const from = content->resampled_audio_frame_rate() * 61;
Frame const length = content->resampled_audio_frame_rate() * 4;
- shared_ptr<ContentAudio> ca = get (from, length);
+ ContentAudio ca = get (from, length);
- for (int i = 0; i < content->audio_channels(); ++i) {
- for (int j = 0; j < ca->audio->frames(); ++j) {
- BOOST_REQUIRE_EQUAL (ca->audio->data(i)[j], j + from);
+ for (int i = 0; i < content->audio_stream()->channels(); ++i) {
+ for (int j = 0; j < ca.audio->frames(); ++j) {
+ BOOST_REQUIRE_EQUAL (ca.audio->data(i)[j], j + from);
}
}
}
static
void test_audio_delay (int delay_in_ms)
{
+ BOOST_TEST_MESSAGE ("Testing delay of " << delay_in_ms);
+
string const film_name = "audio_delay_test_" + lexical_cast<string> (delay_in_ms);
shared_ptr<Film> film = new_test_film (film_name);
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("FTR"));
delayed = 0;
}
- BOOST_CHECK_EQUAL (sample, delayed);
+ BOOST_REQUIRE_EQUAL (sample, delayed);
++n;
}
}
/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
using boost::optional;
static void
-check (FFmpegDecoder& decoder, int frame)
+check (shared_ptr<FFmpegDecoder> decoder, int frame)
{
list<ContentVideo> v;
- v = decoder.get_video (frame, true);
+ v = decoder->get_video (frame, true);
BOOST_CHECK (v.size() == 1);
BOOST_CHECK_EQUAL (v.front().frame, frame);
}
film->examine_and_add_content (content);
wait_for_jobs ();
shared_ptr<Log> log (new NullLog);
- FFmpegDecoder decoder (content, log);
+ shared_ptr<FFmpegDecoder> decoder (new FFmpegDecoder (content, log));
for (vector<int>::const_iterator i = frames.begin(); i != frames.end(); ++i) {
check (decoder, *i);
film->examine_and_add_content (content);
wait_for_jobs ();
shared_ptr<Log> log (new NullLog);
- FFmpegDecoder decoder (content, log);
+ shared_ptr<FFmpegDecoder> decoder (new FFmpegDecoder (content, log));
- BOOST_CHECK_CLOSE (decoder.video_content()->video_frame_rate(), fps, 0.01);
+ BOOST_CHECK_CLOSE (decoder->video_content()->video_frame_rate(), fps, 0.01);
- Frame const N = decoder.video_content()->video_length();
+ Frame const N = decoder->video_content()->video_length();
#ifdef DCPOMATIC_DEBUG
- decoder.test_gaps = 0;
+ decoder->test_gaps = 0;
#endif
for (Frame i = 0; i < N; ++i) {
list<ContentVideo> v;
- v = decoder.get_video (i, true);
+ v = decoder->get_video (i, true);
BOOST_CHECK_EQUAL (v.size(), 1);
BOOST_CHECK_EQUAL (v.front().frame, i);
}
#ifdef DCPOMATIC_DEBUG
- BOOST_CHECK_EQUAL (decoder.test_gaps, gaps);
+ BOOST_CHECK_EQUAL (decoder->test_gaps, gaps);
#endif
}
{
shared_ptr<Film> film = new_test_film ("ffmpeg_pts_offset_test");
shared_ptr<FFmpegContent> content (new FFmpegContent (film, "test/data/test.mp4"));
- content->_audio_stream.reset (new FFmpegAudioStream);
+ content->_audio_streams.push_back (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream));
content->_video_frame_rate = 24;
{
/* Sound == video so no offset required */
content->_first_video = ContentTime ();
- content->_audio_stream->first_audio = ContentTime ();
+ content->_audio_streams.front()->first_audio = ContentTime ();
FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
{
/* Common offset should be removed */
content->_first_video = ContentTime::from_seconds (600);
- content->_audio_stream->first_audio = ContentTime::from_seconds (600);
+ content->_audio_streams.front()->first_audio = ContentTime::from_seconds (600);
FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime::from_seconds (-600));
}
{
/* Video is on a frame boundary */
content->_first_video = ContentTime::from_frames (1, 24);
- content->_audio_stream->first_audio = ContentTime ();
+ content->_audio_streams.front()->first_audio = ContentTime ();
FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
/* Video is off a frame boundary */
double const frame = 1.0 / 24.0;
content->_first_video = ContentTime::from_seconds (frame + 0.0215);
- content->_audio_stream->first_audio = ContentTime ();
+ content->_audio_streams.front()->first_audio = ContentTime ();
FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_CLOSE (decoder._pts_offset.seconds(), (frame - 0.0215), 0.00001);
}
/* Video is off a frame boundary and both have a common offset */
double const frame = 1.0 / 24.0;
content->_first_video = ContentTime::from_seconds (frame + 0.0215 + 4.1);
- content->_audio_stream->first_audio = ContentTime::from_seconds (4.1);
+ content->_audio_streams.front()->first_audio = ContentTime::from_seconds (4.1);
FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_CLOSE (decoder._pts_offset.seconds(), (frame - 0.0215) - 4.1, 0.1);
}
/*
- Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
BOOST_CHECK_EQUAL (film->playlist()->best_dcp_frame_rate(), 25);
}
-
BOOST_AUTO_TEST_CASE (audio_sampling_rate_test)
{
shared_ptr<Film> film = new_test_film ("audio_sampling_rate_test");
afr.push_back (30);
Config::instance()->set_allowed_dcp_frame_rates (afr);
+ shared_ptr<FFmpegAudioStream> stream (new FFmpegAudioStream ("foo", 0, 0, 0));
+ content->_audio_streams.push_back (stream);
content->_video_frame_rate = 24;
film->set_video_frame_rate (24);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 48000, 0)));
+ stream->_frame_rate = 48000;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 48000);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 44100, 0)));
+ stream->_frame_rate = 44100;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 48000);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 80000, 0)));
+ stream->_frame_rate = 80000;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 96000);
content->_video_frame_rate = 23.976;
film->set_video_frame_rate (24);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 48000, 0)));
+ stream->_frame_rate = 48000;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 47952);
content->_video_frame_rate = 29.97;
film->set_video_frame_rate (30);
BOOST_CHECK_EQUAL (film->video_frame_rate (), 30);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 48000, 0)));
+ stream->_frame_rate = 48000;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 47952);
content->_video_frame_rate = 25;
film->set_video_frame_rate (24);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 48000, 0)));
+ stream->_frame_rate = 48000;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 50000);
content->_video_frame_rate = 25;
film->set_video_frame_rate (24);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 44100, 0)));
+ stream->_frame_rate = 44100;
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), 50000);
/* Check some out-there conversions (not the best) */
content->_video_frame_rate = 14.99;
film->set_video_frame_rate (25);
- content->set_audio_stream (shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream ("a", 42, 16000, 0)));
+ stream->_frame_rate = 16000;
/* The FrameRateChange within resampled_audio_frame_rate should choose to double-up
the 14.99 fps video to 30 and then run it slow at 25.
*/
BOOST_CHECK_EQUAL (content->resampled_audio_frame_rate(), rint (48000 * 2 * 14.99 / 25));
}
-
/* Work out the first video frame index that we will be given, taking into account
* the difference between first video and first audio.
*/
- ContentTime video_delay = content->first_video().get() - content->audio_stream()->first_audio.get();
+ ContentTime video_delay = content->first_video().get() - content->ffmpeg_audio_streams().front()->first_audio.get();
if (video_delay < ContentTime ()) {
video_delay = ContentTime ();
}
using boost::shared_ptr;
+#if 0
+/* XXX: no audio processors in content any more */
BOOST_AUTO_TEST_CASE (upmixer_a_test)
{
shared_ptr<Film> film = new_test_film ("upmixer_a_test");
check_audio_file ("test/data/upmixer_a_test/Ls.wav", "build/test/upmixer_a_test/Ls.wav");
check_audio_file ("test/data/upmixer_a_test/Rs.wav", "build/test/upmixer_a_test/Rs.wav");
}
+#endif