_da->Video.connect (bind (&ABTranscoder::process_video, this, _1, _2, _3, 0));
_db->Video.connect (bind (&ABTranscoder::process_video, this, _1, _2, _3, 1));
- _da->Audio.connect (bind (&Encoder::process_audio, e, _1));
+ _da->Audio.connect (bind (&Encoder::process_audio, e, _1, _2));
}
ABTranscoder::~ABTranscoder ()
* @param minimal true to do the bare minimum of work; just run through the content. Useful for acquiring
* accurate frame counts as quickly as possible. This generates no video or audio output.
*/
-Decoder::Decoder (boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j, bool minimal)
+Decoder::Decoder (boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j)
: _film (f)
, _opt (o)
, _job (j)
- , _minimal (minimal)
- , _video_frames_in (0)
- , _video_frames_out (0)
- , _audio_frames_in (0)
- , _audio_frames_out (0)
+ , _video_frame (0)
+ , _audio_frame (0)
, _delay_line (0)
, _delay_in_frames (0)
{
/* Ensure that our video and audio emissions are the same length */
- int64_t audio_short_by_frames = video_frames_to_audio_frames (_video_frames_out) - _audio_frames_out;
+ int64_t audio_short_by_frames = video_frames_to_audio_frames (_video_frame, audio_sample_rate(), frames_per_second()) - _audio_frame;
_film->log()->log (
- String::compose ("Decoder has emitted %1 video frames (which equals %2 audio frames) and %3 audio frames",
- _video_frames_out,
- video_frames_to_audio_frames (_video_frames_out),
- _audio_frames_out)
+ String::compose (
+ "Decoder has emitted %1 video frames (which equals %2 audio frames) and %3 audio frames",
+ _video_frame,
+ video_frames_to_audio_frames (_video_frame, audio_sample_rate(), frames_per_second()),
+ _audio_frame
+ )
);
if (audio_short_by_frames < 0) {
black->make_black ();
for (int i = 0; i < black_video_frames; ++i) {
emit_video (black, shared_ptr<Subtitle> ());
-
- /* This is a bit of a hack, but you can sort-of justify it if you squint at it right.
- It's important because the encoder will probably use this to name its output frame.
- */
- ++_video_frames_in;
}
/* Now recompute our check value */
- audio_short_by_frames = video_frames_to_audio_frames (_video_frames_out) - _audio_frames_out;
+ audio_short_by_frames = video_frames_to_audio_frames (_video_frame, audio_sample_rate(), frames_per_second()) - _audio_frame;
}
if (audio_short_by_frames > 0) {
}
while (pass () == false) {
- if (_job && _film->dcp_length()) {
- _job->set_progress (float (_video_frames_out) / _film->dcp_length().get());
+ if (_job) {
+ _job->set_progress (float (_video_frame) / _film->length().get());
}
}
void
Decoder::process_audio (uint8_t* data, int size)
{
+ /* XXX: could this be removed? */
if (size == 0) {
return;
}
}
_delay_line->feed (audio);
-
- int const in_frames = audio->frames ();
-
- if (_opt->decode_range) {
- /* Decode range in audio frames */
- pair<int64_t, int64_t> required_range (
- video_frames_to_audio_frames (_opt->decode_range.get().first),
- video_frames_to_audio_frames (_opt->decode_range.get().second)
- );
-
- /* Range of this block of data */
- pair<int64_t, int64_t> this_range (
- _audio_frames_in,
- _audio_frames_in + audio->frames()
- );
-
- if (this_range.second < required_range.first || required_range.second < this_range.first) {
- /* No part of this audio is within the required range */
- audio->set_frames (0);
- } else if (required_range.first >= this_range.first && required_range.first < this_range.second) {
- /* Trim start */
- int64_t const shift = required_range.first - this_range.first;
- audio->move (shift, 0, audio->frames() - shift);
- audio->set_frames (audio->frames() - shift);
- } else if (required_range.second >= this_range.first && required_range.second < this_range.second) {
- /* Trim end */
- audio->set_frames (required_range.second - this_range.first);
- }
- }
-
- if (audio->frames()) {
- emit_audio (audio);
- }
-
- _audio_frames_in += in_frames;
-}
-
-void
-Decoder::emit_audio (shared_ptr<AudioBuffers> audio)
-{
- Audio (audio);
- _audio_frames_out += audio->frames ();
+ emit_audio (audio);
}
/** Called by subclasses to tell the world that some video data is ready.
void
Decoder::process_video (AVFrame* frame)
{
- if (_minimal) {
- ++_video_frames_in;
- return;
- }
-
- if (_opt->decode_video_skip != 0 && (_video_frames_in % _opt->decode_video_skip) != 0) {
- ++_video_frames_in;
- return;
- }
-
- if (_opt->decode_range) {
- pair<SourceFrame, SourceFrame> r = _opt->decode_range.get();
- if (_video_frames_in < r.first || _video_frames_in >= r.second) {
- ++_video_frames_in;
- return;
- }
- }
-
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
shared_ptr<Subtitle> sub;
- if (_timed_subtitle && _timed_subtitle->displayed_at (double (video_frames_in()) / _film->frames_per_second())) {
+ if (_timed_subtitle && _timed_subtitle->displayed_at (double (video_frame()) / _film->frames_per_second())) {
sub = _timed_subtitle->subtitle ();
}
emit_video (*i, sub);
- ++_video_frames_in;
}
}
}
emit_video (_last_image, _last_subtitle);
- ++_video_frames_in;
}
void
Decoder::emit_video (shared_ptr<Image> image, shared_ptr<Subtitle> sub)
{
- TIMING ("Decoder emits %1", _video_frames_out);
- Video (image, _video_frames_in, sub);
- ++_video_frames_out;
+ TIMING ("Decoder emits %1", _video_frame);
+ Video (image, _video_frame, sub);
+ ++_video_frame;
+
_last_image = image;
_last_subtitle = sub;
}
+/** Called by subclasses to tell the world that some audio data is ready.
+ *  Emits the Audio signal, passing along the audio frame index at which this
+ *  block of data starts, then advances the running count of emitted frames.
+ *  @param audio Audio data.
+ */
+void
+Decoder::emit_audio (shared_ptr<AudioBuffers> audio)
+{
+ Audio (audio, _audio_frame);
+ _audio_frame += audio->frames ();
+}
+
void
Decoder::process_subtitle (shared_ptr<TimedSubtitle> s)
{
}
}
-
int
Decoder::bytes_per_audio_sample () const
{
return av_get_bytes_per_sample (audio_sample_format ());
}
-
-int64_t
-Decoder::video_frames_to_audio_frames (SourceFrame v) const
-{
- return ((int64_t) v * audio_sample_rate() / frames_per_second());
-}
class Decoder
{
public:
- Decoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *, bool);
+ Decoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *);
virtual ~Decoder ();
/* Methods to query our input video */
void process_end ();
void go ();
- /** @return the number of video frames we got from the source in the last run */
- SourceFrame video_frames_in () const {
- return _video_frames_in;
+ SourceFrame video_frame () const {
+ return _video_frame;
}
virtual std::vector<AudioStream> audio_streams () const {
boost::signals2::signal<void (boost::shared_ptr<Image>, SourceFrame, boost::shared_ptr<Subtitle>)> Video;
/** Emitted when some audio data is ready */
- boost::signals2::signal<void (boost::shared_ptr<AudioBuffers>)> Audio;
+ boost::signals2::signal<void (boost::shared_ptr<AudioBuffers>, int64_t)> Audio;
protected:
/** associated Job, or 0 */
Job* _job;
- /** true to do the bare minimum of work; just run through the content. Useful for acquiring
- * accurate frame counts as quickly as possible. This generates no video or audio output.
- */
- bool _minimal;
-
private:
void emit_video (boost::shared_ptr<Image>, boost::shared_ptr<Subtitle>);
void emit_audio (boost::shared_ptr<AudioBuffers>);
- int64_t video_frames_to_audio_frames (SourceFrame v) const;
-
- SourceFrame _video_frames_in;
- SourceFrame _video_frames_out;
- int64_t _audio_frames_in;
- int64_t _audio_frames_out;
+ SourceFrame _video_frame;
+ int64_t _audio_frame;
std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
shared_ptr<Decoder>
decoder_factory (
- shared_ptr<Film> f, shared_ptr<const Options> o, Job* j, bool minimal = false
+ shared_ptr<Film> f, shared_ptr<const Options> o, Job* j
)
{
if (boost::filesystem::is_directory (f->content_path ())) {
/* Assume a directory contains TIFFs */
- return shared_ptr<Decoder> (new TIFFDecoder (f, o, j, minimal));
+ return shared_ptr<Decoder> (new TIFFDecoder (f, o, j));
}
if (f->content_type() == STILL) {
- return shared_ptr<Decoder> (new ImageMagickDecoder (f, o, j, minimal));
+ return shared_ptr<Decoder> (new ImageMagickDecoder (f, o, j));
}
- return shared_ptr<Decoder> (new FFmpegDecoder (f, o, j, minimal));
+ return shared_ptr<Decoder> (new FFmpegDecoder (f, o, j));
}
class Log;
extern boost::shared_ptr<Decoder> decoder_factory (
- boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *, bool minimal = false
+ boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *
);
#include "encoder.h"
#include "util.h"
+#include "options.h"
+using std::pair;
using namespace boost;
int const Encoder::_history_size = 25;
boost::mutex::scoped_lock lock (_history_mutex);
_just_skipped = true;
}
+
+/** Called with a frame of video; filters it against the Options and passes it
+ *  on to do_process_video if it should be encoded.
+ *  Frames are dropped when (a) decode_video_skip is set and this frame index is
+ *  not a multiple of it, or (b) a video_decode_range is set and the frame falls
+ *  outside the half-open range [first, second).
+ *  @param i Video frame image.
+ *  @param f Frame number within the film's source.
+ *  @param s A subtitle that should be on this frame, or 0.
+ */
+void
+Encoder::process_video (shared_ptr<const Image> i, SourceFrame f, boost::shared_ptr<Subtitle> s)
+{
+ if (_opt->decode_video_skip != 0 && (f % _opt->decode_video_skip) != 0) {
+ /* We are skipping frames to repeat-count up to a DCP frame rate; drop this one */
+ return;
+ }
+
+ if (_opt->video_decode_range) {
+ pair<SourceFrame, SourceFrame> const r = _opt->video_decode_range.get();
+ if (f < r.first || f >= r.second) {
+ /* Outside the range we have been asked to encode */
+ return;
+ }
+ }
+
+ do_process_video (i, f, s);
+}
+
+/** Called with some audio data; trims it to the configured audio_decode_range
+ *  (if any) and passes the result on to do_process_audio.
+ *  @param data Audio samples.
+ *  @param f Frame index (within the source audio) at which this data starts.
+ */
+void
+Encoder::process_audio (shared_ptr<const AudioBuffers> data, int64_t f)
+{
+ if (_opt->audio_decode_range) {
+
+ /* Copy so that we can modify the frame count / shift samples */
+ shared_ptr<AudioBuffers> trimmed (new AudioBuffers (*data.get ()));
+
+ /* Range that we are encoding */
+ pair<int64_t, int64_t> required_range = _opt->audio_decode_range.get();
+ /* Range of this block of data */
+ pair<int64_t, int64_t> this_range (f, f + trimmed->frames());
+
+ if (this_range.second < required_range.first || required_range.second < this_range.first) {
+ /* No part of this audio is within the required range */
+ return;
+ } else if (required_range.first >= this_range.first && required_range.first < this_range.second) {
+ /* Trim start */
+ int64_t const shift = required_range.first - this_range.first;
+ trimmed->move (shift, 0, trimmed->frames() - shift);
+ trimmed->set_frames (trimmed->frames() - shift);
+ } else if (required_range.second >= this_range.first && required_range.second < this_range.second) {
+ /* Trim end */
+ trimmed->set_frames (required_range.second - this_range.first);
+ }
+
+ /* NOTE(review): trimming can leave trimmed->frames() == 0; the code this
+ replaces only emitted non-empty buffers — confirm do_process_audio
+ implementations tolerate a zero-frame buffer.
+ */
+ data = trimmed;
+ }
+
+ do_process_audio (data);
+}
/** Called to indicate that a processing run is about to begin */
virtual void process_begin (int64_t audio_channel_layout) = 0;
- /** Called with a frame of video.
+ /** Call with a frame of video.
* @param i Video frame image.
* @param f Frame number within the film's source.
* @param s A subtitle that should be on this frame, or 0.
*/
- virtual void process_video (boost::shared_ptr<const Image> i, SourceFrame f, boost::shared_ptr<Subtitle> s) = 0;
+ void process_video (boost::shared_ptr<const Image> i, SourceFrame f, boost::shared_ptr<Subtitle> s);
- /** Called with some audio data.
- * @param d Array of pointers to floating point sample data for each channel.
- * @param s Number of frames (ie number of samples in each channel)
- */
- virtual void process_audio (boost::shared_ptr<const AudioBuffers>) = 0;
+ /** Call with some audio data */
+ void process_audio (boost::shared_ptr<const AudioBuffers>, int64_t);
/** Called when a processing run has finished */
virtual void process_end () = 0;
SourceFrame last_frame () const;
protected:
+
+ /** Called with a frame of video.
+ * @param i Video frame image.
+ * @param f Frame number within the film's source.
+ * @param s A subtitle that should be on this frame, or 0.
+ */
+ virtual void do_process_video (boost::shared_ptr<const Image> i, SourceFrame f, boost::shared_ptr<Subtitle> s) = 0;
+
+ /** Called with some audio data */
+ virtual void do_process_audio (boost::shared_ptr<const AudioBuffers>) = 0;
+
void frame_done (SourceFrame n);
void frame_skipped ();
descend (0.5);
- _decoder = decoder_factory (_film, o, this, true);
+ _decoder = decoder_factory (_film, o, this);
_decoder->go ();
- _film->set_length (_decoder->video_frames_in());
+ _film->set_length (_decoder->video_frame());
_film->log()->log (String::compose ("Video length is %1 frames", _film->length()));
using std::stringstream;
using boost::shared_ptr;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, Job* j, bool minimal)
- : Decoder (f, o, j, minimal)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, Job* j)
+ : Decoder (f, o, j)
, _format_context (0)
, _video_stream (-1)
, _audio_stream (-1)
}
/* Where we are in the output, in seconds */
- double const out_pts_seconds = video_frames_in() / frames_per_second();
+ double const out_pts_seconds = video_frame() / frames_per_second();
/* Where we are in the source, in seconds */
double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
_film->log()->log (
String::compose (
"Extra frame inserted at %1s; source frame %2, source PTS %3",
- out_pts_seconds, video_frames_in(), source_pts_seconds
+ out_pts_seconds, video_frame(), source_pts_seconds
)
);
}
class FFmpegDecoder : public Decoder
{
public:
- FFmpegDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *, bool);
+ FFmpegDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *);
~FFmpegDecoder ();
/* Methods to query our input video */
o->padding = format()->dcp_padding (shared_from_this ());
o->ratio = format()->ratio_as_float (shared_from_this ());
if (dcp_length ()) {
- o->decode_range = make_pair (dcp_trim_start(), dcp_trim_start() + dcp_length().get());
+ o->video_decode_range = make_pair (dcp_trim_start(), dcp_trim_start() + dcp_length().get());
+ o->audio_decode_range = make_pair (
+ video_frames_to_audio_frames (o->video_decode_range.get().first, audio_sample_rate(), frames_per_second()),
+ video_frames_to_audio_frames (o->video_decode_range.get().second, audio_sample_rate(), frames_per_second())
+ );
+
}
o->decode_subtitles = with_subtitles ();
o->decode_video_skip = dcp_frame_rate (frames_per_second()).skip;
shared_ptr<Options> o (new Options ("", "", ""));
o->out_size = Size (1024, 1024);
- shared_ptr<Decoder> d = decoder_factory (shared_from_this(), o, 0, 0);
+ shared_ptr<Decoder> d = decoder_factory (shared_from_this(), o, 0);
set_size (d->native_size ());
set_frames_per_second (d->frames_per_second ());
{
_dci_date = boost::gregorian::day_clock::local_day ();
}
+
std::string thumb_base_for_frame (SourceFrame) const;
void signal_changed (Property);
void examine_content_finished ();
-
+
/** Complete path to directory containing the film metadata;
* must not be relative.
*/
using namespace boost;
ImageMagickDecoder::ImageMagickDecoder (
- boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j, bool minimal)
- : Decoder (f, o, j, minimal)
+ boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j)
+ : Decoder (f, o, j)
, _done (false)
{
_magick_image = new Magick::Image (_film->content_path ());
class ImageMagickDecoder : public Decoder
{
public:
- ImageMagickDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *, bool);
+ ImageMagickDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *);
float frames_per_second () const {
return static_frames_per_second ();
}
void
-ImageMagickEncoder::process_video (shared_ptr<const Image> image, SourceFrame frame, shared_ptr<Subtitle> sub)
+ImageMagickEncoder::do_process_video (shared_ptr<const Image> image, SourceFrame frame, shared_ptr<Subtitle> sub)
{
shared_ptr<Image> scaled = image->scale_and_convert_to_rgb (_opt->out_size, _opt->padding, _film->scaler());
shared_ptr<Image> compact (new CompactImage (scaled));
ImageMagickEncoder (boost::shared_ptr<const Film> f, boost::shared_ptr<const Options> o);
void process_begin (int64_t audio_channel_layout) {}
- void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
- void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
+
+private:
+ void do_process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
+ void do_process_audio (boost::shared_ptr<const AudioBuffers>) {}
};
}
void
-J2KStillEncoder::process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
+J2KStillEncoder::do_process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
{
pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
DCPVideoFrame* f = new DCPVideoFrame (
J2KStillEncoder (boost::shared_ptr<const Film>, boost::shared_ptr<const Options>);
void process_begin (int64_t audio_channel_layout) {}
- void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
- void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
+
+private:
+ void do_process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
+ void do_process_audio (boost::shared_ptr<const AudioBuffers>) {}
};
}
void
-J2KWAVEncoder::process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
+J2KWAVEncoder::do_process_video (shared_ptr<const Image> yuv, SourceFrame frame, shared_ptr<Subtitle> sub)
{
boost::mutex::scoped_lock lock (_worker_mutex);
}
void
-J2KWAVEncoder::process_audio (shared_ptr<const AudioBuffers> audio)
+J2KWAVEncoder::do_process_audio (shared_ptr<const AudioBuffers> audio)
{
shared_ptr<AudioBuffers> resampled;
~J2KWAVEncoder ();
void process_begin (int64_t audio_channel_layout);
- void process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
- void process_audio (boost::shared_ptr<const AudioBuffers>);
void process_end ();
private:
+ void do_process_video (boost::shared_ptr<const Image>, SourceFrame, boost::shared_ptr<Subtitle>);
+ void do_process_audio (boost::shared_ptr<const AudioBuffers>);
+
void write_audio (boost::shared_ptr<const AudioBuffers> audio);
void encoder_thread (ServerDescription *);
void close_sound_files ();
#include <string>
#include <iomanip>
#include <sstream>
+#include <boost/optional.hpp>
#include "util.h"
/** @class Options
bool apply_crop; ///< true to apply cropping
/** Range of video frames to decode */
- boost::optional<std::pair<SourceFrame, SourceFrame> > decode_range;
-
+ boost::optional<std::pair<SourceFrame, SourceFrame> > video_decode_range;
+ /** Range of audio frames to decode */
+ boost::optional<std::pair<int64_t, int64_t> > audio_decode_range;
+
/** Skip frames such that we don't decode any frame where (index % decode_video_skip) != 0; e.g.
* 1 for every frame, 2 for every other frame, etc.
*/
/** @param f Our Film.
* @param o Options.
* @param j Job that we are associated with, or 0.
- * @param minimal true to do the bare minimum of work; just run through the content. Useful for acquiring
- * accurate frame counts as quickly as possible. This generates no video or audio output.
*/
-TIFFDecoder::TIFFDecoder (boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j, bool minimal)
- : Decoder (f, o, j, minimal)
+TIFFDecoder::TIFFDecoder (boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j)
+ : Decoder (f, o, j)
{
string const dir = _film->content_path ();
class TIFFDecoder : public Decoder
{
public:
- TIFFDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *, bool);
+ TIFFDecoder (boost::shared_ptr<Film>, boost::shared_ptr<const Options>, Job *);
/* Methods to query our input video */
float frames_per_second () const;
assert (_encoder);
_decoder->Video.connect (bind (&Encoder::process_video, e, _1, _2, _3));
- _decoder->Audio.connect (bind (&Encoder::process_audio, e, _1));
+ _decoder->Audio.connect (bind (&Encoder::process_audio, e, _1, _2));
}
/** Run the decoder, passing its output to the encoder, until the decoder
{
assert (this_thread::get_id() == ui_thread);
}
+
+/** Convert a video frame count to the equivalent number of audio frames
+ *  (i.e. samples per channel) at a given sample rate and video frame rate.
+ *  @param v Number of video frames.
+ *  @param audio_sample_rate Audio sample rate in Hz.
+ *  @param frames_per_second Video frame rate.
+ *  @return Equivalent audio frame count; note the arithmetic is done in
+ *  floating point and then truncated to int64_t, so results are subject to
+ *  float rounding for large v.
+ */
+int64_t
+video_frames_to_audio_frames (SourceFrame v, float audio_sample_rate, float frames_per_second)
+{
+ return ((int64_t) v * audio_sample_rate / frames_per_second);
+}
float** _data;
};
+extern int64_t video_frames_to_audio_frames (SourceFrame v, float audio_sample_rate, float frames_per_second);
+
#endif
film_changed (Film::AUDIO_DELAY);
film_changed (Film::STILL_DURATION);
film_changed (Film::WITH_SUBTITLES);
+ film_changed (Film::HAS_SUBTITLES);
film_changed (Film::SUBTITLE_OFFSET);
film_changed (Film::SUBTITLE_SCALE);
film_changed (Film::USE_DCI_NAME);