AudioDecoder::AudioDecoder (shared_ptr<const Film> f, shared_ptr<const AudioContent> c)
: Decoder (f)
+ , _next_audio (0)
, _audio_content (c)
, _output_audio_frame_rate (_audio_content->output_audio_frame_rate (f))
{
{
/* XXX: map audio to 5.1 */
- /* Maybe sample-rate convert */
+ /* Maybe resample */
if (_swr_context) {
/* Compute the resampled frames count and add 32 for luck */
}
Audio (data, time);
+
+ _next_audio = time + _film->audio_frames_to_time (data->frames());
}
void emit_audio (boost::shared_ptr<const AudioBuffers>, Time);
+protected:
+ Time _next_audio;
+
private:
boost::shared_ptr<const AudioContent> _audio_content;
SwrContext* _swr_context;
using boost::shared_ptr;
using boost::lexical_cast;
+Content::Content (Time s)
+ : _start (s)
+{
+
+}
+
/** Construct a Content from a file, starting at time 0.
 *  @param f Path to the file.
 */
Content::Content (boost::filesystem::path f)
	: _file (f)
	, _start (0)
{

}
{
_file = node->string_child ("File");
_digest = node->string_child ("Digest");
- _time = node->number_child<Time> ("Time");
+ _start = node->number_child<Time> ("Start");
}
/** Copy constructor; duplicates the file, digest and start time of
 *  another Content.
 *  @param o Content to copy.
 */
Content::Content (Content const & o)
	: boost::enable_shared_from_this<Content> (o)
	, _file (o._file)
	, _digest (o._digest)
	, _start (o._start)
{

}
boost::mutex::scoped_lock lm (_mutex);
node->add_child("File")->add_child_text (_file.string());
node->add_child("Digest")->add_child_text (_digest);
- node->add_child("Time")->add_child_text (lexical_cast<string> (_time));
+ node->add_child("Start")->add_child_text (lexical_cast<string> (_start));
}
void
class Content : public boost::enable_shared_from_this<Content>
{
public:
+ Content (Time);
Content (boost::filesystem::path);
Content (boost::shared_ptr<const cxml::Node>);
Content (Content const &);
virtual void as_xml (xmlpp::Node *) const;
virtual boost::shared_ptr<Content> clone () const = 0;
virtual Time length (boost::shared_ptr<const Film>) const = 0;
-
+
boost::filesystem::path file () const {
boost::mutex::scoped_lock lm (_mutex);
return _file;
return _digest;
}
- Time time () const {
+ Time start () const {
boost::mutex::scoped_lock lm (_mutex);
- return _time;
+ return _start;
+ }
+
+ Time end (boost::shared_ptr<const Film> f) const {
+ return start() + length(f);
}
boost::signals2::signal<void (boost::weak_ptr<Content>, int)> Changed;
private:
boost::filesystem::path _file;
std::string _digest;
- Time _time;
+ Time _start;
};
#endif
* @brief Parent class for decoders of content.
*/
-#include <iostream>
#include "film.h"
-#include "exceptions.h"
-#include "util.h"
#include "decoder.h"
#include "i18n.h"
-using std::string;
using boost::shared_ptr;
/** @param f Film.
{
_film_connection = f->Changed.connect (bind (&Decoder::film_changed, this, _1));
}
-
-/** @return true on error */
-bool
-Decoder::seek (Time)
-{
- throw DecodeError (N_("decoder does not support seek"));
-}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2013 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
/** @class Decoder.
* @brief Parent class for decoders of content.
- *
- * These classes can be instructed run through their content (by
- * calling ::go), and they emit signals when video or audio data is
- * ready for something else to process.
*/
class Decoder
{
Decoder (boost::shared_ptr<const Film>);
virtual ~Decoder () {}
- virtual bool pass () = 0;
- virtual bool seek (Time);
- virtual bool seek_back () {
- return true;
- }
- virtual bool seek_forward () {
- return true;
- }
+ /** Perform one decode pass of the content, which may or may not
+ * cause the object to emit some data.
+ */
+ virtual void pass () = 0;
- boost::signals2::signal<void()> OutputChanged;
+ /** Seek this decoder to as close as possible to some time,
+ * expressed relative to our source's start.
+ * @param t Time.
+ */
+ virtual void seek (Time t) {}
+
+ /** Seek back one video frame */
+ virtual void seek_back () {}
+
+ /** Seek forward one video frame */
+ virtual void seek_forward () {}
+
+ /** @return Approximate time of the next content that we will emit,
+ * expressed relative to the start of our source.
+ */
+ virtual Time next () const = 0;
protected:
- boost::shared_ptr<const Film> _film;
+
+ /** The Film that we are decoding in */
+ boost::weak_ptr<const Film> _film;
private:
+ /** This will be called when our Film emits Changed */
virtual void film_changed (Film::Property) {}
+ /** Connection to our Film */
boost::signals2::scoped_connection _film_connection;
};
using std::vector;
using std::stringstream;
using std::list;
+using std::min;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
bool
FFmpegDecoder::pass ()
{
- cout << "ffd pass.\n";
int r = av_read_frame (_format_context, &_packet);
- cout << "A " << r << "\n";
if (r < 0) {
if (r != AVERROR_EOF) {
avcodec_get_frame_defaults (_frame);
if (_packet.stream_index == _video_stream && _decode_video) {
- cout << "dvp\n";
decode_video_packet ();
- cout << "ok.\n";
} else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
decode_audio_packet ();
} else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) {
}
}
- cout << "out.\n";
av_free_packet (&_packet);
return false;
}
return _video_codec_context->pix_fmt;
}
-int
-FFmpegDecoder::time_base_numerator () const
-{
- return _video_codec_context->time_base.num;
-}
-
-int
-FFmpegDecoder::time_base_denominator () const
-{
- return _video_codec_context->time_base.den;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_numerator () const
-{
- return _video_codec_context->sample_aspect_ratio.num;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_denominator () const
-{
- return _video_codec_context->sample_aspect_ratio.den;
-}
-
string
FFmpegDecoder::stream_name (AVStream* s) const
{
bool
FFmpegDecoder::seek_back ()
{
- if (last_content_time() < 2.5) {
+ if (next() < 2.5) {
return true;
}
- return do_seek (last_content_time() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
+ return do_seek (next() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
}
bool
FFmpegDecoder::seek_forward ()
{
- if (last_content_time() >= (video_length() - video_frame_rate())) {
+ if (next() >= (video_length() - video_frame_rate())) {
return true;
}
- return do_seek (last_content_time() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
+ return do_seek (next() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
}
bool
{
int64_t const vt = t / (av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
- cout << "seek to " << vt << " (acc=" << accurate << ") (sec " << (vt * av_q2d (_format_context->streams[_video_stream]->time_base)) << "\n";
-
int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
avcodec_flush_buffers (_video_codec_context);
}
}
- cout << "seek ok.\n";
return r < 0;
}
FFmpegDecoder::decode_video_packet ()
{
int frame_finished;
- cout << "avc decode v2\n";
if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
return false;
}
- cout << "done that.\n";
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- cout << "got lock.\n";
-
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
++i;
}
- cout << "found graph.\n";
-
if (i == _filter_graphs.end ()) {
graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
_filter_graphs.push_back (graph);
}
- cout << "pushed in.\n";
list<shared_ptr<Image> > images = graph->process (_frame);
- cout << "got " << images.size() << "\n";
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
/* XXX: may need to insert extra frames / remove frames here ...
(as per old Matcher)
*/
- cout << "emitting.\n";
- emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
- cout << "emitted.\n";
+ Time const t = bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ;
+ emit_video (*i, false, t);
} else {
_film->log()->log ("Dropping frame without PTS");
}
return true;
}
+
+Time
+FFmpegDecoder::next () const
+{
+ return min (_next_video, _next_audio);
+}
FFmpegDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const FFmpegContent>, bool video, bool audio, bool subtitles);
~FFmpegDecoder ();
+ bool pass ();
+ bool seek (Time);
+ bool seek_back ();
+ bool seek_forward ();
+ Time next () const;
+
float video_frame_rate () const;
libdcp::Size native_size () const;
ContentVideoFrame video_length () const;
- int time_base_numerator () const;
- int time_base_denominator () const;
- int sample_aspect_ratio_numerator () const;
- int sample_aspect_ratio_denominator () const;
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > subtitle_streams () const {
return _subtitle_streams;
return _audio_streams;
}
- bool seek (Time);
- bool seek_forward ();
- bool seek_back ();
- bool pass ();
-
boost::shared_ptr<const FFmpegContent> ffmpeg_content () const {
return _ffmpeg_content;
}
a << _size.width << N_(":")
<< _size.height << N_(":")
<< _pixel_format << N_(":")
- << decoder->time_base_numerator() << N_(":")
- << decoder->time_base_denominator() << N_(":")
- << decoder->sample_aspect_ratio_numerator() << N_(":")
- << decoder->sample_aspect_ratio_denominator();
+ << "0:1:0:1";
int r;
_position = f;
return false;
}
+
+Time
+ImageMagickDecoder::next () const
+{
+ return _next_video;
+}
public:
ImageMagickDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const ImageMagickContent>);
+ bool pass ();
+ bool seek (double);
+ Time next () const;
+
float video_frame_rate () const {
return 24;
}
libdcp::Size native_size () const;
ContentVideoFrame video_length () const;
- bool seek (double);
- bool pass ();
-
boost::shared_ptr<const ImageMagickContent> content () const {
return _imagemagick_content;
}
protected:
PixelFormat pixel_format () const;
- int time_base_numerator () const {
- return 0;
- }
-
- int time_base_denominator () const {
- return 0;
- }
-
- int sample_aspect_ratio_numerator () const {
- /* XXX */
- return 1;
- }
-
- int sample_aspect_ratio_denominator () const {
- /* XXX */
- return 1;
- }
-
private:
boost::shared_ptr<const ImageMagickContent> _imagemagick_content;
boost::shared_ptr<Image> _image;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
+struct Piece
+{
+ Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
+ : content (c)
+ , decoder (d)
+ {}
+
+ shared_ptr<Content> content;
+ shared_ptr<Decoder> decoder;
+};
+
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
: _film (f)
, _playlist (p)
, _audio_buffers (MAX_AUDIO_CHANNELS, 0)
, _last_video (0)
, _last_was_black (false)
- , _last_audio (0)
+ , _next_audio (0)
{
_playlist->Changed.connect (bind (&Player::playlist_changed, this));
_playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2));
bool
Player::pass ()
{
- if (!_have_valid_decoders) {
- setup_decoders ();
- _have_valid_decoders = true;
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ _have_valid_pieces = true;
}
/* Here we are just finding the active decoder with the earliest last emission time, then
- calling pass on it. If there is no decoder, we skip our position on until there is.
- Hence this method will cause video and audio to be emitted, and it is up to the
- process_{video,audio} methods to tidy it up.
+ calling pass on it.
*/
- Time earliest_pos = TIME_MAX;
- shared_ptr<DecoderRecord> earliest;
- Time next_wait = TIME_MAX;
-
- for (list<shared_ptr<DecoderRecord> >::iterator i = _decoders.begin(); i != _decoders.end(); ++i) {
- Time const ts = (*i)->content->time();
- Time const te = (*i)->content->time() + (*i)->content->length (_film);
- if (ts <= _position && te > _position) {
- Time const pos = ts + (*i)->last;
- if (pos < earliest_pos) {
- earliest_pos = pos;
- earliest = *i;
- }
- }
+ Time earliest_t = TIME_MAX;
+ shared_ptr<Piece> earliest;
- if (ts > _position) {
- next_wait = min (next_wait, ts - _position);
- }
- }
+ for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+ if ((*i)->content->end(_film) < _position) {
+ continue;
+ }
+
+ Time const t = (*i)->content->start() + (*i)->decoder->next();
+ if (t < earliest_t) {
+ earliest_t = t;
+ earliest = *i;
+ }
+ }
- if (earliest) {
- cout << "pass on decoder...\n";
- earliest->decoder->pass ();
- _position = earliest->last;
- } else if (next_wait < TIME_MAX) {
- cout << "nw " << next_wait << " for " << _position << "\n";
- _position += next_wait;
- } else {
- return true;
- }
+ if (!earliest) {
+ return true;
+ }
+
+ earliest->decoder->pass ();
+
+ /* Move position to earliest active next emission */
+
+ for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+ if ((*i)->content->end(_film) < _position) {
+ continue;
+ }
+
+ Time const t = (*i)->content->start() + (*i)->decoder->next();
+
+ if (t < _position) {
+ _position = t;
+ }
+ }
return false;
}
void
-Player::process_video (shared_ptr<DecoderRecord> dr, shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub, Time time)
+Player::process_video (shared_ptr<Piece> piece, shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub, Time time)
{
- shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (dr->decoder);
-
- Time const global_time = dr->content->time() + time;
- cout << "need to fill in " << global_time << " vs " << _last_video << "\n";
- while ((global_time - _last_video) > 1) {
- /* Fill in with black */
- cout << "(b)\n";
- emit_black_frame ();
- }
-
- Video (image, same, sub, global_time);
- dr->last = time;
- _last_video = global_time;
- _last_was_black = false;
+ time += piece->start ();
+
+ Video (image, same, sub, time);
}
void
-Player::process_audio (shared_ptr<DecoderRecord> dr, shared_ptr<const AudioBuffers> audio, Time time)
+Player::process_audio (shared_ptr<Piece> piece, shared_ptr<const AudioBuffers> audio, Time time)
{
/* XXX: mapping */
be added to any more, so it can be emitted.
*/
- if (time > _last_audio) {
+ time += piece->start ();
+
+ if (time > _next_audio) {
/* We can emit some audio from our buffers */
- OutputAudioFrame const N = min (_film->time_to_audio_frames (time - _last_audio), static_cast<OutputAudioFrame> (_audio_buffers.frames()));
+ OutputAudioFrame const N = min (_film->time_to_audio_frames (time - _next_audio), static_cast<OutputAudioFrame> (_audio_buffers.frames()));
shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), N));
emit->copy_from (&_audio_buffers, N, 0, 0);
- Audio (emit, _last_audio);
- _last_audio += _film->audio_frames_to_time (N);
+ Audio (emit, _next_audio);
+ _next_audio += _film->audio_frames_to_time (N);
/* And remove it from our buffers */
if (_audio_buffers.frames() > N) {
}
/* Now accumulate the new audio into our buffers */
-
- if (_audio_buffers.frames() == 0) {
- /* We have no remaining data. Emit silence up to the start of this new data */
- if ((time - _last_audio) > 0) {
- emit_silence (time - _last_audio);
- }
- }
-
- _audio_buffers.ensure_size (time - _last_audio + audio->frames());
- _audio_buffers.accumulate (audio.get(), 0, _film->time_to_audio_frames (time - _last_audio));
- dr->last = time + _film->audio_frames_to_time (audio->frames ());
+ _audio_buffers.ensure_size (time - _next_audio + audio->frames());
+ _audio_buffers.accumulate (audio.get(), 0, _film->time_to_audio_frames (time - _next_audio));
}
/** @return true on error */
bool
Player::seek (Time t)
{
- if (!_have_valid_decoders) {
- setup_decoders ();
- _have_valid_decoders = true;
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ _have_valid_pieces = true;
}
- if (_decoders.empty ()) {
+ if (_pieces.empty ()) {
return true;
}
- cout << "seek to " << t << "\n";
-
- Time current_time = 0;
- shared_ptr<VideoDecoder> current;
- for (list<shared_ptr<DecoderRecord> >::iterator i = _decoders.begin(); i != _decoders.end(); ++i) {
- shared_ptr<VideoDecoder> v = dynamic_pointer_cast<VideoDecoder> ((*i)->decoder);
- if (!v) {
- continue;
- }
-
- if ((*i)->content->time() < t && (*i)->content->time() >= current_time) {
- current_time = (*i)->content->time();
- current = v;
- }
- }
-
- if (current) {
- cout << "got a decoder to seek to " << (t - current_time) << ".\n";
- current->seek (t - current_time);
- _position = t;
- _last_video = t;
- }
-
/* XXX: don't seek audio because we don't need to... */
return false;
/* XXX */
}
+struct ContentSorter
+{
+ bool operator() (shared_ptr<Content> a, shared_ptr<Content> b)
+ {
+ return a->time() < b->time();
+ }
+};
void
Player::setup_decoders ()
{
- list<shared_ptr<DecoderRecord> > old_decoders = _decoders;
+ list<shared_ptr<Piece> > old_pieces = _pieces;
- _decoders.clear ();
+ _pieces.clear ();
Playlist::ContentList content = _playlist->content ();
+ content.sort (ContentSorter ());
+
for (Playlist::ContentList::iterator i = content.begin(); i != content.end(); ++i) {
- shared_ptr<DecoderRecord> dr (new DecoderRecord);
- dr->content = *i;
+ shared_ptr<Decoder> decoder;
/* XXX: into content? */
fd->Video.connect (bind (&Player::process_video, this, dr, _1, _2, _3, _4));
fd->Audio.connect (bind (&Player::process_audio, this, dr, _1, _2));
- dr->decoder = fd;
+ decoder = fd;
}
shared_ptr<const ImageMagickContent> ic = dynamic_pointer_cast<const ImageMagickContent> (*i);
shared_ptr<ImageMagickDecoder> id;
/* See if we can re-use an old ImageMagickDecoder */
- for (list<shared_ptr<DecoderRecord> >::const_iterator i = old_decoders.begin(); i != old_decoders.end(); ++i) {
- shared_ptr<ImageMagickDecoder> imd = dynamic_pointer_cast<ImageMagickDecoder> ((*i)->decoder);
- if (imd && imd->content() == ic) {
- id = imd;
+ for (list<shared_ptr<Piece> >::const_iterator i = old_pieces.begin(); i != old_pieces.end(); ++i) {
+ shared_ptr<ContentPiece> cp = dynamic_pointer_cast<ContentPiece> (*i);
+ if (cp) {
+ shared_ptr<ImageMagickDecoder> imd = dynamic_pointer_cast<ImageMagickDecoder> (cp->decoder ());
+ if (imd && imd->content() == ic) {
+ id = imd;
+ }
}
}
id->Video.connect (bind (&Player::process_video, this, dr, _1, _2, _3, _4));
}
- dr->decoder = id;
+ decoder = id;
}
shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
sd->Audio.connect (bind (&Player::process_audio, this, dr, _1, _2));
- dr->decoder = sd;
+ decoder = sd;
}
- _decoders.push_back (dr);
+ _pieces.push_back (shared_ptr<new ContentPiece> (*i, decoder));
+ }
+
+ /* Fill in visual gaps with black and audio gaps with silence */
+
+ Time video_pos = 0;
+ Time audio_pos = 0;
+ list<shared_ptr<Piece> > pieces_copy = _pieces;
+ for (list<shared_ptr<Piece> >::iterator i = pieces_copy.begin(); i != pieces_copy.end(); ++i) {
+ if (dynamic_pointer_cast<VideoContent> ((*i)->content)) {
+ Time const diff = video_pos - (*i)->content->time();
+ if (diff > 0) {
+ _pieces.push_back (
+ shared_ptr<Piece> (
+ shared_ptr<Content> (new NullContent (video_pos, diff)),
+ shared_ptr<Decoder> (new BlackDecoder (video_pos, diff))
+ )
+ );
+ }
+
+ video_pos = (*i)->content->time() + (*i)->content->length();
+ } else {
+ Time const diff = audio_pos - (*i)->content->time();
+ if (diff > 0) {
+ _pieces.push_back (
+ shared_ptr<Piece> (
+ shared_ptr<Content> (new NullContent (audio_pos, diff)),
+ shared_ptr<Decoder> (new SilenceDecoder (audio_pos, diff))
+ )
+ );
+ }
+ audio_pos = (*i)->content->time() + (*i)->content->length();
+ }
}
_position = 0;
}
if (p == VideoContentProperty::VIDEO_LENGTH) {
- _have_valid_decoders = false;
+ _have_valid_pieces = false;
}
}
/** Called when our Playlist emits Changed; invalidate our pieces so
 *  that they are rebuilt before the next pass or seek.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
}
void
class Film;
class Playlist;
class AudioContent;
-class Decoder;
+class Piece;
/** @class Player
* @brief A class which can `play' a Playlist; emitting its audio and video.
private:
- struct DecoderRecord
- {
- DecoderRecord ()
- : last (0)
- {}
-
- boost::shared_ptr<Content> content;
- boost::shared_ptr<Decoder> decoder;
- Time last;
- };
-
- void process_video (boost::shared_ptr<DecoderRecord>, boost::shared_ptr<const Image>, bool, boost::shared_ptr<Subtitle>, Time);
- void process_audio (boost::shared_ptr<DecoderRecord>, boost::shared_ptr<const AudioBuffers>, Time);
- void setup_decoders ();
+ void process_video (boost::shared_ptr<Piece>, boost::shared_ptr<const Image>, bool, boost::shared_ptr<Subtitle>, Time);
+ void process_audio (boost::shared_ptr<Piece>, boost::shared_ptr<const AudioBuffers>, Time);
+ void setup_pieces ();
void playlist_changed ();
void content_changed (boost::weak_ptr<Content>, int);
void emit_black_frame ();
bool _audio;
bool _subtitles;
- /** Our decoders are ready to go; if this is false the decoders must be (re-)created before they are used */
- bool _have_valid_decoders;
- std::list<boost::shared_ptr<DecoderRecord> > _decoders;
+ /** Our pieces are ready to go; if this is false the pieces must be (re-)created before they are used */
+ bool _have_valid_pieces;
+ std::list<boost::shared_ptr<Piece> > _pieces;
- /* XXX: position and last_video? Need both? */
+ /** Time of the earliest thing not yet to have been emitted */
Time _position;
+ Time _last_black;
+ Time _last_silence;
+
+ /* XXX: position and last_video? Need both? */
AudioBuffers _audio_buffers;
Time _last_video;
bool _last_was_black;
- Time _last_audio;
+ Time _next_audio;
};
#endif
{
return _info.samplerate;
}
+
+Time
+SndfileDecoder::next () const
+{
+ return _next_audio;
+}
~SndfileDecoder ();
bool pass ();
+ Time next ();
int audio_channels () const;
ContentAudioFrame audio_length () const;
class VideoContent : public virtual Content
{
public:
+ VideoContent (Time);
VideoContent (boost::filesystem::path);
VideoContent (boost::shared_ptr<const cxml::Node>);
VideoContent (VideoContent const &);
/** @param f Film that we are decoding in.
 *  @param c VideoContent that we will decode.
 */
VideoDecoder::VideoDecoder (shared_ptr<const Film> f, shared_ptr<const VideoContent> c)
	: Decoder (f)
	, _next_video (0)
	, _video_content (c)
{

}
Video (image, same, sub, t);
++_video_frame;
- _last_content_time = t;
+ /* XXX: who's doing skip / repeat? */
+ _next_video = t + _film->video_frames_to_time (1);
}
/** Set up the current subtitle. This will be put onto frames that
_timed_subtitle->subtitle()->set_position (Position (p.x - _video_content->crop().left, p.y - _video_content->crop().top));
}
}
-
-void
-VideoDecoder::set_progress (Job* j) const
-{
- assert (j);
-
- if (_film->length()) {
- j->set_progress (float (_video_frame) / _film->time_to_video_frames (_film->length()));
- }
-}
public:
VideoDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const VideoContent>);
+ /* Calls for VideoContent to find out about itself */
+
/** @return video frame rate second, or 0 if unknown */
virtual float video_frame_rate () const = 0;
- /** @return native size in pixels */
- virtual libdcp::Size native_size () const = 0;
+ /** @return video size in pixels */
+ virtual libdcp::Size video_size () const = 0;
/** @return length according to our content's header */
virtual ContentVideoFrame video_length () const = 0;
- virtual int time_base_numerator () const = 0;
- virtual int time_base_denominator () const = 0;
- virtual int sample_aspect_ratio_numerator () const = 0;
- virtual int sample_aspect_ratio_denominator () const = 0;
-
- void set_progress (Job *) const;
-
- int video_frame () const {
- return _video_frame;
- }
-
- Time last_content_time () const {
- return _last_content_time;
- }
-
protected:
- virtual PixelFormat pixel_format () const = 0;
-
void emit_video (boost::shared_ptr<Image>, bool, Time);
void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
+ Time _next_video;
+
private:
boost::shared_ptr<const VideoContent> _video_content;
- int _video_frame;
- Time _last_content_time;
boost::shared_ptr<TimedSubtitle> _timed_subtitle;
};
* First parameter is the video image.
* Second parameter is true if the image is the same as the last one that was emitted.
* Third parameter is either 0 or a subtitle that should be on this frame.
+ * Fourth parameter is the time relative to the start of this source's content.
*/
boost::signals2::signal<void (boost::shared_ptr<const Image>, bool, boost::shared_ptr<Subtitle>, Time)> Video;
return;
}
- cout << "getting frame...\n";
-
try {
_got_frame = false;
while (!_got_frame) {
- cout << "pass...\n";
if (_player->pass ()) {
- cout << "passed.\n";
/* We didn't get a frame before the decoder gave up,
so clear our display frame.
*/
_display_frame.reset ();
break;
}
- cout << "passed.\n";
}
} catch (DecodeError& e) {
_play_button->SetValue (false);
check_play_state ();
error_dialog (this, wxString::Format (_("Could not decode video for view (%s)"), std_to_wx(e.what()).data()));
}
-
- cout << "...got frame.\n";
}
void