if (frc.change_speed) {
t *= video_frame_rate() * frc.factor() / film->dcp_video_frame_rate();
- cout << "-> " << t << "\n";
}
return rint (t);
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
bool
FFmpegDecoder::pass ()
{
- cout << "FFmpeg::pass\n";
-
int r = av_read_frame (_format_context, &_packet);
if (r < 0) {
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
if (_decode_video) {
- decode_video_packet ();
+ while (decode_video_packet ());
}
if (_ffmpeg_content->audio_stream() && _decode_audio) {
}
}
-void
+bool
FFmpegDecoder::decode_video_packet ()
{
int frame_finished;
- while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-
- shared_ptr<FilterGraph> graph;
-
- list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
- ++i;
- }
+ if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
+ return false;
+ }
- if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
- _filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
+ shared_ptr<FilterGraph> graph;
+
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+ graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+ _filter_graphs.push_back (graph);
+ _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ } else {
+ graph = *i;
+ }
+
+ list<shared_ptr<Image> > images = graph->process (_frame);
+
+ for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet != AV_NOPTS_VALUE) {
+ /* XXX: may need to insert extra frames / remove frames here ...
+ (as per old Matcher)
+ */
+ emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
} else {
- graph = *i;
- }
-
- list<shared_ptr<Image> > images = graph->process (_frame);
-
- for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
- if (bet != AV_NOPTS_VALUE) {
- /* XXX: may need to insert extra frames / remove frames here ...
- (as per old Matcher)
- */
- emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
- } else {
- _film->log()->log ("Dropping frame without PTS");
- }
+ _film->log()->log ("Dropping frame without PTS");
}
}
+
+ return true;
}
void setup_audio ();
void setup_subtitle ();
- void decode_video_packet ();
+ bool decode_video_packet ();
void decode_audio_packet ();
void maybe_add_subtitle ();
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
_have_valid_decoders = true;
}
- cout << "-> Player::pass\n";
-
/* Here we are just finding the active decoder with the earliest last emission time, then
calling pass on it. If there is no decoder, we skip our position on until there is.
Hence this method will cause video and audio to be emitted, and it is up to the
} else if (next_wait < TIME_MAX) {
_position += next_wait;
} else {
- cout << "<- Player::pass\n";
return true;
}
- cout << "<- Player::pass\n";
return false;
}
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
{
xmlpp::Node* sub = node->add_child ("Content");
content->as_xml (sub);
- sub->add_child ("Time")->add_child_text (lexical_cast<string> (time));
+ node->add_child ("Time")->add_child_text (lexical_cast<string> (time));
}
class FrameRateCandidate
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
double const packet_time = static_cast<double> (sub.pts) / AV_TIME_BASE;
/* hence start time for this sub */
- _from = packet_time + (double (sub.start_display_time) / 1e3);
- _to = packet_time + (double (sub.end_display_time) / 1e3);
+ _from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
+ _to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;
if (sub.num_rects > 1) {
throw DecodeError (_("multi-part subtitles not yet supported"));
_subtitle.reset (new Subtitle (Position (rect->x, rect->y), image));
}
-/** @param t Time in seconds from the start of the source */
+/** @param t Time from the start of the source */
bool
-TimedSubtitle::displayed_at (double t) const
+TimedSubtitle::displayed_at (Time t) const
{
return t >= _from && t <= _to;
}
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
public:
TimedSubtitle (AVSubtitle const &);
- bool displayed_at (double t) const;
+ bool displayed_at (Time) const;
boost::shared_ptr<Subtitle> subtitle () const {
return _subtitle;
private:
/** the subtitle */
boost::shared_ptr<Subtitle> _subtitle;
- /** display from time in seconds from the start of the film */
- double _from;
- /** display to time in seconds from the start of the film */
- double _to;
+	/** time, from the start of the content, at which the subtitle starts to be displayed */
+ Time _from;
+	/** time, from the start of the content, at which the subtitle stops being displayed */
+ Time _to;
};
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
/** Called by subclasses to tell the world that some video data is ready.
* We find a subtitle then emit it for listeners.
* @param image frame to emit.
- * @param t Time of the frame within the source, in seconds.
+ * @param t Time of the frame within the source.
*/
void
-VideoDecoder::emit_video (shared_ptr<Image> image, bool same, double t)
+VideoDecoder::emit_video (shared_ptr<Image> image, bool same, Time t)
{
shared_ptr<Subtitle> sub;
if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
virtual PixelFormat pixel_format () const = 0;
- void emit_video (boost::shared_ptr<Image>, bool, double);
+ void emit_video (boost::shared_ptr<Image>, bool, Time);
void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
private:
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
wxBoxSizer* b = new wxBoxSizer (wxVERTICAL);
_content_add = new wxButton (_content_panel, wxID_ANY, _("Add..."));
- b->Add (_content_add);
+ b->Add (_content_add, 1, wxEXPAND | wxLEFT | wxRIGHT);
_content_remove = new wxButton (_content_panel, wxID_ANY, _("Remove"));
- b->Add (_content_remove);
+ b->Add (_content_remove, 1, wxEXPAND | wxLEFT | wxRIGHT);
_content_properties = new wxButton (_content_panel, wxID_ANY, _("Properties..."));
b->Add (_content_properties);
_content_timeline = new wxButton (_content_panel, wxID_ANY, _("Timeline..."));
- b->Add (_content_timeline);
+ b->Add (_content_timeline, 1, wxEXPAND | wxLEFT | wxRIGHT);
s->Add (b, 0, wxALL, 4);
+/* -*- c-basic-offset: 8; tab-width: 8; -*- */
+
/*
Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
}
void
-FilmViewer::process_video (shared_ptr<const Image> image, bool, shared_ptr<Subtitle> sub, double t)
+FilmViewer::process_video (shared_ptr<const Image> image, bool, shared_ptr<Subtitle> sub, Time t)
{
_raw_frame = image;
_raw_sub = sub;
_got_frame = true;
double const fps = _film->dcp_video_frame_rate ();
- _frame->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps))));
+ _frame->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps / TIME_HZ))));
- double w = t;
+ double w = static_cast<double>(t) / TIME_HZ;
int const h = (w / 3600);
w -= h * 3600;
int const m = (w / 60);
return;
}
- cout << "-> FilmViewer::get_frame()\n";
-
try {
_got_frame = false;
while (!_got_frame) {
check_play_state ();
error_dialog (this, wxString::Format (_("Could not decode video for view (%s)"), std_to_wx(e.what()).data()));
}
-
- cout << "<- FilmViewer::get_frame()\n";
}
void
void slider_moved (wxScrollEvent &);
void play_clicked (wxCommandEvent &);
void timer (wxTimerEvent &);
- void process_video (boost::shared_ptr<const Image>, bool, boost::shared_ptr<Subtitle>, double);
+ void process_video (boost::shared_ptr<const Image>, bool, boost::shared_ptr<Subtitle>, Time);
void calculate_sizes ();
void check_play_state ();
void update_from_raw ();