X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Flib%2Fvideo_decoder.cc;h=6a7a62b742bad5655beb6915db7de5ec0dff60b1;hb=854f2e5bbb7ffb9758b823af87034033033f3cb8;hp=16a076698eff8c652061a693dc95960f38e0cf9e;hpb=bd58b201c3e45b44e804ce040cac9e6a8c26736e;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 16a076698..6a7a62b74 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -18,77 +18,120 @@
 
 */
 
 #include "video_decoder.h"
-#include "subtitle.h"
-#include "film.h"
 #include "image.h"
-#include "log.h"
-#include "options.h"
-#include "job.h"
+#include "content_video.h"
 
 #include "i18n.h"
 
 using std::cout;
+using std::list;
 using boost::shared_ptr;
 using boost::optional;
 
-VideoDecoder::VideoDecoder (shared_ptr<Film> f, DecodeOptions o)
-	: Decoder (f, o)
-	, _video_frame (0)
-	, _last_source_time (0)
+VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
+	: _video_content (c)
 {
 
 }
 
-/** Called by subclasses to tell the world that some video data is ready.
- *  We find a subtitle then emit it for listeners.
- *  @param image frame to emit.
- *  @param t Time of the frame within the source, in seconds.
- */
-void
-VideoDecoder::emit_video (shared_ptr<Image> image, bool same, double t)
+optional<ContentVideo>
+VideoDecoder::decoded_video (VideoFrame frame)
 {
-	shared_ptr<Subtitle> sub;
-	if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
-		sub = _timed_subtitle->subtitle ();
+	for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
+		if (i->frame == frame) {
+			return *i;
+		}
 	}
 
-	Video (image, same, sub, t);
-	++_video_frame;
-
-	_last_source_time = t;
+	return optional<ContentVideo> ();
 }
 
-/** Set up the current subtitle.  This will be put onto frames that
- *  fit within its time specification.  s may be 0 to say that there
- *  is no current subtitle.
- *  @param s New current subtitle, or 0.
- */
-void
-VideoDecoder::emit_subtitle (shared_ptr<TimedSubtitle> s)
+optional<ContentVideo>
+VideoDecoder::get_video (VideoFrame frame, bool accurate)
 {
-	_timed_subtitle = s;
-
-	if (_timed_subtitle) {
-		Position const p = _timed_subtitle->subtitle()->position ();
-		_timed_subtitle->subtitle()->set_position (Position (p.x - _film->crop().left, p.y - _film->crop().top));
+	if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
+		/* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+		seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+	}
+
+	optional<ContentVideo> dec;
+
+	/* Now enough pass() calls will either:
+	 * (a) give us what we want, or
+	 * (b) hit the end of the decoder.
+	 */
+	if (accurate) {
+		/* We are being accurate, so we want the right frame */
+		while (!decoded_video (frame) && !pass ()) {}
+		dec = decoded_video (frame);
+	} else {
+		/* Any frame will do: use the first one that comes out of pass() */
+		while (_decoded_video.empty() && !pass ()) {}
+		if (!_decoded_video.empty ()) {
+			dec = _decoded_video.front ();
+		}
 	}
+
+	/* Clean up decoded_video */
+	while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
+		_decoded_video.pop_front ();
+	}
+
+	return dec;
 }
 
-/** Set which stream of subtitles we should use from our source.
- *  @param s Stream to use.
- */
+
+/** Called by subclasses when they have a video frame ready */
 void
-VideoDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
+VideoDecoder::video (shared_ptr<const Image> image, VideoFrame frame)
 {
-	_subtitle_stream = s;
+	/* Fill in gaps */
+	/* XXX: 3D */
+	while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
+		_decoded_video.push_back (
+			ContentVideo (
+				_decoded_video.back().image,
+				_decoded_video.back().eyes,
+				_decoded_video.back().frame + 1
+			)
+		);
+	}
+
+	switch (_video_content->video_frame_type ()) {
+	case VIDEO_FRAME_TYPE_2D:
+		_decoded_video.push_back (ContentVideo (image, EYES_BOTH, frame));
+		break;
+	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+		_decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame));
+		break;
+	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+	{
+		int const half = image->size().width / 2;
+		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame));
+		_decoded_video.push_back (ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame));
+		break;
+	}
+	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+	{
+		int const half = image->size().height / 2;
+		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame));
+		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame));
+		break;
+	}
+	case VIDEO_FRAME_TYPE_3D_LEFT:
+		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, frame));
+		break;
+	case VIDEO_FRAME_TYPE_3D_RIGHT:
+		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, frame));
+		break;
+	default:
+		assert (false);
+	}
 }
 
 void
-VideoDecoder::set_progress (Job* j) const
+VideoDecoder::seek (ContentTime, bool)
 {
-	assert (j);
-
-	if (_film->length()) {
-		j->set_progress (float (_video_frame) / _film->length().get());
-	}
+	_decoded_video.clear ();
 }
+
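
Usage note (not part of the patch): the sketch below shows how a caller might drive the new get_video()/seek() interface added above. It is a minimal illustration under assumptions, not code from this commit; it presumes get_video() is publicly exposed in video_decoder.h and that "decoder" is an already-constructed concrete VideoDecoder subclass (how it is built is out of scope here).

/* Minimal sketch: fetch one exact frame through the new interface.
 * Assumption: the caller already owns a working VideoDecoder subclass.
 */
#include "video_decoder.h"
#include "content_video.h"
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>

static void
show_frame (boost::shared_ptr<VideoDecoder> decoder, VideoFrame frame)
{
	/* Accurate fetch: get_video() may seek and then pass() repeatedly until
	 * the requested frame has been decoded or the decoder runs out of data.
	 */
	boost::optional<ContentVideo> v = decoder->get_video (frame, true);
	if (v) {
		std::cout << "decoded frame " << v->frame << "\n";
	} else {
		std::cout << "frame " << frame << " not available\n";
	}
}

Passing accurate = false instead asks for whatever frame next comes out of pass(), which is cheaper when an approximate position is acceptable (for example while scrubbing).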