X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=edc746010f082a0558617763798544b750a940d1;hp=31dc3cdc204836e821b385b0280d2c13f2defb31;hb=a8a0dfd1b21de6c0facf965ab119833ff6f790bf;hpb=6f0a590bc3266f21ba577116219bd019e891d480

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 31dc3cdc2..edc746010 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,29 +1,31 @@
 /*
-    Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
 #include "video_decoder.h"
 #include "image.h"
-#include "image_proxy.h"
-#include "raw_image_proxy.h"
 #include "raw_image_proxy.h"
 #include "film.h"
 #include "log.h"
+#include "compose.hpp"
+#include <boost/foreach.hpp>
+#include <iostream>
 
 #include "i18n.h"
 
@@ -34,28 +36,31 @@ using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
 
-VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared_ptr<Log> log)
 #ifdef DCPOMATIC_DEBUG
 	: test_gaps (0)
-	, _video_content (c)
+	, _parent (parent),
+	_content (c)
 #else
-	: _video_content (c)
+	: _parent (parent)
+	, _content (c)
 #endif
+	, _log (log)
 	, _last_seek_accurate (true)
-	, _ignore_video (false)
+	, _ignore (false)
 {
-	_black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
+	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _content->video->size(), true));
 	_black_image->make_black ();
 }
 
 list<ContentVideo>
-VideoDecoder::decoded_video (VideoFrame frame)
+VideoDecoder::decoded (Frame frame)
 {
 	list<ContentVideo> output;
-	
+
+	BOOST_FOREACH (ContentVideo const & i, _decoded) {
+		if (i.frame.index() == frame) {
+			output.push_back (i);
-	for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
-		if (i->frame == frame) {
-			output.push_back (*i);
 		}
 	}
 
@@ -68,17 +73,25 @@
  * @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
  */
 list<ContentVideo>
-VideoDecoder::get_video (VideoFrame frame, bool accurate)
+VideoDecoder::get (Frame frame, bool accurate)
 {
-	/* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
-	   method returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
-	   one after the end of _decoded_video we need to seek.
+	if (_no_data_frame && frame >= _no_data_frame.get()) {
+		return list<ContentVideo> ();
+	}
+
+	/* At this stage, if we have get_video()ed before, _decoded will contain the last frame that this
+	   method returned (and possibly a few more). If the requested frame is not in _decoded and it is not the next
+	   one after the end of _decoded we need to seek.
 	*/
-	if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
-		seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+	_log->log (String::compose ("VD has request for %1", frame), LogEntry::TYPE_DEBUG_DECODE);
+
+	if (_decoded.empty() || frame < _decoded.front().frame.index() || frame > (_decoded.back().frame.index() + 1)) {
+		_parent->seek (ContentTime::from_frames (frame, _content->active_video_frame_rate()), accurate);
 	}
 
+	unsigned int const frames_wanted = _content->video->frame_type() == VIDEO_FRAME_TYPE_2D ? 1 : 2;
+
 	list<ContentVideo> dec;
 
 	/* Now enough pass() calls should either:
@@ -90,18 +103,21 @@ VideoDecoder::get_video (VideoFrame frame, bool accurate)
 		/* We are being accurate, so we want the right frame.
 		 * This could all be one statement but it's split up for clarity.
 		 */
+		bool no_data = false;
+
 		while (true) {
-			if (!decoded_video(frame).empty ()) {
+			if (decoded(frame).size() == frames_wanted) {
 				/* We got what we want */
 				break;
 			}
 
-			if (pass (PASS_REASON_VIDEO)) {
+			if (_parent->pass (Decoder::PASS_REASON_VIDEO, accurate)) {
 				/* The decoder has nothing more for us */
+				no_data = true;
 				break;
 			}
 
-			if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
+			if (!_decoded.empty() && _decoded.front().frame.index() > frame) {
 				/* We're never going to get the frame we want. Perhaps the caller is asking
 				 * for a video frame before the content's video starts (if its audio
 				 * begins before its video, for example).
@@ -110,27 +126,39 @@ VideoDecoder::get_video (VideoFrame frame, bool accurate)
 			}
 		}
 
-		dec = decoded_video (frame);
+		dec = decoded (frame);
+
+		if (no_data && dec.empty()) {
+			_no_data_frame = frame;
+		}
+
 	} else {
-		/* Any frame will do: use the first one that comes out of pass() */
-		while (_decoded_video.empty() && !pass (PASS_REASON_VIDEO)) {}
-		if (!_decoded_video.empty ()) {
-			dec.push_back (_decoded_video.front ());
+		/* Any frame(s) will do: use the first one(s) that comes out of pass() */
+		while (_decoded.size() < frames_wanted && !_parent->pass (Decoder::PASS_REASON_VIDEO, accurate)) {}
+		list<ContentVideo>::const_iterator i = _decoded.begin();
+		unsigned int j = 0;
+		while (i != _decoded.end() && j < frames_wanted) {
+			dec.push_back (*i);
+			++i;
+			++j;
 		}
 	}
 
-	/* Clean up _decoded_video; keep the frame we are returning (which may have two images
+	/* Clean up _decoded; keep the frame we are returning, if any (which may have two images
 	   for 3D), but nothing before that */
-	while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
-		_decoded_video.pop_front ();
+	while (!_decoded.empty() && !dec.empty() && _decoded.front().frame.index() < dec.front().frame.index()) {
+		_decoded.pop_front ();
 	}
 
 	return dec;
 }
 
-/** Fill _decoded_video from `from' up to, but not including, `to' */
+/** Fill _decoded from `from' up to, but not including, `to' with
+ *  a frame for one particular Eyes value (which could be EYES_BOTH,
+ *  EYES_LEFT or EYES_RIGHT)
+ */
 void
-VideoDecoder::fill_2d (VideoFrame from, VideoFrame to)
+VideoDecoder::fill_one_eye (Frame from, Frame to, Eyes eye)
 {
 	if (to == 0) {
 		/* Already OK */
@@ -138,51 +166,43 @@
 	}
 
 	/* Fill with black... */
-	boost::shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
+	shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
 	Part filler_part = PART_WHOLE;
 
 	/* ...unless there's some video we can fill with */
-	if (!_decoded_video.empty ()) {
-		filler_image = _decoded_video.back().image;
-		filler_part = _decoded_video.back().part;
+	if (!_decoded.empty ()) {
+		filler_image = _decoded.back().image;
+		filler_part = _decoded.back().part;
 	}
 
-	VideoFrame filler_frame = from;
-
-	while (filler_frame < to) {
-
+	for (Frame i = from; i < to; ++i) {
 #ifdef DCPOMATIC_DEBUG
 		test_gaps++;
 #endif
-		_decoded_video.push_back (
-			ContentVideo (filler_image, EYES_BOTH, filler_part, filler_frame)
+		_decoded.push_back (
+			ContentVideo (filler_image, VideoFrame (i, eye), filler_part)
 		);
-
-		++filler_frame;
 	}
 }
 
-/** Fill _decoded_video from `from' up to, but not including, `to' */
+/** Fill _decoded from `from' up to, but not including, `to'
+ *  adding both left and right eye frames.
+ */
 void
-VideoDecoder::fill_3d (VideoFrame from, VideoFrame to, Eyes eye)
+VideoDecoder::fill_both_eyes (VideoFrame from, VideoFrame to)
 {
-	if (to == 0 && eye == EYES_LEFT) {
-		/* Already OK */
-		return;
-	}
-
 	/* Fill with black... */
-	boost::shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
-	boost::shared_ptr<const ImageProxy> filler_right_image (new RawImageProxy (_black_image));
+	shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
+	shared_ptr<const ImageProxy> filler_right_image (new RawImageProxy (_black_image));
 	Part filler_left_part = PART_WHOLE;
 	Part filler_right_part = PART_WHOLE;
 
 	/* ...unless there's some video we can fill with */
-	for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
-		if (i->eyes == EYES_LEFT && !filler_left_image) {
+	for (list<ContentVideo>::const_reverse_iterator i = _decoded.rbegin(); i != _decoded.rend(); ++i) {
+		if (i->frame.eyes() == EYES_LEFT && !filler_left_image) {
 			filler_left_image = i->image;
 			filler_left_part = i->part;
-		} else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+		} else if (i->frame.eyes() == EYES_RIGHT && !filler_right_image) {
 			filler_right_image = i->image;
 			filler_right_part = i->part;
 		}
@@ -192,138 +212,136 @@
 		}
 	}
 
-	VideoFrame filler_frame = from;
-	Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
-
-	if (_decoded_video.empty ()) {
-		filler_frame = 0;
-		filler_eye = EYES_LEFT;
-	} else if (_decoded_video.back().eyes == EYES_LEFT) {
-		filler_frame = _decoded_video.back().frame;
-		filler_eye = EYES_RIGHT;
-	} else if (_decoded_video.back().eyes == EYES_RIGHT) {
-		filler_frame = _decoded_video.back().frame + 1;
-		filler_eye = EYES_LEFT;
-	}
-
-	while (filler_frame != to || filler_eye != eye) {
+	while (from != to) {
 
 #ifdef DCPOMATIC_DEBUG
 		test_gaps++;
 #endif
-		_decoded_video.push_back (
+		_decoded.push_back (
 			ContentVideo (
-				filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
-				filler_eye,
-				filler_eye == EYES_LEFT ? filler_left_part : filler_right_part,
-				filler_frame
+				from.eyes() == EYES_LEFT ? filler_left_image : filler_right_image,
+				from,
+				from.eyes() == EYES_LEFT ? filler_left_part : filler_right_part
 				)
 			);
 
-		if (filler_eye == EYES_LEFT) {
-			filler_eye = EYES_RIGHT;
-		} else {
-			filler_eye = EYES_LEFT;
-			++filler_frame;
-		}
+		++from;
 	}
 }
 
-/** Called by subclasses when they have a video frame ready */
+/** Called by decoder classes when they have a video frame ready */
 void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+VideoDecoder::give (shared_ptr<const ImageProxy> image, Frame frame)
 {
-	if (_ignore_video) {
+	if (_ignore) {
 		return;
 	}
 
-	/* We may receive the same frame index twice for 3D, and we need to know
-	   when that happens.
-	*/
-	bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+	_log->log (String::compose ("VD receives %1", frame), LogEntry::TYPE_DEBUG_DECODE);
 
-	/* Work out what we are going to push into _decoded_video next */
+	/* Work out what we are going to push into _decoded next */
 	list<ContentVideo> to_push;
-	switch (_video_content->video_frame_type ()) {
+	switch (_content->video->frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
-		to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_BOTH), PART_WHOLE));
 		break;
+	case VIDEO_FRAME_TYPE_3D:
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+	{
+		/* We receive the same frame index twice for 3D-alternate; hence we know which
+		   frame this one is.
+		*/
+		bool const same = (!_decoded.empty() && frame == _decoded.back().frame.index());
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, same ? EYES_RIGHT : EYES_LEFT), PART_WHOLE));
 		break;
+	}
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
-		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_LEFT_HALF));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_RIGHT_HALF));
 		break;
 	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
-		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_TOP_HALF));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_BOTTOM_HALF));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT:
-		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_WHOLE));
 		break;
 	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_WHOLE));
 		break;
 	default:
 		DCPOMATIC_ASSERT (false);
 	}
 
 	/* Now VideoDecoder is required never to have gaps in the frames that it presents
-	   via get_video(). Hence we need to fill in any gap between the last thing in _decoded_video
+	   via get_video(). Hence we need to fill in any gap between the last thing in _decoded
 	   and the things we are about to push.
 	*/
 
-	boost::optional<VideoFrame> from;
-	boost::optional<VideoFrame> to;
-
-	if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
-		from = _last_seek_time->frames (_video_content->video_frame_rate ());
-		to = to_push.front().frame;
-	} else if (!_decoded_video.empty ()) {
-		from = _decoded_video.back().frame + 1;
-		to = to_push.front().frame;
+	optional<VideoFrame> from;
+
+	if (_decoded.empty() && _last_seek_time && _last_seek_accurate) {
+		from = VideoFrame (
+			_last_seek_time->frames_round (_content->active_video_frame_rate ()),
+			_content->video->frame_type() == VIDEO_FRAME_TYPE_2D ? EYES_BOTH : EYES_LEFT
+			);
+	} else if (!_decoded.empty ()) {
+		from = _decoded.back().frame;
+		++(*from);
 	}
 
-	/* It has been known that this method receives frames out of order; at this
-	   point I'm not sure why, but we'll just ignore them.
+	/* If we've pre-rolled on a seek we may now receive out-of-order frames
+	   (frames before the last seek time) which we can just ignore.
 	*/
-	if (from && to && from.get() > to.get()) {
-		_video_content->film()->log()->log (
-			String::compose ("Ignoring out-of-order decoded frame %1 after %2", to.get(), from.get()), Log::TYPE_WARNING
-			);
+	if (from && (*from) > to_push.front().frame) {
 		return;
 	}
 
 	if (from) {
-		if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
-			fill_2d (from.get(), to.get ());
-		} else {
-			fill_3d (from.get(), to.get(), to_push.front().eyes);
+		switch (_content->video->frame_type ()) {
+		case VIDEO_FRAME_TYPE_2D:
+			fill_one_eye (from->index(), to_push.front().frame.index(), EYES_BOTH);
+			break;
+		case VIDEO_FRAME_TYPE_3D:
+		case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+		case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+		case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+			fill_both_eyes (from.get(), to_push.front().frame);
+			break;
+		case VIDEO_FRAME_TYPE_3D_LEFT:
+			fill_one_eye (from->index(), to_push.front().frame.index(), EYES_LEFT);
+			break;
+		case VIDEO_FRAME_TYPE_3D_RIGHT:
+			fill_one_eye (from->index(), to_push.front().frame.index(), EYES_RIGHT);
+			break;
 		}
 	}
 
-	copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+	copy (to_push.begin(), to_push.end(), back_inserter (_decoded));
 
-	/* We can't let this build up too much or we will run out of memory. We need to allow
-	   the most frames that can exist between blocks of sound in a multiplexed file.
+	/* We can't let this build up too much or we will run out of memory. There is a
+	   `best' value for the allowed size of _decoded which balances memory use
+	   with decoding efficiency (lack of seeks). Throwing away video frames here
+	   is not a problem for correctness, so do it.
 	*/
-	DCPOMATIC_ASSERT (_decoded_video.size() <= 96);
+	while (_decoded.size() > 96) {
+		_decoded.pop_back ();
+	}
 }
 
 void
 VideoDecoder::seek (ContentTime s, bool accurate)
 {
-	_decoded_video.clear ();
+	_decoded.clear ();
 	_last_seek_time = s;
 	_last_seek_accurate = accurate;
 }
 
-/** Set this player never to produce any video data */
+/** Set this decoder never to produce any data */
 void
-VideoDecoder::set_ignore_video ()
+VideoDecoder::set_ignore ()
 {
-	_ignore_video = true;
+	_ignore = true;
 }
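
A note on the VideoFrame type used in the new code: give() and fill_both_eyes() increment a VideoFrame value directly (++from, ++(*from)), which only works if incrementing steps through the eyes before advancing the frame index. The class below is a minimal illustrative sketch of that assumed behaviour under those assumptions; it is not the actual dcpomatic VideoFrame declaration, and the member names and operator set shown here are hypothetical.

/* Sketch only: shows the increment and comparison semantics the diff above appears
   to rely on.  operator++ moves EYES_LEFT -> EYES_RIGHT within one index, then on to
   EYES_LEFT of the next index, so fill_both_eyes() can walk `from' towards `to' one
   eye at a time; 2D frames (EYES_BOTH) just advance the index.
*/

#include <cstdint>

enum Eyes {
	EYES_BOTH,
	EYES_LEFT,
	EYES_RIGHT
};

class VideoFrame
{
public:
	VideoFrame (int64_t i, Eyes e)
		: _index (i)
		, _eyes (e)
	{}

	int64_t index () const {
		return _index;
	}

	Eyes eyes () const {
		return _eyes;
	}

	/* Step to the other eye of the same index, or on to the next index */
	VideoFrame& operator++ ()
	{
		if (_eyes == EYES_LEFT) {
			_eyes = EYES_RIGHT;
		} else if (_eyes == EYES_RIGHT) {
			_eyes = EYES_LEFT;
			++_index;
		} else {
			/* EYES_BOTH: 2D content, so just move to the next frame */
			++_index;
		}
		return *this;
	}

	bool operator!= (VideoFrame const & other) const {
		return _index != other._index || _eyes != other._eyes;
	}

	bool operator> (VideoFrame const & other) const {
		if (_index != other._index) {
			return _index > other._index;
		}
		return _eyes > other._eyes;
	}

private:
	int64_t _index;
	Eyes _eyes;
};

With semantics like these, the while (from != to) loop in fill_both_eyes() emits left/right filler frames in order, and the ++(*from) in give() selects the first frame position after the last one already in _decoded.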