X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=508ed90b71c71f62e874f0a763b4b19f2d9db8d7;hb=6b2c68525a3e8a6ea2152ba966f2ca1d861fed35;hp=9afbd31c46bbd4d52304b478c054fee62fa2e97d;hpb=130577d2e4e67de15ac8f5d6447729736d4bcac6;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 9afbd31c4..508ed90b7 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -35,9 +35,10 @@ using std::max;
 using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
+using namespace dcpomatic;
 
-VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const VideoContent> c, shared_ptr<Log> log)
-	: DecoderPart (parent, log)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const VideoContent> c)
+	: DecoderPart (parent)
 	, _content (c)
 {
 
@@ -53,30 +54,91 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const VideoContent> c, shared
  *  and so on.
  */
 void
-VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
+VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
 {
 	if (ignore ()) {
 		return;
 	}
 
-	/* Work out what we are going to emit next */
+	/* Before we `re-write' the frame indexes of these incoming data we need to check for
+	   the case where the user has some 2D content which they have marked as 3D. With 3D
+	   we should get two frames for each frame index, but in this `bad' case we only get
+	   one. We need to throw an exception if this happens.
+	*/
+
+	if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D) {
+		if (_last_threed_frames.size() > 4) {
+			_last_threed_frames.erase (_last_threed_frames.begin());
+		}
+		_last_threed_frames.push_back (decoder_frame);
+		if (_last_threed_frames.size() == 4) {
+			if (_last_threed_frames[0] != _last_threed_frames[1] || _last_threed_frames[2] != _last_threed_frames[3]) {
+				boost::throw_exception (
+					DecodeError(
+						String::compose(
+							_("The content file %1 is set as 3D but does not appear to contain 3D images. Please set it to 2D. "
+							  "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
+							_content->path(0)
+							)
+						)
+					);
+			}
+		}
+	}
+
+	double const afr = _content->active_video_frame_rate(film);
+
+	Frame frame;
+	Eyes eyes = EYES_BOTH;
+	if (!_position) {
+		/* This is the first data we have received since initialisation or seek. Set
+		   the position based on the frame that was given. After this first time
+		   we just count frames, since (as with audio) it seems that ContentTimes
+		   are unreliable from FFmpegDecoder. They are much better than audio times
+		   but still we get the occasional one which is duplicated. In this case
+		   ffmpeg seems to carry on regardless, processing the video frame as normal.
+		   If we drop the frame with the duplicated timestamp we obviously lose sync.
+		*/
+		_position = ContentTime::from_frames (decoder_frame, afr);
+		if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+			frame = decoder_frame / 2;
+			_last_emitted_eyes = EYES_RIGHT;
+		} else {
+			frame = decoder_frame;
+		}
+	} else {
+		VideoFrameType const ft = _content->video->frame_type ();
+		if (ft == VIDEO_FRAME_TYPE_3D_ALTERNATE || ft == VIDEO_FRAME_TYPE_3D) {
+			DCPOMATIC_ASSERT (_last_emitted_eyes);
+			if (_last_emitted_eyes.get() == EYES_RIGHT) {
+				frame = _position->frames_round(afr) + 1;
+				eyes = EYES_LEFT;
+			} else {
+				frame = _position->frames_round(afr);
+				eyes = EYES_RIGHT;
+			}
+		} else {
+			frame = _position->frames_round(afr) + 1;
+		}
+	}
+
 	switch (_content->video->frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
 		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
 		break;
 	case VIDEO_FRAME_TYPE_3D:
 	{
-		/* We receive the same frame index twice for 3D; hence we know which
-		   frame this one is.
-		*/
-		bool const same = (_last_emitted && _last_emitted.get() == frame);
-		Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
-		_last_emitted = frame;
+		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+		_last_emitted_frame = frame;
+		_last_emitted_eyes = eyes;
 		break;
 	}
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+	{
+		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+		_last_emitted_eyes = eyes;
 		break;
+	}
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
 		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
 		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
@@ -95,5 +157,13 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
 		DCPOMATIC_ASSERT (false);
 	}
 
-	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
+	_position = ContentTime::from_frames (frame, afr);
+}
+
+void
+VideoDecoder::seek ()
+{
+	_position = boost::optional<ContentTime>();
+	_last_emitted_frame.reset ();
+	_last_emitted_eyes.reset ();
 }
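
A minimal standalone sketch of the `2D mislabelled as 3D' check that the new emit() performs above: with genuine frame-doubled 3D content each decoder frame index should arrive twice, so the first four indexes seen must form two equal pairs. The names below (ThreeDChecker, looks_like_3d) are illustrative only and not part of the patch; the real code keeps its window in _last_threed_frames and throws DecodeError on a mismatch rather than returning a flag.

// Illustrative sketch only -- not part of the patch.  Collect the first four
// decoder frame indexes and check that they form two equal pairs, which is
// what genuine frame-doubled 3D content should produce.
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <vector>

typedef int64_t Frame;

class ThreeDChecker
{
public:
	/* Feed each incoming decoder frame index; returns false once the first
	   four indexes show that frames are not duplicated (i.e. the content is
	   really 2D).  Optimistically returns true until four indexes are seen.
	*/
	bool looks_like_3d (Frame decoder_frame)
	{
		if (_seen.size() < 4) {
			_seen.push_back (decoder_frame);
		}
		if (_seen.size() < 4) {
			return true;
		}
		return _seen[0] == _seen[1] && _seen[2] == _seen[3];
	}

private:
	std::vector<Frame> _seen;
};

int main ()
{
	ThreeDChecker good;
	bool ok = true;
	for (Frame f: {0, 0, 1, 1}) {
		ok = good.looks_like_3d (f);
	}
	std::cout << "0 0 1 1 -> " << (ok ? "looks 3D" : "looks 2D") << "\n";

	ThreeDChecker bad;
	for (Frame f: {0, 1, 2, 3}) {
		ok = bad.looks_like_3d (f);
	}
	std::cout << "0 1 2 3 -> " << (ok ? "looks 3D" : "looks 2D") << "\n";
	return 0;
}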