X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=ce42f428f6711c460bc37632cf26418796f990c6;hb=b88874e34d429b4624965da09edfde3a56329e38;hp=2850b5aa031a939700bad58ff6a0e7ee2b15b614;hpb=1679c3dc40262733f46dda9f4151367bf93f2b76;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 2850b5aa0..ce42f428f 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2020 Carl Hetherington
+    Copyright (C) 2012-2021 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -18,26 +18,31 @@
 
 */
 
-#include "video_decoder.h"
-#include "image.h"
-#include "raw_image_proxy.h"
+
+#include "compose.hpp"
 #include "film.h"
-#include "log.h"
 #include "frame_interval_checker.h"
-#include "compose.hpp"
-#include 
+#include "image.h"
+#include "j2k_image_proxy.h"
+#include "log.h"
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include <iostream>
 
 #include "i18n.h"
 
+
+using std::back_inserter;
 using std::cout;
+using std::dynamic_pointer_cast;
 using std::list;
+using std::make_shared;
 using std::max;
-using std::back_inserter;
-using boost::shared_ptr;
+using std::shared_ptr;
 using boost::optional;
 using namespace dcpomatic;
 
+
 VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 	: DecoderPart (parent)
 	, _content (c)
@@ -46,6 +51,7 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 }
 
 
+
 /** Called by decoder classes when they have a video frame ready.
  *  @param frame Frame index within the content; this does not take into account 3D
  *  so for 3D_ALTERNATE this value goes:
@@ -62,10 +68,10 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 		return;
 	}
 
-	double const afr = _content->active_video_frame_rate(film);
-	VideoFrameType const vft = _content->video->frame_type();
+	auto const afr = _content->active_video_frame_rate(film);
+	auto const vft = _content->video->frame_type();
 
-	ContentTime frame_time = ContentTime::from_frames (decoder_frame, afr);
+	auto frame_time = ContentTime::from_frames (decoder_frame, afr);
 
 	/* Do some heuristics to try and spot the case where the user sets content to 3D
 	 * when it is not.  We try to tell this by looking at the differences in time between
@@ -73,7 +79,7 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	 */
 	if (_frame_interval_checker) {
 		_frame_interval_checker->feed (frame_time, afr);
-		if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VIDEO_FRAME_TYPE_3D) {
+		if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
 			boost::throw_exception (
 				DecodeError(
 					String::compose(
@@ -91,7 +97,7 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	}
 
 	Frame frame;
-	Eyes eyes = EYES_BOTH;
+	Eyes eyes = Eyes::BOTH;
 	if (!_position) {
 		/* This is the first data we have received since initialisation or seek.  Set
 		   the position based on the frame that was given.  After this first time
@@ -101,22 +107,47 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 		   ffmpeg seems to carry on regardless, processing the video frame as normal.
 		   If we drop the frame with the duplicated timestamp we obviously lose sync.
 		*/
-		_position = ContentTime::from_frames (decoder_frame, afr);
-		if (vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+
+		if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			frame = decoder_frame / 2;
-			_last_emitted_eyes = EYES_RIGHT;
+			eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
 		} else {
 			frame = decoder_frame;
+			if (vft == VideoFrameType::THREE_D) {
+				auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+				/* At the moment only DCP decoders produce VideoFrameType::THREE_D, so only the J2KImageProxy
+				 * knows which eye it is.
+				 */
+				if (j2k && j2k->eye()) {
+					eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+				}
+			}
 		}
+
+		_position = ContentTime::from_frames (frame, afr);
 	} else {
-		if (vft == VIDEO_FRAME_TYPE_3D || vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+		if (vft == VideoFrameType::THREE_D) {
+			auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+			if (j2k && j2k->eye()) {
+				if (j2k->eye() == dcp::Eye::LEFT) {
+					frame = _position->frames_round(afr) + 1;
+					eyes = Eyes::LEFT;
+				} else {
+					frame = _position->frames_round(afr);
+					eyes = Eyes::RIGHT;
+				}
+			} else {
+				/* This should not happen; see above */
+				frame = _position->frames_round(afr) + 1;
+			}
+		} else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			DCPOMATIC_ASSERT (_last_emitted_eyes);
-			if (_last_emitted_eyes.get() == EYES_RIGHT) {
+			if (_last_emitted_eyes.get() == Eyes::RIGHT) {
 				frame = _position->frames_round(afr) + 1;
-				eyes = EYES_LEFT;
+				eyes = Eyes::LEFT;
 			} else {
 				frame = _position->frames_round(afr);
-				eyes = EYES_RIGHT;
+				eyes = Eyes::RIGHT;
 			}
 		} else {
 			frame = _position->frames_round(afr) + 1;
@@ -124,35 +155,29 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	}
 
 	switch (vft) {
-	case VIDEO_FRAME_TYPE_2D:
-		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
-		break;
-	case VIDEO_FRAME_TYPE_3D:
-	{
-		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
-		_last_emitted_frame = frame;
-		_last_emitted_eyes = eyes;
+	case VideoFrameType::TWO_D:
+	case VideoFrameType::THREE_D:
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
 		break;
-	}
-	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+	case VideoFrameType::THREE_D_ALTERNATE:
 	{
-		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
 		_last_emitted_eyes = eyes;
 		break;
 	}
-	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+	case VideoFrameType::THREE_D_LEFT_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+	case VideoFrameType::THREE_D_TOP_BOTTOM:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+	case VideoFrameType::THREE_D_LEFT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
 		break;
-	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+	case VideoFrameType::THREE_D_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
 		break;
 	default:
 		DCPOMATIC_ASSERT (false);
@@ -161,11 +186,11 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	_position = ContentTime::from_frames (frame, afr);
 }
 
+
 void
VideoDecoder::seek ()
 {
 	_position = boost::none;
-	_last_emitted_frame.reset ();
 	_last_emitted_eyes.reset ();
 	_frame_interval_checker.reset (new FrameIntervalChecker());
 }
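
For reference, the eye-selection rule the new code applies to VideoFrameType::THREE_D_ALTERNATE content is a simple parity test: decoder frames arrive left, right, left, right, ..., so the content frame index is decoder_frame / 2 and odd decoder frames are the right eye (for VideoFrameType::THREE_D the eye is read from the J2KImageProxy instead). A minimal standalone sketch of that mapping, using an illustrative Eyes enum and a hypothetical map_alternate() helper rather than DCP-o-matic's own types:

// Sketch only: mirrors the parity logic added in the patch above for
// 3D-alternate content.  Eyes and map_alternate() are illustrative
// stand-ins, not DCP-o-matic types.
#include <cstdint>
#include <iostream>
#include <utility>

enum class Eyes { LEFT, RIGHT };

// Two decoder frames make one content frame; odd indices are the right eye.
static std::pair<int64_t, Eyes>
map_alternate (int64_t decoder_frame)
{
	return { decoder_frame / 2, (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT };
}

int main ()
{
	for (int64_t f = 0; f < 6; ++f) {
		auto const m = map_alternate (f);
		std::cout << "decoder frame " << f << " -> content frame " << m.first
			  << (m.second == Eyes::RIGHT ? " (right)\n" : " (left)\n");
	}
}

Collapsing each pair of decoder frames to a single content frame index is what lets the rest of emit() treat 3D-alternate content like the other 3D frame types.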