diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index cab8197cb75420eb2ba29e88ab51dfaaa7c68cdf..cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2020 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
 
 */
 
-#include "video_decoder.h"
-#include "image.h"
-#include "raw_image_proxy.h"
+
+#include "compose.hpp"
 #include "film.h"
-#include "log.h"
 #include "frame_interval_checker.h"
-#include "compose.hpp"
+#include "image.h"
+#include "j2k_image_proxy.h"
+#include "log.h"
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::cout;
-using std::list;
-using std::max;
-using std::back_inserter;
+using std::dynamic_pointer_cast;
 using std::shared_ptr;
-using boost::optional;
 using namespace dcpomatic;
 
+
 VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
        : DecoderPart (parent)
        , _content (c)
@@ -45,6 +46,7 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 
 }
 
+
 /** Called by decoder classes when they have a video frame ready.
  *  @param frame Frame index within the content; this does not take into account 3D
  *  so for 3D_ALTERNATE this value goes:
@@ -61,10 +63,10 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
                return;
        }
 
-       double const afr = _content->active_video_frame_rate(film);
-       VideoFrameType const vft = _content->video->frame_type();
+       auto const afr = _content->active_video_frame_rate(film);
+       auto const vft = _content->video->frame_type();
 
-       ContentTime frame_time = ContentTime::from_frames (decoder_frame, afr);
+       auto frame_time = ContentTime::from_frames (decoder_frame, afr);
 
        /* Do some heuristics to try and spot the case where the user sets content to 3D
         * when it is not.  We try to tell this by looking at the differences in time between
@@ -72,7 +74,7 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
         */
        if (_frame_interval_checker) {
                _frame_interval_checker->feed (frame_time, afr);
-               if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VIDEO_FRAME_TYPE_3D) {
+               if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
                        boost::throw_exception (
                                DecodeError(
                                        String::compose(
@@ -90,32 +92,57 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
        }
 
        Frame frame;
-       Eyes eyes = EYES_BOTH;
+       Eyes eyes = Eyes::BOTH;
        if (!_position) {
                /* This is the first data we have received since initialisation or seek.  Set
                   the position based on the frame that was given.  After this first time
-                  we just cound frames, since (as with audio) it seems that ContentTimes
+                  we just count frames, since (as with audio) it seems that ContentTimes
                   are unreliable from FFmpegDecoder.  They are much better than audio times
                   but still we get the occasional one which is duplicated.  In this case
                   ffmpeg seems to carry on regardless, processing the video frame as normal.
                   If we drop the frame with the duplicated timestamp we obviously lose sync.
                */
-               _position = ContentTime::from_frames (decoder_frame, afr);
-               if (vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+
+               if (vft == VideoFrameType::THREE_D_ALTERNATE) {
                        frame = decoder_frame / 2;
-                       _last_emitted_eyes = EYES_RIGHT;
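+                       /* 3D-alternate content alternates eyes frame by frame: even decoder frames are taken to be the left eye, odd the right */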
+                       eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
                } else {
                        frame = decoder_frame;
+                       if (vft == VideoFrameType::THREE_D) {
+                               auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+                               /* At the moment only DCP decoders produce VideoFrameType::THREE_D, so only the J2KImageProxy
+                                * knows which eye it is.
+                                */
+                               if (j2k && j2k->eye()) {
+                                       eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+                               }
+                       }
                }
+
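+               /* Set the position from the frame index worked out above, not from decoder_frame (which counts each eye separately for 3D-alternate) */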
+               _position = ContentTime::from_frames (frame, afr);
        } else {
-               if (vft == VIDEO_FRAME_TYPE_3D || vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+               if (vft == VideoFrameType::THREE_D) {
+                       auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+                       if (j2k && j2k->eye()) {
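+                               /* A left-eye image starts a new frame; the right-eye image that follows shares that frame index */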
+                               if (j2k->eye() == dcp::Eye::LEFT) {
+                                       frame = _position->frames_round(afr) + 1;
+                                       eyes = Eyes::LEFT;
+                               } else {
+                                       frame = _position->frames_round(afr);
+                                       eyes = Eyes::RIGHT;
+                               }
+                       } else {
+                               /* This should not happen; see above */
+                               frame = _position->frames_round(afr) + 1;
+                       }
+               } else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
                        DCPOMATIC_ASSERT (_last_emitted_eyes);
-                       if (_last_emitted_eyes.get() == EYES_RIGHT) {
+                       if (_last_emitted_eyes.get() == Eyes::RIGHT) {
                                frame = _position->frames_round(afr) + 1;
-                               eyes = EYES_LEFT;
+                               eyes = Eyes::LEFT;
                        } else {
                                frame = _position->frames_round(afr);
-                               eyes = EYES_RIGHT;
+                               eyes = Eyes::RIGHT;
                        }
                } else {
                        frame = _position->frames_round(afr) + 1;
@@ -123,35 +150,29 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
        }
 
        switch (vft) {
-       case VIDEO_FRAME_TYPE_2D:
-               Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
-               break;
-       case VIDEO_FRAME_TYPE_3D:
-       {
-               Data (ContentVideo (image, frame, eyes, PART_WHOLE));
-               _last_emitted_frame = frame;
-               _last_emitted_eyes = eyes;
+       case VideoFrameType::TWO_D:
+       case VideoFrameType::THREE_D:
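+               /* For 2D, eyes is Eyes::BOTH; for 3D it was worked out above from the J2KImageProxy where possible */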
+               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
                break;
-       }
-       case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+       case VideoFrameType::THREE_D_ALTERNATE:
        {
-               Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
                _last_emitted_eyes = eyes;
                break;
        }
-       case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+       case VideoFrameType::THREE_D_LEFT_RIGHT:
+               Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
                break;
-       case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+       case VideoFrameType::THREE_D_TOP_BOTTOM:
+               Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
                break;
-       case VIDEO_FRAME_TYPE_3D_LEFT:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+       case VideoFrameType::THREE_D_LEFT:
+               Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
                break;
-       case VIDEO_FRAME_TYPE_3D_RIGHT:
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+       case VideoFrameType::THREE_D_RIGHT:
+               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
                break;
        default:
                DCPOMATIC_ASSERT (false);
@@ -160,11 +181,11 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
        _position = ContentTime::from_frames (frame, afr);
 }
 
+
 void
 VideoDecoder::seek ()
 {
        _position = boost::none;
-       _last_emitted_frame.reset ();
        _last_emitted_eyes.reset ();
        _frame_interval_checker.reset (new FrameIntervalChecker());
 }