Logging improvements to allow prettier displays in the server GUI.
[dcpomatic.git] / src / lib / video_decoder.cc
index e4d5516a17ab020008cd02d7f665727d4f5db46e..88f88c1296a16c35c833c36172cc438fdaa61804 100644 (file)
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
 
 #include "video_decoder.h"
 #include "image.h"
+#include "raw_image_proxy.h"
+#include "film.h"
+#include "log.h"
+#include "compose.hpp"
+#include <iostream>
 
 #include "i18n.h"
 
 using std::cout;
+using std::list;
+using std::max;
+using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
 
/** @param c Content to decode video from.
 *
 *  In DCPOMATIC_DEBUG builds `test_gaps' counts frames that were synthesised
 *  to fill gaps, for checking by the test suite.
 */
VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
#ifdef DCPOMATIC_DEBUG
	: test_gaps (0)
	, _video_content (c)
#else
	: _video_content (c)
#endif
	, _last_seek_accurate (true)
	, _ignore_video (false)
{
	/* Pre-build a black frame at the content's size; used to fill gaps where
	   the decoder gives us nothing (see fill_one_eye / fill_both_eyes).
	*/
	_black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
	_black_image->make_black ();
}
+
+list<ContentVideo>
+VideoDecoder::decoded_video (Frame frame)
+{
+       list<ContentVideo> output;
+
+       for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
+               if (i->frame == frame) {
+                       output.push_back (*i);
+               }
+       }
+
+       return output;
+}
+
+/** Get all frames which exist in the content at a given frame index.
+ *  @param frame Frame index.
+ *  @param accurate true to try hard to return frames at the precise time that was requested, otherwise frames nearby may be returned.
+ *  @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
+ */
+list<ContentVideo>
+VideoDecoder::get_video (Frame frame, bool accurate)
+{
+       /* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
+          method returned (and possibly a few more).  If the requested frame is not in _decoded_video and it is not the next
+          one after the end of _decoded_video we need to seek.
+       */
+
+       _video_content->film()->log()->log (String::compose ("VD has request for %1", frame), LogEntry::TYPE_DEBUG_DECODE);
+
+       if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
+               seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+       }
+
+       list<ContentVideo> dec;
+
+       /* Now enough pass() calls should either:
+        *  (a) give us what we want, or
+        *  (b) give us something after what we want, indicating that we will never get what we want, or
+        *  (c) hit the end of the decoder.
+        */
+       if (accurate) {
+               /* We are being accurate, so we want the right frame.
+                * This could all be one statement but it's split up for clarity.
+                */
+               while (true) {
+                       if (!decoded_video(frame).empty ()) {
+                               /* We got what we want */
+                               break;
+                       }
+
+                       if (pass ()) {
+                               /* The decoder has nothing more for us */
+                               break;
+                       }
+
+                       if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
+                               /* We're never going to get the frame we want.  Perhaps the caller is asking
+                                * for a video frame before the content's video starts (if its audio
+                                * begins before its video, for example).
+                                */
+                               break;
+                       }
+               }
+
+               dec = decoded_video (frame);
+       } else {
+               /* Any frame will do: use the first one that comes out of pass() */
+               while (_decoded_video.empty() && !pass ()) {}
+               if (!_decoded_video.empty ()) {
+                       dec.push_back (_decoded_video.front ());
+               }
+       }
+
+       /* Clean up _decoded_video; keep the frame we are returning (which may have two images
+          for 3D), but nothing before that */
+       while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
+               _decoded_video.pop_front ();
+       }
 
+       return dec;
+}
+
+/** Fill _decoded_video from `from' up to, but not including, `to' with
+ *  a frame for one particular Eyes value (which could be EYES_BOTH,
+ *  EYES_LEFT or EYES_RIGHT)
+ */
+void
+VideoDecoder::fill_one_eye (Frame from, Frame to, Eyes eye)
+{
+       if (to == 0) {
+               /* Already OK */
+               return;
+       }
+
+       /* Fill with black... */
+       shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
+       Part filler_part = PART_WHOLE;
+
+       /* ...unless there's some video we can fill with */
+       if (!_decoded_video.empty ()) {
+               filler_image = _decoded_video.back().image;
+               filler_part = _decoded_video.back().part;
+       }
+
+       for (Frame i = from; i < to; ++i) {
+#ifdef DCPOMATIC_DEBUG
+               test_gaps++;
+#endif
+               _decoded_video.push_back (
+                       ContentVideo (filler_image, eye, filler_part, i)
+                       );
+       }
+}
+
+/** Fill _decoded_video from `from' up to, but not including, `to'
+ *  adding both left and right eye frames.
+ */
+void
+VideoDecoder::fill_both_eyes (Frame from, Frame to, Eyes eye)
+{
+       if (to == 0 && eye == EYES_LEFT) {
+               /* Already OK */
+               return;
+       }
+
+       /* Fill with black... */
+       shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
+       shared_ptr<const ImageProxy> filler_right_image (new RawImageProxy (_black_image));
+       Part filler_left_part = PART_WHOLE;
+       Part filler_right_part = PART_WHOLE;
+
+       /* ...unless there's some video we can fill with */
+       for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
+               if (i->eyes == EYES_LEFT && !filler_left_image) {
+                       filler_left_image = i->image;
+                       filler_left_part = i->part;
+               } else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+                       filler_right_image = i->image;
+                       filler_right_part = i->part;
+               }
+
+               if (filler_left_image && filler_right_image) {
+                       break;
+               }
+       }
+
+       Frame filler_frame = from;
+       Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
+
+       if (_decoded_video.empty ()) {
+               filler_frame = 0;
+               filler_eye = EYES_LEFT;
+       } else if (_decoded_video.back().eyes == EYES_LEFT) {
+               filler_frame = _decoded_video.back().frame;
+               filler_eye = EYES_RIGHT;
+       } else if (_decoded_video.back().eyes == EYES_RIGHT) {
+               filler_frame = _decoded_video.back().frame + 1;
+               filler_eye = EYES_LEFT;
+       }
+
+       while (filler_frame != to || filler_eye != eye) {
+
+#ifdef DCPOMATIC_DEBUG
+               test_gaps++;
+#endif
+
+               _decoded_video.push_back (
+                       ContentVideo (
+                               filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
+                               filler_eye,
+                               filler_eye == EYES_LEFT ? filler_left_part : filler_right_part,
+                               filler_frame
+                               )
+                       );
+
+               if (filler_eye == EYES_LEFT) {
+                       filler_eye = EYES_RIGHT;
+               } else {
+                       filler_eye = EYES_LEFT;
+                       ++filler_frame;
+               }
+       }
 }
 
/** Called by subclasses when they have a video frame ready.
 *  @param image Decoded image (possibly containing both eyes for 3D content).
 *  @param frame Frame index of the image within the content.
 *
 *  Splits the image into ContentVideo entries as required by the content's
 *  frame type, fills any gap since the last decoded frame (so that
 *  get_video() never sees holes) and appends the result to _decoded_video.
 */
void
VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
{
	if (_ignore_video) {
		return;
	}

	_video_content->film()->log()->log (String::compose ("VD receives %1", frame), LogEntry::TYPE_DEBUG_DECODE);

	/* We may receive the same frame index twice for 3D, and we need to know
	   when that happens.
	*/
	bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);

	/* Work out what we are going to push into _decoded_video next */
	list<ContentVideo> to_push;
	switch (_video_content->video_frame_type ()) {
	case VIDEO_FRAME_TYPE_2D:
		to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
		break;
	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
		/* Alternate 3D arrives as L, R, L, R...; a repeated frame index means
		   this is the right eye of the pair.
		*/
		to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
		break;
	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
		/* One image containing both eyes side-by-side: push two entries
		   referring to the two halves.
		*/
		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
		break;
	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
		break;
	case VIDEO_FRAME_TYPE_3D_LEFT:
		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
		break;
	case VIDEO_FRAME_TYPE_3D_RIGHT:
		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
		break;
	default:
		DCPOMATIC_ASSERT (false);
	}

	/* Now VideoDecoder is required never to have gaps in the frames that it presents
	   via get_video().  Hence we need to fill in any gap between the last thing in _decoded_video
	   and the things we are about to push.
	*/

	optional<Frame> from;
	optional<Frame> to;

	if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
		/* Nothing decoded since an accurate seek: fill from the seek point */
		from = _last_seek_time->frames_round (_video_content->video_frame_rate ());
		to = to_push.front().frame;
	} else if (!_decoded_video.empty ()) {
		/* Fill from just after the last thing we decoded */
		from = _decoded_video.back().frame + 1;
		to = to_push.front().frame;
	}

	/* If we've pre-rolled on a seek we may now receive out-of-order frames
	   (frames before the last seek time) which we can just ignore.
	*/

	if (from && to && from.get() > to.get()) {
		return;
	}

	if (from) {
		/* Fill the gap [from, to) using the filler appropriate to the frame type */
		switch (_video_content->video_frame_type ()) {
		case VIDEO_FRAME_TYPE_2D:
			fill_one_eye (from.get(), to.get (), EYES_BOTH);
			break;
		case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
		case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
		case VIDEO_FRAME_TYPE_3D_ALTERNATE:
			fill_both_eyes (from.get(), to.get(), to_push.front().eyes);
			break;
		case VIDEO_FRAME_TYPE_3D_LEFT:
			fill_one_eye (from.get(), to.get (), EYES_LEFT);
			break;
		case VIDEO_FRAME_TYPE_3D_RIGHT:
			fill_one_eye (from.get(), to.get (), EYES_RIGHT);
		}
	}

	copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));

	/* We can't let this build up too much or we will run out of memory.  We need to allow
	   the most frames that can exist between blocks of sound in a multiplexed file.
	*/
	DCPOMATIC_ASSERT (_decoded_video.size() <= 96);
}
+
+void
+VideoDecoder::seek (ContentTime s, bool accurate)
+{
+       _decoded_video.clear ();
+       _last_seek_time = s;
+       _last_seek_accurate = accurate;
+}
+
/** Set this decoder never to produce any video data: subsequent calls
 *  to video() will be ignored.
 */
void
VideoDecoder::set_ignore_video ()
{
	_ignore_video = true;
}