Change MagickImageProxy to FFmpegImageProxy and make it use FFmpeg
[dcpomatic.git] / src / lib / dcp_decoder.cc
index cc415629b2c3654a698ed470708d07e6f6cf5839..72db5369c81e9dd162457d5636bb0c3b420b44fa 100644 (file)
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -24,7 +24,8 @@
 #include "video_decoder.h"
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
-#include "caption_decoder.h"
+#include "text_decoder.h"
+#include "ffmpeg_image_proxy.h"
 #include "image.h"
 #include "config.h"
 #include <dcp/dcp.h>
@@ -37,10 +38,12 @@
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
 #include <dcp/mono_picture_frame.h>
 #include <dcp/stereo_picture_frame.h>
 #include <dcp/sound_frame.h>
 #include <dcp/sound_asset_reader.h>
+#include <dcp/subtitle_image.h>
 #include <boost/foreach.hpp>
 #include <iostream>
 
@@ -62,9 +65,9 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, boo
        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, log, fast));
        }
-       if (c->caption) {
+       BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
-               caption.reset (new CaptionDecoder (this, c->caption, log, ContentTime()));
+               text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
        }
 
        list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
@@ -109,20 +112,22 @@ DCPDecoder::pass ()
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = _next.frames_round (vfr);
 
-       /* We must emit subtitles first as when we emit the video for this frame
-          it will expect already to have the subs.
+       shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+       DCPOMATIC_ASSERT (picture_asset);
+
+       /* We must emit texts first as when we emit the video for this frame
+          it will expect already to have the texts.
        */
-       pass_subtitles (_next);
+       pass_texts (_next, picture_asset->size());
 
        if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
-               shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
                int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
                if (_mono_reader) {
                        video->emit (
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _mono_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
                                                )
@@ -134,7 +139,7 @@ DCPDecoder::pass ()
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _stereo_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                dcp::EYE_LEFT,
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
@@ -147,7 +152,7 @@ DCPDecoder::pass ()
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _stereo_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                dcp::EYE_RIGHT,
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
@@ -190,15 +195,34 @@ DCPDecoder::pass ()
 }
 
 void
-DCPDecoder::pass_subtitles (ContentTime next)
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
+{
+       list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+       if ((*_reel)->main_subtitle()) {
+               DCPOMATIC_ASSERT (decoder != text.end ());
+               pass_texts (
+                       next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder, size
+                       );
+               ++decoder;
+       }
+       BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+               DCPOMATIC_ASSERT (decoder != text.end ());
+               pass_texts (
+                       next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+                       );
+               ++decoder;
+       }
+}
+
+void
+DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size)
 {
        double const vfr = _dcp_content->active_video_frame_rate ();
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = next.frames_round (vfr);
 
-       if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
-               int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-               list<shared_ptr<dcp::Subtitle> > subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+       if (_decode_referenced || !reference) {
+               list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
                        dcp::Time (entry_point + frame, vfr, vfr),
                        dcp::Time (entry_point + frame + 1, vfr, vfr),
                        true
@@ -209,7 +233,7 @@ DCPDecoder::pass_subtitles (ContentTime next)
                        if (is) {
                                list<dcp::SubtitleString> s;
                                s.push_back (*is);
-                               caption->emit_plain (
+                               decoder->emit_plain (
                                        ContentTimePeriod (
                                                ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
                                                ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
@@ -218,7 +242,47 @@ DCPDecoder::pass_subtitles (ContentTime next)
                                        );
                        }
 
-                       /* XXX: image subtitles */
+                       shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+                       if (ii) {
+                               FFmpegImageProxy proxy (ii->png_image());
+                               shared_ptr<Image> image = proxy.image().first;
+                               /* set up rect with height and width */
+                               dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
+
+                               /* add in position */
+
+                               switch (ii->h_align()) {
+                               case dcp::HALIGN_LEFT:
+                                       rect.x += ii->h_position();
+                                       break;
+                               case dcp::HALIGN_CENTER:
+                                       rect.x += 0.5 + ii->h_position() - rect.width / 2;
+                                       break;
+                               case dcp::HALIGN_RIGHT:
+                                       rect.x += 1 - ii->h_position() - rect.width;
+                                       break;
+                               }
+
+                               switch (ii->v_align()) {
+                               case dcp::VALIGN_TOP:
+                                       rect.y += ii->v_position();
+                                       break;
+                               case dcp::VALIGN_CENTER:
+                                       rect.y += 0.5 + ii->v_position() - rect.height / 2;
+                                       break;
+                               case dcp::VALIGN_BOTTOM:
+                                       rect.y += 1 - ii->v_position() - rect.height;
+                                       break;
+                               }
+
+                               decoder->emit_bitmap (
+                                       ContentTimePeriod (
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+                                               ),
+                                       image, rect
+                                       );
+                       }
                }
        }
 }
@@ -296,11 +360,11 @@ DCPDecoder::seek (ContentTime t, bool accurate)
                next_reel ();
        }
 
-       /* Pass subtitles in the pre-roll */
+       /* Pass texts in the pre-roll */
 
        double const vfr = _dcp_content->active_video_frame_rate ();
        for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
-               pass_subtitles (pre);
+               pass_texts (pre, (*_reel)->main_picture()->asset()->size());
                pre += ContentTime::from_frames (1, vfr);
        }