Expand Player to support main and sign language video types.
author: Carl Hetherington <cth@carlh.net>
Wed, 19 Oct 2022 22:27:15 +0000 (00:27 +0200)
committer: Carl Hetherington <cth@carlh.net>
Tue, 2 Jul 2024 23:04:28 +0000 (01:04 +0200)
13 files changed:
src/lib/butler.cc
src/lib/dcp_film_encoder.cc
src/lib/player.cc
src/lib/player.h
src/lib/player_video.cc
src/lib/player_video.h
src/lib/util.cc
src/lib/util.h
test/client_server_test.cc
test/j2k_encoder_test.cc
test/low_bitrate_test.cc
test/overlap_video_test.cc
test/player_test.cc

index dd98745878a1aadac6fb8699c6ee4ab8ceabb5bf..fbbd84408300aedf1a8d9ad33bfc9cca2691763b 100644 (file)
@@ -410,7 +410,7 @@ Butler::player_change (ChangeType type, int property)
                if (type == ChangeType::DONE) {
                        auto film = _film.lock();
                        if (film) {
-                               _video.reset_metadata(film, _player.video_container_size());
+                               _video.reset_metadata(film, _player.video_container_size(VideoType::MAIN));
                        }
                }
                return;
index 83da57756b65de252b5b2228a5b514612fc6e271..60b2198caae895bc4fa2df8c4743ea8fc65647a6 100644 (file)
@@ -156,12 +156,16 @@ DCPFilmEncoder::resume()
        _encoder->resume();
 }
 
+
 void
 DCPFilmEncoder::video(shared_ptr<PlayerVideo> data, DCPTime time)
 {
-       _encoder->encode(data, time);
+       if (data->type() == VideoType::MAIN) {
+               _encoder->encode(data, time);
+       }
 }
 
+
 void
 DCPFilmEncoder::audio(shared_ptr<AudioBuffers> data, DCPTime time)
 {
index 14cd959065f4f7cb737eb71c7153d6ba7bf1c5d3..41c9d4cfe858ccaed30a8f6c515b137ba14ff645 100644 (file)
@@ -28,6 +28,7 @@
 #include "config.h"
 #include "content_audio.h"
 #include "content_video.h"
+#include "constants.h"
 #include "dcp_content.h"
 #include "dcp_decoder.h"
 #include "dcpomatic_log.h"
@@ -132,14 +133,32 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
 }
 
 
+dcp::Size
+Player::full_size(VideoType type) const
+{
+       switch (type) {
+       case VideoType::MAIN:
+       {
+               auto film = _film.lock();
+               DCPOMATIC_ASSERT(film);
+               return film->frame_size();
+       }
+       case VideoType::SIGN_LANGUAGE:
+               return { 480, 640 };
+       default:
+               DCPOMATIC_ASSERT(false);
+       }
+}
+
+
 void
 Player::construct ()
 {
-       auto film = _film.lock();
-       DCPOMATIC_ASSERT(film);
-
        connect();
-       set_video_container_size(film->frame_size());
+       set_video_container_size(VideoType::MAIN, full_size(VideoType::MAIN));
+       set_video_container_size(VideoType::SIGN_LANGUAGE, full_size(VideoType::SIGN_LANGUAGE));
+
+       _black_image[VideoType::SIGN_LANGUAGE] = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(SIGN_LANGUAGE_WIDTH, SIGN_LANGUAGE_HEIGHT), Image::Alignment::PADDED);
 
        film_change(ChangeType::DONE, FilmProperty::AUDIO_PROCESSOR);
 
@@ -168,7 +187,7 @@ Player::Player(Player&& other)
        , _playlist(std::move(other._playlist))
        , _suspended(other._suspended.load())
        , _pieces(std::move(other._pieces))
-       , _video_container_size(other._video_container_size.load())
+       , _video_container_size(other._video_container_size)
        , _black_image(std::move(other._black_image))
        , _ignore_video(other._ignore_video.load())
        , _ignore_audio(other._ignore_audio.load())
@@ -208,7 +227,7 @@ Player::operator=(Player&& other)
        _playlist = std::move(other._playlist);
        _suspended = other._suspended.load();
        _pieces = std::move(other._pieces);
-       _video_container_size = other._video_container_size.load();
+       _video_container_size = other._video_container_size;
        _black_image = std::move(other._black_image);
        _ignore_video = other._ignore_video.load();
        _ignore_audio = other._ignore_audio.load();
@@ -240,9 +259,12 @@ Player::operator=(Player&& other)
 
 
 bool
-have_video (shared_ptr<const Content> content)
+have_video(shared_ptr<const Content> content, VideoType type)
 {
-       return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
+       return static_cast<bool>(content->video)
+               && content->video->use()
+               && content->can_be_played()
+               && content->video->type() == type;
 }
 
 
@@ -406,10 +428,15 @@ Player::setup_pieces ()
                }
        }
 
-       _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+       _have_sign_language = contains_sign_language(playlist_content);
+
+       _black[VideoType::MAIN] = Empty(film, playlist(), bind(&have_video, _1, VideoType::MAIN), _playback_length);
+       if (_have_sign_language) {
+               _black[VideoType::SIGN_LANGUAGE] = Empty(film, playlist(), bind(&have_video, _1, VideoType::SIGN_LANGUAGE), _playback_length);
+       }
        _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
 
-       _next_video_time = boost::none;
+       _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
        _next_audio_time = boost::none;
 }
 
@@ -426,7 +453,7 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        for (auto const& i: _delay) {
-                               i.first->reset_metadata(film, _video_container_size);
+                               i.first->reset_metadata(film, video_container_size(VideoType::MAIN));
                        }
                }
        } else {
@@ -450,21 +477,22 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
 
 
 void
-Player::set_video_container_size (dcp::Size s)
+Player::set_video_container_size(VideoType type, dcp::Size size)
 {
        ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
 
-       if (s == _video_container_size) {
+       if (size == video_container_size(type)) {
                cc.abort();
                return;
        }
 
-       _video_container_size = s;
+       boost::mutex::scoped_lock lm(_video_container_size_mutex);
+       _video_container_size[type] = size;
 
        {
                boost::mutex::scoped_lock lm(_black_image_mutex);
-               _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
-               _black_image->make_black ();
+               _black_image[type] = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size[type], Image::Alignment::PADDED);
+               _black_image[type]->make_black();
        }
 }
 
@@ -517,16 +545,19 @@ Player::film_change(ChangeType type, FilmProperty p)
 
 
 shared_ptr<PlayerVideo>
-Player::black_player_video_frame (Eyes eyes) const
+Player::black_player_video_frame(VideoType type, Eyes eyes) const
 {
        boost::mutex::scoped_lock lm(_black_image_mutex);
 
+       auto const image = _black_image[type];
+
        return std::make_shared<PlayerVideo> (
-               make_shared<const RawImageProxy>(_black_image),
+               make_shared<const RawImageProxy>(image),
                Crop(),
                optional<double>(),
-               _video_container_size,
-               _video_container_size,
+               image->size(),
+               image->size(),
+               type,
                eyes,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
@@ -733,7 +764,8 @@ Player::pass ()
 
        if (_playback_length.load() == DCPTime() || !film) {
                /* Special; just give one black frame */
-               use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
+               use_video(black_player_video_frame(VideoType::MAIN, Eyes::BOTH), DCPTime(), one_video_frame());
+               use_video(black_player_video_frame(VideoType::SIGN_LANGUAGE, Eyes::BOTH), DCPTime(), one_video_frame());
                return true;
        }
 
@@ -752,14 +784,22 @@ Player::pass ()
                SILENT
        } which = NONE;
 
+       optional<VideoType> black_type;
+
        if (earliest_content) {
                which = CONTENT;
        }
 
-       if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
-               earliest_time = _black.position ();
-               which = BLACK;
-       }
+       auto check_black = [this, &earliest_time, &which, &black_type](VideoType type) {
+               if (!_black[type].done() && !_ignore_video && (!earliest_time || _black[type].position() < *earliest_time)) {
+                       earliest_time = _black[type].position();
+                       which = BLACK;
+                       black_type = type;
+               }
+       };
+
+       check_black(VideoType::MAIN);
+       check_black(VideoType::SIGN_LANGUAGE);
 
        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
@@ -774,7 +814,7 @@ Player::pass ()
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced) {
                        if (dcp->reference_video()) {
-                               _next_video_time = dcp->end(film);
+                               _next_video_time[earliest_content->content->video->type()] = dcp->end(film);
                        }
                        if (dcp->reference_audio()) {
                                /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
@@ -787,21 +827,25 @@ Player::pass ()
                break;
        }
        case BLACK:
-               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               if (!_next_video_time) {
+       {
+               DCPOMATIC_ASSERT(black_type);
+               auto& black = _black[*black_type];
+               LOG_DEBUG_PLAYER("Emit black for gap at %1", to_string(black.position()));
+               if (!_next_video_time[*black_type]) {
                        /* Deciding to emit black has the same effect as getting some video from the content
                         * when we are inaccurately seeking.
                         */
-                       _next_video_time = _black.position();
+                       _next_video_time[*black_type] = black.position();
                }
                if (film->three_d()) {
-                       use_video(black_player_video_frame(Eyes::LEFT), _black.position(), _black.period_at_position().to);
-                       use_video(black_player_video_frame(Eyes::RIGHT), _black.position(), _black.period_at_position().to);
+                       use_video(black_player_video_frame(*black_type, Eyes::LEFT), black.position(), black.period_at_position().to);
+                       use_video(black_player_video_frame(*black_type, Eyes::RIGHT), black.position(), black.period_at_position().to);
                } else {
-                       use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
+                       use_video(black_player_video_frame(*black_type, Eyes::BOTH), black.position(), black.period_at_position().to);
                }
-               _black.set_position (_black.position() + one_video_frame());
+               black.set_position(black.position() + one_video_frame());
                break;
+       }
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
@@ -901,9 +945,12 @@ Player::pass ()
        }
 
        if (done) {
-               if (_next_video_time) {
+               if (_next_video_time[VideoType::MAIN]) {
                        LOG_DEBUG_PLAYER("Done: emit video until end of film at %1", to_string(film->length()));
-                       emit_video_until(film->length());
+                       emit_video_until(VideoType::MAIN, film->length());
+               }
+               if (_next_video_time[VideoType::SIGN_LANGUAGE] && _have_sign_language) {
+                       emit_video_until(VideoType::SIGN_LANGUAGE, film->length());
                }
 
                if (_shuffler) {
@@ -929,6 +976,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
        list<PositionImage> captions;
        int const vfr = film->video_frame_rate();
+       auto const container = video_container_size(VideoType::MAIN);
 
        for (
                auto j:
@@ -941,15 +989,15 @@ Player::open_subtitles_for_frame (DCPTime time) const
                                continue;
                        }
 
-                       /* i.image will already have been scaled to fit _video_container_size */
-                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
+                       /* i.image will already have been scaled to fit video_container_size */
+                       dcp::Size scaled_size(i.rectangle.width * container.width, i.rectangle.height * container.height);
 
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint(_video_container_size.load().width * i.rectangle.x),
-                                               lrint(_video_container_size.load().height * i.rectangle.y)
+                                               lrint(container.width * i.rectangle.x),
+                                               lrint(container.height * i.rectangle.y)
                                                )
                                        )
                                );
@@ -957,7 +1005,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
                /* String subtitles (rendered to an image) */
                if (!j.string.empty()) {
-                       auto s = render_text(j.string, _video_container_size, time, vfr);
+                       auto s = render_text(j.string, container, time, vfr);
                        copy_if(s.begin(), s.end(), back_inserter(captions), [](PositionImage const& image) {
                                return image.image->size().width && image.image->size().height;
                        });
@@ -974,17 +1022,17 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
 
 void
-Player::emit_video_until(DCPTime time)
+Player::emit_video_until(VideoType type, DCPTime time)
 {
-       LOG_DEBUG_PLAYER("emit_video_until %1; next video time is %2", to_string(time), to_string(_next_video_time.get_value_or({})));
-       auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+       LOG_DEBUG_PLAYER("emit_video_until %1; next video time is %2", to_string(time), to_string(_next_video_time[type].get_value_or({})));
+       auto frame = [this, type](shared_ptr<PlayerVideo> pv, DCPTime time) {
                /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
                   player before the video that requires them.
                */
                _delay.push_back(make_pair(pv, time));
 
                if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-                       _next_video_time = time + one_video_frame();
+                       _next_video_time[type] = time + one_video_frame();
                }
 
                if (_delay.size() < 3) {
@@ -998,12 +1046,12 @@ Player::emit_video_until(DCPTime time)
 
        auto const age_threshold = one_video_frame() * 2;
 
-       while (_next_video_time.get_value_or({}) < time) {
-               auto left = _last_video[Eyes::LEFT];
-               auto right = _last_video[Eyes::RIGHT];
-               auto both = _last_video[Eyes::BOTH];
+       while (_next_video_time[type].get_value_or({}) < time) {
+               auto left = _last_video[type][Eyes::LEFT];
+               auto right = _last_video[type][Eyes::RIGHT];
+               auto both = _last_video[type][Eyes::BOTH];
 
-               auto const next = _next_video_time.get_value_or({});
+               auto const next = _next_video_time[type].get_value_or({});
 
                if (
                        left.first &&
@@ -1020,10 +1068,10 @@ Player::emit_video_until(DCPTime time)
                } else {
                        auto film = _film.lock();
                        if (film && film->three_d()) {
-                               frame(black_player_video_frame(Eyes::LEFT), next);
-                               frame(black_player_video_frame(Eyes::RIGHT), next);
+                               frame(black_player_video_frame(type, Eyes::LEFT), next);
+                               frame(black_player_video_frame(type, Eyes::RIGHT), next);
                        } else {
-                               frame(black_player_video_frame(Eyes::BOTH), next);
+                               frame(black_player_video_frame(type, Eyes::BOTH), next);
                        }
                        LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
                }
@@ -1043,7 +1091,8 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       if (!piece->content->video->use()) {
+       auto const content_video = piece->content->video;
+       if (!content_video->use()) {
                return;
        }
 
@@ -1053,6 +1102,7 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
        }
 
        vector<Eyes> eyes_to_emit;
+       auto const type = content_video->type();
 
        if (!film->three_d()) {
                if (video.eyes == Eyes::RIGHT) {
@@ -1091,12 +1141,11 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       if (!_next_video_time) {
-               _next_video_time = time.round(film->video_frame_rate());
+       if (!_next_video_time[type]) {
+               /* XXX: round to 24fps for SL? */
+               _next_video_time[type] = time.round(film->video_frame_rate());
        }
 
-       auto const content_video = piece->content->video;
-
        auto scaled_size = content_video->scaled_size(film->frame_size());
        DCPOMATIC_ASSERT(scaled_size);
 
@@ -1108,11 +1157,12 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                                content_video->fade(film, video.time),
                                scale_for_display(
                                        *scaled_size,
-                                       _video_container_size,
-                                       film->frame_size(),
+                                       video_container_size(type),
+                                       full_size(type),
                                        content_video->pixel_quanta()
                                        ),
-                               _video_container_size,
+                               video_container_size(type),
+                               type,
                                eyes,
                                video.part,
                                content_video->colour_conversion(),
@@ -1130,9 +1180,9 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 void
 Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
 {
-       _last_video[pv->eyes()] = { pv, time };
+       _last_video[pv->type()][pv->eyes()] = { pv, time };
        if (pv->eyes() != Eyes::LEFT) {
-               emit_video_until(std::min(time + one_video_frame() / 2, end));
+               emit_video_until(pv->type(), std::min(time + one_video_frame() / 2, end));
        }
 }
 
@@ -1263,8 +1313,9 @@ Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextConten
                auto image = sub.image;
 
                /* We will scale the subtitle up to fit _video_container_size */
-               int const width = sub.rectangle.width * _video_container_size.load().width;
-               int const height = sub.rectangle.height * _video_container_size.load().height;
+               auto const container = video_container_size(VideoType::MAIN);
+               int const width = sub.rectangle.width * container.width;
+               int const height = sub.rectangle.height * container.height;
                if (width == 0 || height == 0) {
                        return;
                }
@@ -1413,19 +1464,19 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        if (accurate) {
-               _next_video_time = time;
+               _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = time;
                _next_audio_time = time;
        } else {
-               _next_video_time = boost::none;
+               _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
                _next_audio_time = boost::none;
        }
 
-       _black.set_position (time);
+       _black[VideoType::MAIN].set_position(time);
+       _black[VideoType::SIGN_LANGUAGE].set_position(time);
        _silent.set_position (time);
 
-       _last_video[Eyes::LEFT] = {};
-       _last_video[Eyes::RIGHT] = {};
-       _last_video[Eyes::BOTH] = {};
+       _last_video[VideoType::MAIN][Eyes::LEFT] = _last_video[VideoType::MAIN][Eyes::RIGHT] = _last_video[VideoType::MAIN][Eyes::BOTH] = {};
+       _last_video[VideoType::SIGN_LANGUAGE][Eyes::LEFT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::RIGHT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::BOTH] = {};
 
        for (auto& state: _stream_states) {
                state.second.last_push_end = boost::none;
@@ -1436,13 +1487,15 @@ Player::seek (DCPTime time, bool accurate)
 void
 Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
 {
-       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
-       }
+       if (pv->type() == VideoType::MAIN) {
+               if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+                       std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
+               }
 
-       auto subtitles = open_subtitles_for_frame (time);
-       if (subtitles) {
-               pv->set_text (subtitles.get ());
+               auto subtitles = open_subtitles_for_frame (time);
+               if (subtitles) {
+                       pv->set_text (subtitles.get ());
+               }
        }
 
        Video (pv, time);
index 314031698a387e4a18190a61c93369e7cb0e5229..fc734e035bd351c58631f936a86e8cb30a495219 100644 (file)
@@ -39,6 +39,7 @@
 #include "player_text.h"
 #include "position_image.h"
 #include "shuffler.h"
+#include "video_type.h"
 #include <boost/atomic.hpp>
 #include <list>
 
@@ -95,11 +96,16 @@ public:
 
        std::vector<std::shared_ptr<dcpomatic::Font>> get_subtitle_fonts ();
 
-       dcp::Size video_container_size () const {
-               return _video_container_size;
+       dcp::Size video_container_size(VideoType type) const {
+               boost::mutex::scoped_lock lm(_video_container_size_mutex);
+               return _video_container_size[type];
        }
 
-       void set_video_container_size (dcp::Size);
+       bool have_sign_language() const {
+               return _have_sign_language;
+       }
+
+       void set_video_container_size(VideoType type, dcp::Size size);
        void set_ignore_video ();
        void set_ignore_audio ();
        void set_ignore_text ();
@@ -156,10 +162,11 @@ private:
        dcpomatic::DCPTime resampled_audio_to_dcp (std::shared_ptr<const Piece> piece, Frame f) const;
        dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
        dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
-       std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
-       void emit_video_until(dcpomatic::DCPTime time);
+       std::shared_ptr<PlayerVideo> black_player_video_frame(VideoType type, Eyes eyes) const;
+       void emit_video_until(VideoType type, dcpomatic::DCPTime time);
        void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
        std::pair<std::shared_ptr<Piece>, boost::optional<dcpomatic::DCPTime>> earliest_piece_and_time() const;
+       dcp::Size full_size(VideoType type) const;
 
        void video (std::weak_ptr<Piece>, ContentVideo);
        void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
@@ -193,13 +200,16 @@ private:
        boost::atomic<int> _suspended;
        std::vector<std::shared_ptr<Piece>> _pieces;
 
-       /** Size of the image we are rendering to; this may be the DCP frame size, or
-        *  the size of preview in a window.
+       boost::atomic<bool> _have_sign_language;
+
+       mutable boost::mutex _video_container_size_mutex;
+       /** Size of the images we are rendering to; for the MAIN video this
+        *  may be the DCP frame size, or the size of preview in a window.
         */
-       boost::atomic<dcp::Size> _video_container_size;
+       EnumIndexedVector<dcp::Size, VideoType> _video_container_size;
 
        mutable boost::mutex _black_image_mutex;
-       std::shared_ptr<Image> _black_image;
+       EnumIndexedVector<std::shared_ptr<Image>, VideoType> _black_image;
 
        /** true if the player should ignore all video; i.e. never produce any */
        boost::atomic<bool> _ignore_video;
@@ -215,13 +225,13 @@ private:
        boost::atomic<bool> _play_referenced;
 
        /** Time of the next video that we will emit, or the time of the last accurate seek */
-       boost::optional<dcpomatic::DCPTime> _next_video_time;
+       EnumIndexedVector<boost::optional<dcpomatic::DCPTime>, VideoType> _next_video_time;
        /** Time of the next audio that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_audio_time;
 
        boost::atomic<boost::optional<int>> _dcp_decode_reduction;
 
-       EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
+       EnumIndexedVector<EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes>, VideoType> _last_video;
 
        AudioMerger _audio_merger;
        std::unique_ptr<Shuffler> _shuffler;
@@ -241,7 +251,7 @@ private:
        };
        std::map<AudioStreamPtr, StreamState> _stream_states;
 
-       Empty _black;
+       EnumIndexedVector<Empty, VideoType> _black;
        Empty _silent;
 
        EnumIndexedVector<ActiveText, TextType> _active_texts;
index 247301d582312f83656bce2d270a2bcc1319d9b1..76a512f094d08d86f2e28587bd345d066b73357c 100644 (file)
@@ -54,6 +54,7 @@ PlayerVideo::PlayerVideo (
        boost::optional<double> fade,
        dcp::Size inter_size,
        dcp::Size out_size,
+       VideoType type,
        Eyes eyes,
        Part part,
        optional<ColourConversion> colour_conversion,
@@ -67,6 +68,7 @@ PlayerVideo::PlayerVideo (
        , _fade (fade)
        , _inter_size (inter_size)
        , _out_size (out_size)
+       , _type(type)
        , _eyes (eyes)
        , _part (part)
        , _colour_conversion (colour_conversion)
@@ -87,6 +89,7 @@ PlayerVideo::PlayerVideo (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket
        _inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
        _out_size = dcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
        _eyes = static_cast<Eyes>(node->number_child<int>("Eyes"));
+       _type = static_cast<VideoType>(node->number_child<int>("Type"));
        _part = static_cast<Part>(node->number_child<int>("Part"));
        _video_range = static_cast<VideoRange>(node->number_child<int>("VideoRange"));
        _error = node->optional_bool_child("Error").get_value_or (false);
@@ -212,6 +215,7 @@ PlayerVideo::add_metadata(xmlpp::Element* element) const
        cxml::add_text_child(element, "OutWidth", raw_convert<string>(_out_size.width));
        cxml::add_text_child(element, "OutHeight", raw_convert<string>(_out_size.height));
        cxml::add_text_child(element, "Eyes", raw_convert<string>(static_cast<int>(_eyes)));
+       cxml::add_text_child(element, "Type", raw_convert<string>(static_cast<int>(_type)));
        cxml::add_text_child(element, "Part", raw_convert<string>(static_cast<int>(_part)));
        cxml::add_text_child(element, "VideoRange", raw_convert<string>(static_cast<int>(_video_range)));
        cxml::add_text_child(element, "Error", _error ? "1" : "0");
@@ -339,6 +343,7 @@ PlayerVideo::shallow_copy () const
                _fade,
                _inter_size,
                _out_size,
+               _type,
                _eyes,
                _part,
                _colour_conversion,
index e2968749c48dc3ebf2e693e05550aee9b4a51192..f15adeaed7a174e7f397b70760b9d5c3fba57b86 100644 (file)
@@ -29,6 +29,7 @@
 #include "position.h"
 #include "position_image.h"
 #include "types.h"
+#include "video_type.h"
 extern "C" {
 #include <libavutil/pixfmt.h>
 }
@@ -54,6 +55,7 @@ public:
                boost::optional<double> fade,
                dcp::Size inter_size,
                dcp::Size out_size,
+               VideoType type,
                Eyes eyes,
                Part part,
                boost::optional<ColourConversion> colour_conversion,
@@ -98,6 +100,10 @@ public:
                _eyes = e;
        }
 
+       VideoType type() const {
+               return _type;
+       }
+
        boost::optional<ColourConversion> colour_conversion () const {
                return _colour_conversion;
        }
@@ -134,6 +140,7 @@ private:
        boost::optional<double> _fade;
        dcp::Size _inter_size;
        dcp::Size _out_size;
+       VideoType _type;
        Eyes _eyes;
        Part _part;
        boost::optional<ColourConversion> _colour_conversion;
index 172b8d763e73dee9a959e0b0cd402a7e2f227f2d..2b03f8d7925e1fdf9ebd1a008c20a2a330dcf912 100644 (file)
@@ -1199,3 +1199,15 @@ join_strings(vector<string> const& in, string const& separator)
        });
 }
 
+
+bool
+contains_sign_language(ContentList const& content)
+{
+       return std::any_of(
+               content.begin(),
+               content.end(),
+               [](shared_ptr<const Content> c) {
+                       return c->video && c->video->type() == VideoType::SIGN_LANGUAGE;
+               });
+}
+
index 7c40c5ce873aced777590abab48677f1de11aad2..0a89bc9fce310212850778a6d5078fa3a44e3858 100644 (file)
@@ -102,6 +102,7 @@ extern void capture_ffmpeg_logs();
 extern void setup_grok_library_path();
 #endif
 extern std::string join_strings(std::vector<std::string> const& in, std::string const& separator = " ");
+extern bool contains_sign_language(ContentList const& content);
 
 
 template <class T>
index 43084bf31db5427829ea6ee001a3a98c6a0a3c23..c1b77caee5c4fbd1ba91f3ded5106293f7a17bb4 100644 (file)
@@ -100,6 +100,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_rgb)
                optional<double> (),
                dcp::Size (1998, 1080),
                dcp::Size (1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -179,6 +180,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_yuv)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -245,6 +247,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -270,6 +273,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
index 083a61cf897230557892ae339c28f62785e7a36e..9bcbcd51a1f2c3fdd2d7c98c197b871adfc27d1c 100644 (file)
@@ -66,6 +66,7 @@ BOOST_AUTO_TEST_CASE(j2k_encoder_deadlock_test)
                                optional<double>(),
                                dcp::Size(1998, 1080),
                                dcp::Size(1998, 1080),
+                               VideoType::MAIN,
                                Eyes::BOTH,
                                Part::WHOLE,
                                optional<ColourConversion>(),
index 52b8d54be118c33559b9f1722d58141cbd34c116..575b12d27c6702fef6ea4c35808be361c6d61ae3 100644 (file)
@@ -47,6 +47,7 @@ BOOST_AUTO_TEST_CASE (low_bitrate_test)
                boost::optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                boost::optional<ColourConversion>(),
index 01d7a9fcbe6877cb09150cc8bb815aaf54d1e200..516a6fb0e0dff64e9e5bd1e1c5cf99034dd14109 100644 (file)
@@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE (overlap_video_test1)
        BOOST_CHECK(pieces[0]->ignore_video[0] == dcpomatic::DCPTimePeriod(dcpomatic::DCPTime::from_seconds(1), dcpomatic::DCPTime::from_seconds(1) + B->length_after_trim(film)));
        BOOST_CHECK(pieces[0]->ignore_video[1] == dcpomatic::DCPTimePeriod(dcpomatic::DCPTime::from_seconds(3), dcpomatic::DCPTime::from_seconds(3) + C->length_after_trim(film)));
 
-       BOOST_CHECK (player->_black.done());
+       BOOST_CHECK(player->_black[VideoType::MAIN].done());
 
        make_and_verify_dcp (film);
 
index cac5dffe958e5c986ba48f369bbb046f9830e1c2..bb1f0df71939cae61c3ada29978b8b0ab46784c1 100644 (file)
@@ -155,11 +155,11 @@ BOOST_AUTO_TEST_CASE (player_subframe_test)
        BOOST_CHECK (film->length() == DCPTime::from_frames(3 * 24 + 1, 24));
 
        Player player(film, Image::Alignment::COMPACT);
-       player.setup_pieces();
-       BOOST_REQUIRE_EQUAL(player._black._periods.size(), 1U);
-       BOOST_CHECK(player._black._periods.front() == DCPTimePeriod(DCPTime::from_frames(3 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
-       BOOST_REQUIRE_EQUAL(player._silent._periods.size(), 1U);
-       BOOST_CHECK(player._silent._periods.front() == DCPTimePeriod(DCPTime(289920), DCPTime::from_frames(3 * 24 + 1, 24)));
+       player.setup_pieces ();
+       BOOST_REQUIRE_EQUAL(player._black[VideoType::MAIN]._periods.size(), 1U);
+       BOOST_CHECK(player._black[VideoType::MAIN]._periods.front() == DCPTimePeriod(DCPTime::from_frames(3 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
+       BOOST_REQUIRE_EQUAL (player._silent._periods.size(), 1U);
+       BOOST_CHECK (player._silent._periods.front() == DCPTimePeriod(DCPTime(289920), DCPTime::from_frames(3 * 24 + 1, 24)));
 }