Change end() to only do one thing, and copy the required stuff into pause()
diff --git a/src/lib/player.h b/src/lib/player.h
index 5abd59de26a8c8fbda3940bde6d05a67077fbab1..d4fae9fc49ee380dadd2ce9c80a1a4e3f816a716 100644
--- a/src/lib/player.h
+++ b/src/lib/player.h
 #include "content_audio.h"
 #include "content_text.h"
 #include "content_video.h"
+#include "dcp_text_track.h"
 #include "empty.h"
-#include "film.h"
+#include "enum_indexed_vector.h"
+#include "film_property.h"
 #include "image.h"
 #include "player_text.h"
 #include "position_image.h"
@@ -48,6 +50,7 @@ namespace dcp {
 class AtmosContent;
 class AudioBuffers;
 class Content;
+class Film;
 class PlayerVideo;
 class Playlist;
 class ReferencedReelAsset;
@@ -62,20 +65,28 @@ public:
        static int const FILM_VIDEO_FRAME_RATE;
        static int const DCP_DECODE_REDUCTION;
        static int const PLAYBACK_LENGTH;
+       static int const IGNORE_VIDEO;
+       static int const IGNORE_AUDIO;
+       static int const IGNORE_TEXT;
+       static int const ALWAYS_BURN_OPEN_SUBTITLES;
+       static int const PLAY_REFERENCED;
 };
 
 
 /** @class Player
  *  @brief A class which can play a Playlist.
  */
-class Player : public std::enable_shared_from_this<Player>
+class Player
 {
 public:
        Player (std::shared_ptr<const Film>, Image::Alignment subtitle_alignment);
        Player (std::shared_ptr<const Film>, std::shared_ptr<const Playlist> playlist);
 
-       Player (Player const& Player) = delete;
-       Player& operator= (Player const& Player) = delete;
+       Player (Player const&) = delete;
+       Player& operator= (Player const&) = delete;
+
+       Player(Player&& other);
+       Player& operator=(Player&& other);
 
        bool pass ();
        void seek (dcpomatic::DCPTime time, bool accurate);
@@ -98,6 +109,12 @@ public:
        boost::optional<dcpomatic::DCPTime> content_time_to_dcp (std::shared_ptr<const Content> content, dcpomatic::ContentTime t) const;
        boost::optional<dcpomatic::ContentTime> dcp_to_content_time (std::shared_ptr<const Content> content, dcpomatic::DCPTime t) const;
 
+       void signal_change(ChangeType type, int property);
+
+       /** First parameter is PENDING, DONE or CANCELLED.
+        *  Second parameter is the property.
+        *  Third parameter is true if these signals are currently likely to be frequent.
+        */
        boost::signals2::signal<void (ChangeType, int, bool)> Change;
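       A minimal sketch (illustrative only, not part of the diff) of connecting to this
       signal, assuming ChangeType provides PENDING, DONE and CANCELLED as documented
       above; `player` names a hypothetical Player instance:

              player.Change.connect([](ChangeType type, int property, bool frequent) {
                      if (type == ChangeType::DONE && !frequent) {
                              // e.g. refresh a preview or re-read `property` from the Player
                      }
              });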
 
        /** Emitted when a video frame is ready.  These emissions happen in the correct order. */
@@ -125,8 +142,9 @@ private:
        friend struct overlap_video_test1;
 
        void construct ();
+       void connect();
        void setup_pieces ();
-       void film_change (ChangeType, Film::Property);
+       void film_change(ChangeType, FilmProperty);
        void playlist_change (ChangeType);
        void playlist_content_change (ChangeType, int, bool);
        Frame dcp_to_content_video (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
@@ -136,6 +154,8 @@ private:
        dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
        dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
        std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
+       void emit_video_until(dcpomatic::DCPTime time);
+       void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
 
        void video (std::weak_ptr<Piece>, ContentVideo);
        void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
@@ -150,30 +170,31 @@ private:
                std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to
                ) const;
        boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const;
-       void emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
-       void do_emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
        void emit_audio (std::shared_ptr<AudioBuffers> data, dcpomatic::DCPTime time);
        std::shared_ptr<const Playlist> playlist () const;
 
-       /** Mutex to protect the whole Player state.  When it's used for the preview we have
+       /** Mutex to protect most of the Player state.  When it's used for the preview we have
            seek() and pass() called from the Butler thread and lots of other stuff called
            from the GUI thread.
        */
        mutable boost::mutex _mutex;
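       A minimal sketch (illustrative only, not part of the diff) of the locking pattern
       this comment describes; `some_accessor` is a hypothetical member function:

              void Player::some_accessor()
              {
                      boost::mutex::scoped_lock lm(_mutex);
                      // State such as _pieces and _stream_states can now be read or
                      // modified safely from either the Butler thread or the GUI thread.
              }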
 
-       std::shared_ptr<const Film> const _film;
+       std::weak_ptr<const Film> _film;
        /** Playlist, or 0 if we are using the one from the _film */
-       std::shared_ptr<const Playlist> const _playlist;
+       std::shared_ptr<const Playlist> _playlist;
 
        /** > 0 if we are suspended (i.e. pass() and seek() do nothing) */
        boost::atomic<int> _suspended;
-       std::list<std::shared_ptr<Piece>> _pieces;
+       std::vector<std::shared_ptr<Piece>> _pieces;
 
        /** Size of the image we are rendering to; this may be the DCP frame size, or
         *  the size of preview in a window.
         */
        boost::atomic<dcp::Size> _video_container_size;
-       /** Should be accessed using the std::atomic... methods */
+
+       mutable boost::mutex _black_image_mutex;
        std::shared_ptr<Image> _black_image;
 
        /** true if the player should ignore all video; i.e. never produce any */
@@ -185,21 +206,18 @@ private:
        /** true if we should try to be fast rather than high quality */
        boost::atomic<bool> _fast;
        /** true if we should keep going in the face of `survivable' errors */
-       bool const _tolerant;
+       bool _tolerant;
        /** true if we should `play' (i.e output) referenced DCP data (e.g. for preview) */
        boost::atomic<bool> _play_referenced;
 
        /** Time of the next video that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_video_time;
-       /** Eyes of the next video that we will emit */
-       boost::optional<Eyes> _next_video_eyes;
        /** Time of the next audio that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_audio_time;
 
        boost::atomic<boost::optional<int>> _dcp_decode_reduction;
 
-       typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap;
-       LastVideoMap _last_video;
+       EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
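       A minimal sketch (illustrative only, not part of the diff) of what the new member
       implies, assuming EnumIndexedVector is indexed by its enum via operator[] as its
       name suggests; `pv` and `time` are hypothetical values:

              _last_video[Eyes::LEFT] = std::make_pair(pv, time);  // remember the last LEFT-eye frame
              auto const& last = _last_video[Eyes::LEFT];
              if (last.first) {
                      // re-use last.first (the frame) at or after last.second (its time)
              }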
 
        AudioMerger _audio_merger;
        std::unique_ptr<Shuffler> _shuffler;
@@ -210,26 +228,25 @@ private:
        public:
                StreamState () {}
 
-               StreamState (std::shared_ptr<Piece> p, dcpomatic::DCPTime l)
+               explicit StreamState(std::shared_ptr<Piece> p)
                        : piece(p)
-                       , last_push_end(l)
                {}
 
                std::shared_ptr<Piece> piece;
-               dcpomatic::DCPTime last_push_end;
+               boost::optional<dcpomatic::DCPTime> last_push_end;
        };
        std::map<AudioStreamPtr, StreamState> _stream_states;
 
        Empty _black;
        Empty _silent;
 
-       ActiveText _active_texts[static_cast<int>(TextType::COUNT)];
+       EnumIndexedVector<ActiveText, TextType> _active_texts;
        std::shared_ptr<AudioProcessor> _audio_processor;
 
        boost::atomic<dcpomatic::DCPTime> _playback_length;
 
        /** Alignment for subtitle images that we create */
-       Image::Alignment const _subtitle_alignment = Image::Alignment::PADDED;
+       Image::Alignment _subtitle_alignment = Image::Alignment::PADDED;
 
        boost::signals2::scoped_connection _film_changed_connection;
        boost::signals2::scoped_connection _playlist_change_connection;