diff --git a/src/lib/ffmpeg_decoder.h b/src/lib/ffmpeg_decoder.h
index ca42e013e886c2cc08c94ed2edb63d2aae6c831a..bd4b74f88395ddfbb9461ffbcfbd515632fac017 100644
 
 */
 
+
 /** @file  src/ffmpeg_decoder.h
  *  @brief A decoder using FFmpeg to decode content.
  */
 
-#include "util.h"
+
+#include "bitmap_text.h"
 #include "decoder.h"
 #include "ffmpeg.h"
+#include "video_filter_graph_set.h"
 extern "C" {
 #include <libavcodec/avcodec.h>
 }
 #include <boost/thread/mutex.hpp>
 #include <stdint.h>
 
-class Log;
-class VideoFilterGraph;
-class FFmpegAudioStream;
+
 class AudioBuffers;
+class FFmpegAudioStream;
 class Image;
+class Log;
+class VideoFilterGraph;
 struct ffmpeg_pts_offset_test;
 
+
 /** @class FFmpegDecoder
  *  @brief A decoder using FFmpeg to decode content.
  */
@@ -46,36 +51,55 @@ class FFmpegDecoder : public FFmpeg, public Decoder
 public:
        FFmpegDecoder (std::shared_ptr<const Film> film, std::shared_ptr<const FFmpegContent>, bool fast);
 
-       bool pass ();
-       void seek (dcpomatic::ContentTime time, bool);
+       bool pass () override;
+       void seek (dcpomatic::ContentTime time, bool) override;
 
 private:
        friend struct ::ffmpeg_pts_offset_test;
 
-       void flush ();
+       enum class FlushResult {
+               DONE,
+               AGAIN
+       };
+
+       FlushResult flush();
 
        AVSampleFormat audio_sample_format (std::shared_ptr<FFmpegAudioStream> stream) const;
        int bytes_per_audio_sample (std::shared_ptr<FFmpegAudioStream> stream) const;
 
-       bool decode_video_packet ();
-       void decode_audio_packet ();
-       void decode_subtitle_packet ();
+       std::shared_ptr<FFmpegAudioStream> audio_stream_from_index (int index) const;
+       void process_audio_frame (std::shared_ptr<FFmpegAudioStream> stream);
+
+       void process_video_frame ();
 
-       void decode_bitmap_subtitle (AVSubtitleRect const * rect, dcpomatic::ContentTime from);
-       void decode_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
+       bool decode_and_process_video_packet (AVPacket* packet);
+       void decode_and_process_audio_packet (AVPacket* packet);
+       void decode_and_process_subtitle_packet (AVPacket* packet);
+
+       BitmapText process_bitmap_subtitle (AVSubtitleRect const * rect);
+       void process_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
 
        void maybe_add_subtitle ();
-       std::shared_ptr<AudioBuffers> deinterleave_audio (std::shared_ptr<FFmpegAudioStream> stream) const;
 
-       std::list<std::shared_ptr<VideoFilterGraph> > _filter_graphs;
-       boost::mutex _filter_graphs_mutex;
+       FlushResult flush_codecs();
+       FlushResult flush_fill();
+
+       VideoFilterGraphSet _filter_graphs;
 
        dcpomatic::ContentTime _pts_offset;
        boost::optional<dcpomatic::ContentTime> _current_subtitle_to;
        /** true if we have a subtitle which has not had emit_stop called for it yet */
-       bool _have_current_subtitle;
+       bool _have_current_subtitle = false;
 
        std::shared_ptr<Image> _black_image;
 
-       std::vector<boost::optional<dcpomatic::ContentTime> > _next_time;
+       std::map<std::shared_ptr<FFmpegAudioStream>, boost::optional<dcpomatic::ContentTime>> _next_time;
+
+       enum class FlushState {
+               CODECS,
+               AUDIO_DECODER,
+               FILL,
+       };
+
+       FlushState _flush_state = FlushState::CODECS;
 };
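
The notable change in this hunk is that flushing becomes resumable: `flush()` now returns `FlushResult::AGAIN` or `FlushResult::DONE`, and `_flush_state` records which stage to continue from on the next call (draining the codecs via `flush_codecs()`, then what `flush_fill()` presumably does, padding out the output, given that `_black_image` is kept). Below is a minimal, self-contained sketch of how such a resumable flush could be driven from `pass()`. It is an illustration under assumptions, not dcpomatic's actual implementation: the `SketchDecoder` class, `end_input()`, and the `_buffered_frames` / `_fill_frames` counters are invented for the example, the `AUDIO_DECODER` stage is omitted, and all real FFmpeg work is replaced by placeholders.

```cpp
// Sketch only: a flush spread over repeated pass() calls, mirroring the
// FlushResult / FlushState shape introduced in this header. None of the
// names below come from dcpomatic except the enum values.
#include <iostream>

class SketchDecoder {
public:
	// pass() returns true when there is nothing left to do.
	bool pass() {
		if (_input_exhausted) {
			return flush() == FlushResult::DONE;
		}
		// ... decode and process one packet here ...
		return false;
	}

	void end_input() { _input_exhausted = true; }

private:
	enum class FlushResult { DONE, AGAIN };
	enum class FlushState { CODECS, FILL };

	// Each call does a bounded amount of work and remembers where to resume,
	// so a long flush is split across many pass() calls.
	FlushResult flush() {
		switch (_flush_state) {
		case FlushState::CODECS:
			if (flush_codecs() == FlushResult::DONE) {
				_flush_state = FlushState::FILL;
			}
			return FlushResult::AGAIN;
		case FlushState::FILL:
			return flush_fill();
		}
		return FlushResult::DONE;
	}

	FlushResult flush_codecs() {
		// Placeholder: drain one buffered frame from the codecs per call.
		return --_buffered_frames > 0 ? FlushResult::AGAIN : FlushResult::DONE;
	}

	FlushResult flush_fill() {
		// Placeholder: emit one frame of black/silence per call.
		return --_fill_frames > 0 ? FlushResult::AGAIN : FlushResult::DONE;
	}

	bool _input_exhausted = false;
	FlushState _flush_state = FlushState::CODECS;
	int _buffered_frames = 3;
	int _fill_frames = 2;
};

int main() {
	SketchDecoder d;
	d.end_input();
	int passes = 0;
	while (!d.pass()) {
		++passes;
	}
	std::cout << "flushed after " << passes << " extra passes\n";
}
```

The design point the sketch tries to capture is that the caller of `pass()` never blocks on a monolithic flush; it simply keeps calling `pass()` until it reports completion, and the decoder's small state machine picks up where it left off.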