Add some new Waker calls so that the machine is not allowed to sleep during FFmpeg export.
diff --git a/src/lib/ffmpeg_encoder.cc b/src/lib/ffmpeg_encoder.cc
index 734c9810d16c701bf28497e99f8dbfcaa3f906ff..3f5b6f5913a889e52cb6a91d8cc92578df35d43b 100644
--- a/src/lib/ffmpeg_encoder.cc
+++ b/src/lib/ffmpeg_encoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2017 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2017-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -25,6 +25,8 @@
 #include "player_video.h"
 #include "log.h"
 #include "image.h"
+#include "cross.h"
+#include "butler.h"
 #include "compose.hpp"
 #include <iostream>
 
 using std::string;
 using std::runtime_error;
 using std::cout;
+using std::pair;
+using std::list;
+using std::map;
 using boost::shared_ptr;
 using boost::bind;
 using boost::weak_ptr;
 
-static AVPixelFormat
-force_pixel_format (AVPixelFormat, AVPixelFormat out)
-{
-       return out;
-}
-
-FFmpegEncoder::FFmpegEncoder (shared_ptr<const Film> film, weak_ptr<Job> job, boost::filesystem::path output, Format format)
+FFmpegEncoder::FFmpegEncoder (
+       shared_ptr<const Film> film,
+       weak_ptr<Job> job,
+       boost::filesystem::path output,
+       ExportFormat format,
+       bool mixdown_to_stereo,
+       bool split_reels,
+       int x264_crf
+       )
        : Encoder (film, job)
        , _history (1000)
-       , _output (output)
-{
-       switch (format) {
-       case FORMAT_PRORES:
-               _pixel_format = AV_PIX_FMT_YUV422P10;
-               _codec_name = "prores_ks";
-               break;
-       case FORMAT_H264:
-               _pixel_format = AV_PIX_FMT_YUV420P;
-               _codec_name = "libx264";
-               break;
-       }
-}
-
-void
-FFmpegEncoder::go ()
 {
-       AVCodec* codec = avcodec_find_encoder_by_name (_codec_name.c_str());
-       if (!codec) {
-               throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _codec_name));
-       }
-
-       _codec_context = avcodec_alloc_context3 (codec);
-       if (!_codec_context) {
-               throw runtime_error ("could not allocate FFmpeg context");
-       }
-
-       avcodec_get_context_defaults3 (_codec_context, codec);
-
-       /* Variable quantisation */
-       _codec_context->global_quality = 0;
-       _codec_context->width = _film->frame_size().width;
-       _codec_context->height = _film->frame_size().height;
-       _codec_context->time_base = (AVRational) { 1, _film->video_frame_rate() };
-       _codec_context->pix_fmt = _pixel_format;
-       _codec_context->flags |= CODEC_FLAG_QSCALE | CODEC_FLAG_GLOBAL_HEADER;
-
-       avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
-       if (!_format_context) {
-               throw runtime_error ("could not allocate FFmpeg format context");
-       }
-
-       _video_stream = avformat_new_stream (_format_context, codec);
-       if (!_video_stream) {
-               throw runtime_error ("could not create FFmpeg output video stream");
-       }
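+       /* One FileEncoderSet per output file: either one per reel if we are splitting
+          reels, or a single set covering the whole film.
+       */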
+       int const files = split_reels ? film->reels().size() : 1;
+       for (int i = 0; i < files; ++i) {
 
-       /* Note: needs to increment with each stream */
-       _video_stream->id = 0;
-       _video_stream->codec = _codec_context;
+               boost::filesystem::path filename = output;
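+               /* Split the extension off here; it is re-added by FileEncoderSet after any
+                  reel or eye suffix.
+               */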
+               string extension = boost::filesystem::extension (filename);
+               filename = boost::filesystem::change_extension (filename, "");
 
-       AVDictionary* options = 0;
-       av_dict_set (&options, "profile", "3", 0);
-       av_dict_set (&options, "threads", "auto", 0);
+               if (files > 1) {
+                       /// TRANSLATORS: _reel%1 here is to be added to an export filename to indicate
+                       /// which reel it is.  Preserve the %1; it will be replaced with the reel number.
+                       filename = filename.string() + String::compose(_("_reel%1"), i + 1);
+               }
 
-       if (avcodec_open2 (_codec_context, codec, &options) < 0) {
-               throw runtime_error ("could not open FFmpeg codec");
+               _file_encoders.push_back (
+                       FileEncoderSet (
+                               _film->frame_size(),
+                               _film->video_frame_rate(),
+                               _film->audio_frame_rate(),
+                               mixdown_to_stereo ? 2 : film->audio_channels(),
+                               format,
+                               x264_crf,
+                               _film->three_d(),
+                               filename,
+                               extension
+                               )
+                       );
        }
 
-       if (avio_open (&_format_context->pb, _output.c_str(), AVIO_FLAG_WRITE) < 0) {
-               throw runtime_error ("could not open FFmpeg output file");
+       _player->set_always_burn_open_subtitles ();
+       _player->set_play_referenced ();
+
+       int const ch = film->audio_channels ();
+
+       AudioMapping map;
+       if (mixdown_to_stereo) {
+               _output_audio_channels = 2;
+               map = AudioMapping (ch, 2);
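+               /* Simple stereo mix-down: the centre channel is split equally between left
+                  and right at -3dB, each surround goes to its corresponding side, and LFE
+                  is dropped.  overall_gain is chosen so that the gains feeding each output
+                  channel sum to 1 (2 * overall_gain + overall_gain / sqrt(2) == 1), so a
+                  full-scale input cannot clip the output.
+               */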
+               float const overall_gain = 2 / (4 + sqrt(2));
+               float const minus_3dB = 1 / sqrt(2);
+               map.set (dcp::LEFT,   0, overall_gain);
+               map.set (dcp::RIGHT,  1, overall_gain);
+               map.set (dcp::CENTRE, 0, overall_gain * minus_3dB);
+               map.set (dcp::CENTRE, 1, overall_gain * minus_3dB);
+               map.set (dcp::LS,     0, overall_gain);
+               map.set (dcp::RS,     1, overall_gain);
+       } else {
+               _output_audio_channels = ch;
+               map = AudioMapping (ch, ch);
+               for (int i = 0; i < ch; ++i) {
+                       map.set (i, i, 1);
+               }
        }
 
-       if (avformat_write_header (_format_context, &options) < 0) {
-               throw runtime_error ("could not write header to FFmpeg output file");
-       }
+       _butler.reset (new Butler(_player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), true, false));
+}
 
+void
+FFmpegEncoder::go ()
+{
        {
                shared_ptr<Job> job = _job.lock ();
                DCPOMATIC_ASSERT (job);
                job->sub (_("Encoding"));
        }
 
-       while (!_player->pass ()) {}
-
-       while (true) {
-               AVPacket packet;
-               av_init_packet (&packet);
-               packet.data = 0;
-               packet.size = 0;
-
-               int got_packet;
-               avcodec_encode_video2 (_codec_context, &packet, 0, &got_packet);
-               if (!got_packet) {
-                       break;
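+       /* Stop the machine from going to sleep while the export is running */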
+       Waker waker;
+
+       list<DCPTimePeriod> reel_periods = _film->reels ();
+       list<DCPTimePeriod>::const_iterator reel = reel_periods.begin ();
+       list<FileEncoderSet>::iterator encoder = _file_encoders.begin ();
+
+       DCPTime const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+       int const audio_frames = video_frame.frames_round(_film->audio_frame_rate());
+       float* interleaved = new float[_output_audio_channels * audio_frames];
+       shared_ptr<AudioBuffers> deinterleaved (new AudioBuffers (_output_audio_channels, audio_frames));
+       int const gets_per_frame = _film->three_d() ? 2 : 1;
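+       /* Each pass of this loop handles one video frame's worth of content: one video
+          frame from the butler (two for 3D, one per eye) and the corresponding block of
+          audio and, when one file is being written per reel, switching encoders at reel
+          boundaries.
+       */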
+       for (DCPTime i; i < _film->length(); i += video_frame) {
+
+               if (_file_encoders.size() > 1 && !reel->contains(i)) {
+                       /* Next reel and file */
+                       ++reel;
+                       ++encoder;
+                       DCPOMATIC_ASSERT (reel != reel_periods.end());
+                       DCPOMATIC_ASSERT (encoder != _file_encoders.end());
                }
 
-               packet.stream_index = 0;
-               av_interleaved_write_frame (_format_context, &packet);
-               av_packet_unref (&packet);
-       }
-
-       av_write_trailer (_format_context);
-
-       avcodec_close (_codec_context);
-       avio_close (_format_context->pb);
-       avformat_free_context (_format_context);
-}
-
-void
-FFmpegEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
-{
-       shared_ptr<Image> image = video->image (
-               bind (&Log::dcp_log, _film->log().get(), _1, _2),
-               bind (&force_pixel_format, _1, _pixel_format),
-               true,
-               false
-               );
-
-       AVFrame* frame = av_frame_alloc ();
-
-       for (int i = 0; i < 3; ++i) {
-               size_t const size = image->stride()[i] * image->size().height;
-               AVBufferRef* buffer = av_buffer_alloc (size);
-               /* XXX: inefficient */
-               memcpy (buffer->data, image->data()[i], size);
-               frame->buf[i] = av_buffer_ref (buffer);
-               frame->data[i] = buffer->data;
-               frame->linesize[i] = image->stride()[i];
-               av_buffer_unref (&buffer);
-       }
-
-       frame->width = image->size().width;
-       frame->height = image->size().height;
-       frame->format = _pixel_format;
-       frame->pts = time.seconds() / av_q2d (_video_stream->time_base);
+               for (int j = 0; j < gets_per_frame; ++j) {
+                       pair<shared_ptr<PlayerVideo>, DCPTime> v = _butler->get_video ();
+                       encoder->get(v.first->eyes())->video(v.first, v.second);
+               }
 
-       AVPacket packet;
-       av_init_packet (&packet);
-       packet.data = 0;
-       packet.size = 0;
+               _history.event ();
 
-       int got_packet;
-       if (avcodec_encode_video2 (_codec_context, &packet, frame, &got_packet) < 0) {
-               throw EncodeError ("FFmpeg video encode failed");
-       }
-
-       if (got_packet && packet.size) {
-               /* XXX: this should not be hard-wired */
-               packet.stream_index = 0;
-               av_interleaved_write_frame (_format_context, &packet);
-               av_packet_unref (&packet);
-       }
+               {
+                       boost::mutex::scoped_lock lm (_mutex);
+                       _last_time = i;
+               }
 
-       av_frame_free (&frame);
+               shared_ptr<Job> job = _job.lock ();
+               if (job) {
+                       job->set_progress (float(i.get()) / _film->length().get());
+               }
 
-       _history.event ();
+               waker.nudge ();
 
-       {
-               boost::mutex::scoped_lock lm (_mutex);
-               _last_time = time;
+               _butler->get_audio (interleaved, audio_frames);
+               /* XXX: inefficient; butler interleaves and we deinterleave again */
+               float* p = interleaved;
+               for (int j = 0; j < audio_frames; ++j) {
+                       for (int k = 0; k < _output_audio_channels; ++k) {
+                               deinterleaved->data(k)[j] = *p++;
+                       }
+               }
+               encoder->audio (deinterleaved);
        }
+       delete[] interleaved;
 
-       shared_ptr<Job> job = _job.lock ();
-       if (job) {
-               job->set_progress (float(time.get()) / _film->length().get());
+       BOOST_FOREACH (FileEncoderSet i, _file_encoders) {
+               i.flush ();
        }
 }
 
-void
-FFmpegEncoder::audio (shared_ptr<AudioBuffers> audio, DCPTime time)
+float
+FFmpegEncoder::current_rate () const
 {
+       return _history.rate ();
+}
 
+Frame
+FFmpegEncoder::frames_done () const
+{
+       boost::mutex::scoped_lock lm (_mutex);
+       return _last_time.frames_round (_film->video_frame_rate ());
 }
 
-void
-FFmpegEncoder::subtitle (PlayerSubtitles subs, DCPTimePeriod period)
+FFmpegEncoder::FileEncoderSet::FileEncoderSet (
+       dcp::Size video_frame_size,
+       int video_frame_rate,
+       int audio_frame_rate,
+       int channels,
+       ExportFormat format,
+       int x264_crf,
+       bool three_d,
+       boost::filesystem::path output,
+       string extension
+       )
 {
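+       /* For 3D, make one FFmpegFileEncoder per eye, writing to _L and _R suffixed files;
+          for 2D, a single encoder writes the whole output.
+       */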
+       if (three_d) {
+               /// TRANSLATORS: L here is an abbreviation for "left", to indicate the left-eye part of a 3D export
+               _encoders[EYES_LEFT] = shared_ptr<FFmpegFileEncoder>(
+                       new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1_%2%3", output.string(), _("L"), extension))
+                       );
+               /// TRANSLATORS: R here is an abbreviation for "right", to indicate the right-eye part of a 3D export
+               _encoders[EYES_RIGHT] = shared_ptr<FFmpegFileEncoder>(
+                       new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1_%2%3", output.string(), _("R"), extension))
+                       );
+       } else {
+               _encoders[EYES_BOTH]  = shared_ptr<FFmpegFileEncoder>(
+                       new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1%2", output.string(), extension))
+                       );
+       }
+}
 
+shared_ptr<FFmpegFileEncoder>
+FFmpegEncoder::FileEncoderSet::get (Eyes eyes) const
+{
+       map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::const_iterator i = _encoders.find (eyes);
+       DCPOMATIC_ASSERT (i != _encoders.end());
+       return i->second;
 }
 
-float
-FFmpegEncoder::current_rate () const
+void
+FFmpegEncoder::FileEncoderSet::flush ()
 {
-       return _history.rate ();
+       for (map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
+               i->second->flush ();
+       }
 }
 
-Frame
-FFmpegEncoder::frames_done () const
+void
+FFmpegEncoder::FileEncoderSet::audio (shared_ptr<AudioBuffers> a)
 {
-       boost::mutex::scoped_lock lm (_mutex);
-       return _last_time.frames_round (_film->video_frame_rate ());
+       for (map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
+               i->second->audio (a);
+       }
 }