/*
    Copyright (C) 2017-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.
*/
#include "butler.h"
#include "compose.hpp"
#include "cross.h"
#include "image.h"
#include "log.h"
#include <cmath>
#include <iostream>
#include <vector>

using std::cout;
using std::list;
using std::map;
using std::pair;
using std::runtime_error;
using std::string;
using boost::bind;
using boost::shared_ptr;
using boost::weak_ptr;
-
-int FFmpegEncoder::_video_stream_index = 0;
-int FFmpegEncoder::_audio_stream_index = 1;
-
-static AVPixelFormat
-force_pixel_format (AVPixelFormat, AVPixelFormat out)
-{
- return out;
-}
-
-FFmpegEncoder::FFmpegEncoder (shared_ptr<const Film> film, weak_ptr<Job> job, boost::filesystem::path output, Format format)
+using boost::optional;
+using namespace dcpomatic;
+
+/** @param key Key to use to encrypt MP4 outputs */
+FFmpegEncoder::FFmpegEncoder (
+ shared_ptr<const Film> film,
+ weak_ptr<Job> job,
+ boost::filesystem::path output,
+ ExportFormat format,
+ bool mixdown_to_stereo,
+ bool split_reels,
+ int x264_crf
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , optional<dcp::Key> key
+ , optional<string> id
+#endif
+ )
: Encoder (film, job)
- , _video_options (0)
, _history (1000)
- , _output (output)
- , _pending_audio (new AudioBuffers (film->audio_channels(), 0))
{
- switch (format) {
- case FORMAT_PRORES:
- _pixel_format = AV_PIX_FMT_YUV422P10;
- _sample_format = AV_SAMPLE_FMT_S16;
- _video_codec_name = "prores_ks";
- _audio_codec_name = "pcm_s16le";
- av_dict_set (&_video_options, "profile", "3", 0);
- av_dict_set (&_video_options, "threads", "auto", 0);
- break;
- case FORMAT_H264:
- _pixel_format = AV_PIX_FMT_YUV420P;
- _sample_format = AV_SAMPLE_FMT_FLTP;
- _video_codec_name = "libx264";
- _audio_codec_name = "aac";
- break;
- }
+ int const files = split_reels ? film->reels().size() : 1;
+ for (int i = 0; i < files; ++i) {
- _player->set_always_burn_subtitles (true);
- _player->set_play_referenced ();
-}
+ boost::filesystem::path filename = output;
+ string extension = boost::filesystem::extension (filename);
+ filename = boost::filesystem::change_extension (filename, "");
-void
-FFmpegEncoder::setup_video ()
-{
- _video_codec = avcodec_find_encoder_by_name (_video_codec_name.c_str());
- if (!_video_codec) {
- throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _video_codec_name));
- }
+ if (files > 1) {
+ /// TRANSLATORS: _reel%1 here is to be added to an export filename to indicate
+ /// which reel it is. Preserve the %1; it will be replaced with the reel number.
+ filename = filename.string() + String::compose(_("_reel%1"), i + 1);
+ }
- _video_codec_context = avcodec_alloc_context3 (_video_codec);
- if (!_video_codec_context) {
- throw runtime_error ("could not allocate FFmpeg video context");
+ _file_encoders.push_back (
+ FileEncoderSet (
+ _film->frame_size(),
+ _film->video_frame_rate(),
+ _film->audio_frame_rate(),
+ mixdown_to_stereo ? 2 : film->audio_channels(),
+ format,
+ x264_crf,
+ _film->three_d(),
+ filename,
+ extension
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , key
+ , id
+#endif
+ )
+ );
}
- avcodec_get_context_defaults3 (_video_codec_context, _video_codec);
-
- /* Variable quantisation */
- _video_codec_context->global_quality = 0;
- _video_codec_context->width = _film->frame_size().width;
- _video_codec_context->height = _film->frame_size().height;
- _video_codec_context->time_base = (AVRational) { 1, _film->video_frame_rate() };
- _video_codec_context->pix_fmt = _pixel_format;
- _video_codec_context->flags |= CODEC_FLAG_QSCALE | CODEC_FLAG_GLOBAL_HEADER;
-}
-
-void
-FFmpegEncoder::setup_audio ()
-{
- _audio_codec = avcodec_find_encoder_by_name (_audio_codec_name.c_str());
- if (!_audio_codec) {
- throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _audio_codec_name));
- }
+ _player->set_always_burn_open_subtitles ();
+ _player->set_play_referenced ();
- _audio_codec_context = avcodec_alloc_context3 (_audio_codec);
- if (!_audio_codec_context) {
- throw runtime_error ("could not allocate FFmpeg audio context");
+ int const ch = film->audio_channels ();
+
+ AudioMapping map;
+ if (mixdown_to_stereo) {
+ _output_audio_channels = 2;
+ map = AudioMapping (ch, 2);
+ float const overall_gain = 2 / (4 + sqrt(2));
+ float const minus_3dB = 1 / sqrt(2);
+ map.set (dcp::LEFT, 0, overall_gain);
+ map.set (dcp::RIGHT, 1, overall_gain);
+ map.set (dcp::CENTRE, 0, overall_gain * minus_3dB);
+ map.set (dcp::CENTRE, 1, overall_gain * minus_3dB);
+ map.set (dcp::LS, 0, overall_gain);
+ map.set (dcp::RS, 1, overall_gain);
+ } else {
+ _output_audio_channels = ch;
+ map = AudioMapping (ch, ch);
+ for (int i = 0; i < ch; ++i) {
+ map.set (i, i, 1);
+ }
}
- avcodec_get_context_defaults3 (_audio_codec_context, _audio_codec);
-
- /* XXX: configurable */
- _audio_codec_context->bit_rate = 256 * 1024;
- _audio_codec_context->sample_fmt = _sample_format;
- _audio_codec_context->sample_rate = _film->audio_frame_rate ();
- _audio_codec_context->channel_layout = av_get_default_channel_layout (_film->audio_channels ());
- _audio_codec_context->channels = _film->audio_channels ();
+ _butler.reset (new Butler(_player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), true, false));
}
void
FFmpegEncoder::go ()
{
- setup_video ();
- setup_audio ();
-
- avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
- if (!_format_context) {
- throw runtime_error ("could not allocate FFmpeg format context");
- }
-
- _video_stream = avformat_new_stream (_format_context, _video_codec);
- if (!_video_stream) {
- throw runtime_error ("could not create FFmpeg output video stream");
- }
-
- _audio_stream = avformat_new_stream (_format_context, _audio_codec);
- if (!_audio_stream) {
- throw runtime_error ("could not create FFmpeg output audio stream");
- }
-
- _video_stream->id = _video_stream_index;
- _video_stream->codec = _video_codec_context;
-
- _audio_stream->id = _audio_stream_index;
- _audio_stream->codec = _audio_codec_context;
-
- if (avcodec_open2 (_video_codec_context, _video_codec, &_video_options) < 0) {
- throw runtime_error ("could not open FFmpeg video codec");
- }
-
- int r = avcodec_open2 (_audio_codec_context, _audio_codec, 0);
- if (r < 0) {
- char buffer[256];
- av_strerror (r, buffer, sizeof(buffer));
- throw runtime_error (String::compose ("could not open FFmpeg audio codec (%1)", buffer));
- }
-
- if (avio_open_boost (&_format_context->pb, _output, AVIO_FLAG_WRITE) < 0) {
- throw runtime_error ("could not open FFmpeg output file");
- }
-
- if (avformat_write_header (_format_context, 0) < 0) {
- throw runtime_error ("could not write header to FFmpeg output file");
- }
-
{
shared_ptr<Job> job = _job.lock ();
DCPOMATIC_ASSERT (job);
job->sub (_("Encoding"));
}
- while (!_player->pass ()) {}
-
- if (_pending_audio->frames() > 0) {
- audio_frame (_pending_audio->frames ());
- }
+ list<DCPTimePeriod> reel_periods = _film->reels ();
+ list<DCPTimePeriod>::const_iterator reel = reel_periods.begin ();
+ list<FileEncoderSet>::iterator encoder = _file_encoders.begin ();
+
+ DCPTime const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+ int const audio_frames = video_frame.frames_round(_film->audio_frame_rate());
+ float* interleaved = new float[_output_audio_channels * audio_frames];
+ shared_ptr<AudioBuffers> deinterleaved (new AudioBuffers (_output_audio_channels, audio_frames));
+ int const gets_per_frame = _film->three_d() ? 2 : 1;
+ for (DCPTime i; i < _film->length(); i += video_frame) {
+
+ if (_file_encoders.size() > 1 && !reel->contains(i)) {
+ /* Next reel and file */
+ ++reel;
+ ++encoder;
+ DCPOMATIC_ASSERT (reel != reel_periods.end());
+ DCPOMATIC_ASSERT (encoder != _file_encoders.end());
+ }
- /* Flush */
+ for (int j = 0; j < gets_per_frame; ++j) {
+ pair<shared_ptr<PlayerVideo>, DCPTime> v = _butler->get_video (true, 0);
+ encoder->get(v.first->eyes())->video(v.first, v.second);
+ }
- bool flushed_video = false;
- bool flushed_audio = false;
+ _history.event ();
- while (!flushed_video || !flushed_audio) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _last_time = i;
+ }
- int got_packet;
- avcodec_encode_video2 (_video_codec_context, &packet, 0, &got_packet);
- if (got_packet) {
- packet.stream_index = 0;
- av_interleaved_write_frame (_format_context, &packet);
- } else {
- flushed_video = true;
+ shared_ptr<Job> job = _job.lock ();
+ if (job) {
+ job->set_progress (float(i.get()) / _film->length().get());
}
- av_packet_unref (&packet);
-
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
-
- avcodec_encode_audio2 (_audio_codec_context, &packet, 0, &got_packet);
- if (got_packet) {
- packet.stream_index = 0;
- av_interleaved_write_frame (_format_context, &packet);
- } else {
- flushed_audio = true;
+
+ _butler->get_audio (interleaved, audio_frames);
+ /* XXX: inefficient; butler interleaves and we deinterleave again */
+ float* p = interleaved;
+ for (int j = 0; j < audio_frames; ++j) {
+ for (int k = 0; k < _output_audio_channels; ++k) {
+ deinterleaved->data(k)[j] = *p++;
+ }
}
- av_packet_unref (&packet);
+ encoder->audio (deinterleaved);
}
+ delete[] interleaved;
- av_write_trailer (_format_context);
-
- avcodec_close (_video_codec_context);
- avcodec_close (_audio_codec_context);
- avio_close (_format_context->pb);
- avformat_free_context (_format_context);
+ BOOST_FOREACH (FileEncoderSet i, _file_encoders) {
+ i.flush ();
+ }
}
-void
-FFmpegEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
+optional<float>
+FFmpegEncoder::current_rate () const
{
- shared_ptr<Image> image = video->image (
- bind (&Log::dcp_log, _film->log().get(), _1, _2),
- bind (&force_pixel_format, _1, _pixel_format),
- true,
- false
- );
-
- AVFrame* frame = av_frame_alloc ();
- DCPOMATIC_ASSERT (frame);
-
- for (int i = 0; i < 3; ++i) {
- size_t const size = image->stride()[i] * image->sample_size(i).height;
- AVBufferRef* buffer = av_buffer_alloc (size);
- DCPOMATIC_ASSERT (buffer);
- /* XXX: inefficient */
- memcpy (buffer->data, image->data()[i], size);
- frame->buf[i] = av_buffer_ref (buffer);
- frame->data[i] = buffer->data;
- frame->linesize[i] = image->stride()[i];
- av_buffer_unref (&buffer);
- }
-
- frame->width = image->size().width;
- frame->height = image->size().height;
- frame->format = _pixel_format;
- frame->pts = time.seconds() / av_q2d (_video_stream->time_base);
-
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
-
- int got_packet;
- if (avcodec_encode_video2 (_video_codec_context, &packet, frame, &got_packet) < 0) {
- throw EncodeError ("FFmpeg video encode failed");
- }
-
- if (got_packet && packet.size) {
- packet.stream_index = _video_stream_index;
- av_interleaved_write_frame (_format_context, &packet);
- av_packet_unref (&packet);
- }
-
- av_frame_free (&frame);
-
- _history.event ();
-
- {
- boost::mutex::scoped_lock lm (_mutex);
- _last_time = time;
- }
-
- shared_ptr<Job> job = _job.lock ();
- if (job) {
- job->set_progress (float(time.get()) / _film->length().get());
- }
+ return _history.rate ();
}
-/** Called when the player gives us some audio */
-void
-FFmpegEncoder::audio (shared_ptr<AudioBuffers> audio, DCPTime)
+Frame
+FFmpegEncoder::frames_done () const
{
- _pending_audio->append (audio);
-
- int frame_size = _audio_codec_context->frame_size;
- if (frame_size == 0) {
- /* codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE */
- frame_size = 2000;
- }
-
- while (_pending_audio->frames() >= frame_size) {
- audio_frame (frame_size);
- }
+ boost::mutex::scoped_lock lm (_mutex);
+ return _last_time.frames_round (_film->video_frame_rate ());
}
-void
-FFmpegEncoder::audio_frame (int size)
+FFmpegEncoder::FileEncoderSet::FileEncoderSet (
+ dcp::Size video_frame_size,
+ int video_frame_rate,
+ int audio_frame_rate,
+ int channels,
+ ExportFormat format,
+ int x264_crf,
+ bool three_d,
+ boost::filesystem::path output,
+ string extension
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , optional<dcp::Key> key
+ , optional<string> id
+#endif
+ )
{
- DCPOMATIC_ASSERT (size);
-
- AVFrame* frame = av_frame_alloc ();
- DCPOMATIC_ASSERT (frame);
-
- int const channels = _audio_codec_context->channels;
- DCPOMATIC_ASSERT (channels);
-
- int const buffer_size = av_samples_get_buffer_size (0, channels, size, _audio_codec_context->sample_fmt, 0);
- DCPOMATIC_ASSERT (buffer_size >= 0);
-
- void* samples = av_malloc (buffer_size);
- DCPOMATIC_ASSERT (samples);
-
- frame->nb_samples = size;
- int r = avcodec_fill_audio_frame (frame, channels, _audio_codec_context->sample_fmt, (const uint8_t *) samples, buffer_size, 0);
- DCPOMATIC_ASSERT (r >= 0);
-
- float** p = _pending_audio->data ();
- switch (_audio_codec_context->sample_fmt) {
- case AV_SAMPLE_FMT_S16:
- {
- int16_t* q = reinterpret_cast<int16_t*> (samples);
- for (int i = 0; i < size; ++i) {
- for (int j = 0; j < channels; ++j) {
- *q++ = p[j][i] * 32767;
- }
- }
- break;
- }
- case AV_SAMPLE_FMT_FLTP:
- {
- float* q = reinterpret_cast<float*> (samples);
- for (int i = 0; i < channels; ++i) {
- memcpy (q, p[i], sizeof(float) * size);
- q += size;
- }
- break;
- }
- default:
- DCPOMATIC_ASSERT (false);
+ if (three_d) {
+ /// TRANSLATORS: L here is an abbreviation for "left", to indicate the left-eye part of a 3D export
+ _encoders[EYES_LEFT] = shared_ptr<FFmpegFileEncoder>(
+ new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1_%2%3", output.string(), _("L"), extension)
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , key, id
+#endif
+ )
+ );
+ /// TRANSLATORS: R here is an abbreviation for "right", to indicate the right-eye part of a 3D export
+ _encoders[EYES_RIGHT] = shared_ptr<FFmpegFileEncoder>(
+ new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1_%2%3", output.string(), _("R"), extension)
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , key, id
+#endif
+ )
+ );
+ } else {
+ _encoders[EYES_BOTH] = shared_ptr<FFmpegFileEncoder>(
+ new FFmpegFileEncoder(video_frame_size, video_frame_rate, audio_frame_rate, channels, format, x264_crf, String::compose("%1%2", output.string(), extension)
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+ , key, id
+#endif
+ )
+ );
}
-
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
-
- int got_packet;
- if (avcodec_encode_audio2 (_audio_codec_context, &packet, frame, &got_packet) < 0) {
- throw EncodeError ("FFmpeg audio encode failed");
- }
-
- if (got_packet && packet.size) {
- packet.stream_index = _audio_stream_index;
- av_interleaved_write_frame (_format_context, &packet);
- av_packet_unref (&packet);
- }
-
- av_free (samples);
- av_frame_free (&frame);
-
- _pending_audio->trim_start (size);
}
-void
-FFmpegEncoder::subtitle (PlayerSubtitles, DCPTimePeriod)
+shared_ptr<FFmpegFileEncoder>
+FFmpegEncoder::FileEncoderSet::get (Eyes eyes) const
{
-
+ map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::const_iterator i = _encoders.find (eyes);
+ DCPOMATIC_ASSERT (i != _encoders.end());
+ return i->second;
}
-float
-FFmpegEncoder::current_rate () const
+void
+FFmpegEncoder::FileEncoderSet::flush ()
{
- return _history.rate ();
+ for (map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
+ i->second->flush ();
+ }
}
-Frame
-FFmpegEncoder::frames_done () const
+void
+FFmpegEncoder::FileEncoderSet::audio (shared_ptr<AudioBuffers> a)
{
- boost::mutex::scoped_lock lm (_mutex);
- return _last_time.frames_round (_film->video_frame_rate ());
+ for (map<Eyes, boost::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
+ i->second->audio (a);
+ }
}