Remove in-place translations support.
[dcpomatic.git] / src / lib / ffmpeg_decoder.cc
index 85f79b513664f28f01640662b998d541ffcdde4a..6130d8e5fb5c59be98c6aec9c6ed7b78e23eeac1 100644
@@ -254,7 +254,7 @@ FFmpegDecoder::pass ()
  */
 static
 shared_ptr<AudioBuffers>
-deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
+deinterleave_audio(AVFrame* frame)
 {
        auto format = static_cast<AVSampleFormat>(frame->format);
 
@@ -266,6 +266,10 @@ deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
        auto audio = make_shared<AudioBuffers>(channels, frames);
        auto data = audio->data();
 
+       if (frames == 0) {
+               return audio;
+       }
+
        switch (format) {
        case AV_SAMPLE_FMT_U8:
        {
@@ -360,14 +364,9 @@ deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
        case AV_SAMPLE_FMT_FLTP:
        {
                auto p = reinterpret_cast<float**> (frame->data);
-               DCPOMATIC_ASSERT(channels <= stream->channels());
-               /* Sometimes there aren't as many channels in the frame as in the stream */
                for (int i = 0; i < channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
-               for (int i = channels; i < stream->channels(); ++i) {
-                       audio->make_silent (i);
-               }
        }
        break;
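
With the stream argument gone, everything the deinterleaver needs comes from the AVFrame itself, and the new early return means an empty frame never touches the data planes. Below is a standalone sketch of that shape for the planar-float case only; std::vector stands in for AudioBuffers and the channel-count lookup assumes the FFmpeg >= 5.1 ch_layout API, both of which are assumptions for illustration rather than the dcpomatic code.

extern "C" {
#include <libavutil/frame.h>
}

#include <cstring>
#include <vector>

static
std::vector<std::vector<float>>
deinterleave_fltp_sketch(AVFrame* frame)
{
	/* Channel count and sample count both come from the frame alone
	   (ch_layout requires FFmpeg 5.1 or later; older builds expose frame->channels).
	*/
	int const channels = frame->ch_layout.nb_channels;
	int const frames = frame->nb_samples;

	std::vector<std::vector<float>> audio(channels, std::vector<float>(frames));

	/* An empty frame has nothing to copy; returning early avoids reading
	   data planes that may not be meaningful when nb_samples is 0.
	*/
	if (frames == 0) {
		return audio;
	}

	/* AV_SAMPLE_FMT_FLTP keeps one plane of floats per channel */
	auto p = reinterpret_cast<float**>(frame->data);
	for (int i = 0; i < channels; ++i) {
		std::memcpy(audio[i].data(), p[i], frames * sizeof(float));
	}

	return audio;
}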
 
@@ -481,7 +480,7 @@ void
 FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
 {
        auto frame = audio_frame (stream);
-       auto data = deinterleave_audio(stream, frame);
+       auto data = deinterleave_audio(frame);
 
        auto const time_base = stream->stream(_format_context)->time_base;
 
@@ -635,9 +634,14 @@ FFmpegDecoder::process_video_frame ()
 void
 FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
 {
+       auto context = subtitle_codec_context();
+       if (!context) {
+               return;
+       }
+
        int got_subtitle;
        AVSubtitle sub;
-       if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
+       if (avcodec_decode_subtitle2(context, &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
                return;
        }
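
The guard above protects against subtitle streams for which no codec context could be set up; the packet is simply dropped rather than handed to libavcodec with a null context. A hedged sketch of the same decode-and-check pattern against the plain libavcodec API follows (the helper name is made up for this example, not part of dcpomatic).

extern "C" {
#include <libavcodec/avcodec.h>
}

/* Returns true and fills `sub` only when the decoder actually produced a
   subtitle; the caller must then release it with avsubtitle_free(&sub).
*/
static
bool
try_decode_subtitle(AVCodecContext* context, AVPacket* packet, AVSubtitle& sub)
{
	if (!context) {
		/* No usable subtitle decoder for this stream: ignore the packet */
		return false;
	}

	int got_subtitle = 0;
	if (avcodec_decode_subtitle2(context, &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
		return false;
	}

	return true;
}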
 
@@ -664,11 +668,11 @@ FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
        */
        ContentTime from;
        from = sub_period.from + _pts_offset;
+       _have_current_subtitle = true;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
-               _have_current_subtitle = true;
        }
 
        ContentBitmapText bitmap_text(from);
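
Moving the assignment out of the else-branch fixes the bookkeeping: a subtitle is "current" as soon as it is decoded, whether or not its end time is known yet, and _current_subtitle_to stays empty until something supplies one. A tiny illustrative sketch of that state with std::optional (dcpomatic itself uses boost::optional and ContentTime; the struct here is invented for the example):

#include <optional>

struct SubtitleState
{
	bool have_current = false;
	std::optional<double> current_to;  /* seconds; empty = end not yet known */

	void on_decoded(std::optional<double> to)
	{
		/* We now have a current subtitle in either case ... */
		have_current = true;
		/* ... but its end time is only recorded when the stream gave one */
		current_to = to;
	}
};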
@@ -716,7 +720,7 @@ FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
-       auto const palette = rect->pict.data[1];
+       auto const* palette = rect->pict.data[1];
 #else
        /* Start of the first line in the subtitle */
        auto sub_p = rect->data[0];
@@ -777,11 +781,23 @@ FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
-       DCPOMATIC_ASSERT (target_width);
-       DCPOMATIC_ASSERT (target_height);
+
+       int x_offset = 0;
+       int y_offset = 0;
+       if (_ffmpeg_content->video && _ffmpeg_content->video->use()) {
+               auto const crop = _ffmpeg_content->video->actual_crop();
+               target_width -= crop.left + crop.right;
+               target_height -= crop.top + crop.bottom;
+               x_offset = -crop.left;
+               y_offset = -crop.top;
+       }
+
+       DCPOMATIC_ASSERT(target_width > 0);
+       DCPOMATIC_ASSERT(target_height > 0);
+
        dcpomatic::Rect<double> const scaled_rect (
-               static_cast<double>(rect->x) / target_width,
-               static_cast<double>(rect->y) / target_height,
+               static_cast<double>(rect->x + x_offset) / target_width,
+               static_cast<double>(rect->y + y_offset) / target_height,
                static_cast<double>(rect->w) / target_width,
                static_cast<double>(rect->h) / target_height
                );
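
The new offsets shift a rectangle expressed in pixels of the full decoded frame into the coordinate space of the cropped picture before normalising, so bitmap subtitles keep their intended position when the video is cropped. A self-contained sketch of the arithmetic is below; plain structs stand in for dcpomatic::Rect and the crop type, and all names are illustrative.

#include <cassert>

struct CropSketch { int left = 0; int right = 0; int top = 0; int bottom = 0; };
struct RectSketch { double x, y, w, h; };

static
RectSketch
scale_subtitle_rect(int rect_x, int rect_y, int rect_w, int rect_h, int full_width, int full_height, CropSketch crop)
{
	/* The target is the visible (cropped) area ... */
	int const target_width = full_width - crop.left - crop.right;
	int const target_height = full_height - crop.top - crop.bottom;
	assert(target_width > 0 && target_height > 0);

	/* ... and the rectangle is shifted so that it is relative to the crop origin */
	int const x_offset = -crop.left;
	int const y_offset = -crop.top;

	return {
		static_cast<double>(rect_x + x_offset) / target_width,
		static_cast<double>(rect_y + y_offset) / target_height,
		static_cast<double>(rect_w) / target_width,
		static_cast<double>(rect_h) / target_height
	};
}

/* For example, a 1920x1080 frame cropped by 140 top and bottom leaves 1920x800;
   a rect at y = 900 then maps to (900 - 140) / 800 = 0.95 of the cropped height.
*/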
@@ -812,11 +828,14 @@ FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
        }
 
        sub::RawSubtitle base;
+       auto video_size = _ffmpeg_content->video->size();
+       DCPOMATIC_ASSERT(video_size);
+
        auto raw = sub::SSAReader::parse_line (
                base,
                text,
-               _ffmpeg_content->video->size().width,
-               _ffmpeg_content->video->size().height,
+               video_size->width,
+               video_size->height,
                sub::Colour(1, 1, 1)
                );
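
Caching video->size() in a local and asserting it guards the case where the content's video dimensions are not known, since SSA positioning is meaningless without them. A minimal sketch of the same check with std::optional (the types are stand-ins, not dcpomatic's):

#include <optional>
#include <stdexcept>

struct SizeSketch { int width; int height; };

/* Illustrative only: fail loudly instead of dereferencing an empty optional
   before the dimensions are fed to the SSA parser.
*/
static
SizeSketch
require_video_size(std::optional<SizeSketch> const& size)
{
	if (!size) {
		throw std::runtime_error("SSA subtitles need video dimensions, but none are known");
	}
	return *size;
}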