Fix crash when using delay; fix cross-thread GUI access caused by FilmState default copy...
[dcpomatic.git] / src / lib / decoder.cc
index 4ffe049b8a29786eda6b75582458f8825c41a66a..a90c14b2bdc995af6664e51965a68bd7995978f8 100644 (file)
 
 #include <iostream>
 #include <stdint.h>
+#include <boost/lexical_cast.hpp>
 extern "C" {
 #include <libavfilter/avfiltergraph.h>
 #include <libavfilter/buffersrc.h>
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 61 && LIBAVFILTER_VERSION_MINOR <= 77
+#if (LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 53 && LIBAVFILTER_VERSION_MINOR <= 77) || LIBAVFILTER_VERSION_MAJOR == 3
 #include <libavfilter/avcodec.h>
 #include <libavfilter/buffersink.h>
+#elif LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
+#include <libavfilter/vsrc_buffer.h>
 #endif
 #include <libavformat/avio.h>
 }
@@ -45,6 +48,7 @@ extern "C" {
 #include "filter.h"
 #include "delay_line.h"
 #include "ffmpeg_compatibility.h"
+#include "subtitle.h"
 
 using namespace std;
 using namespace boost;
@@ -67,13 +71,12 @@ Decoder::Decoder (boost::shared_ptr<const FilmState> s, boost::shared_ptr<const
        , _video_frame (0)
        , _buffer_src_context (0)
        , _buffer_sink_context (0)
-       , _swr_context (0)
        , _have_setup_video_filters (false)
        , _delay_line (0)
        , _delay_in_bytes (0)
        , _audio_frames_processed (0)
 {
-       if (_opt->decode_video_frequency != 0 && _fs->length == 0) {
+       if (_opt->decode_video_frequency != 0 && _fs->length() == 0) {
                throw DecodeError ("cannot do a partial decode if length == 0");
        }
 }
@@ -83,88 +86,58 @@ Decoder::~Decoder ()
        delete _delay_line;
 }
 
+/** Start off a decode processing run */
 void
 Decoder::process_begin ()
 {
-       if (_fs->audio_sample_rate != dcp_audio_sample_rate (_fs->audio_sample_rate)) {
-               _swr_context = swr_alloc_set_opts (
-                       0,
-                       audio_channel_layout(),
-                       audio_sample_format(),
-                       dcp_audio_sample_rate (_fs->audio_sample_rate),
-                       audio_channel_layout(),
-                       audio_sample_format(),
-                       _fs->audio_sample_rate,
-                       0, 0
-                       );
-               
-               swr_init (_swr_context);
-       } else {
-               _swr_context = 0;
-       }
-
-       _delay_in_bytes = _fs->audio_delay * _fs->audio_sample_rate * _fs->audio_channels * _fs->bytes_per_sample() / 1000;
+       _delay_in_bytes = _fs->audio_delay() * _fs->audio_sample_rate() * _fs->audio_channels() * bytes_per_audio_sample() / 1000;
        delete _delay_line;
        _delay_line = new DelayLine (_delay_in_bytes);
 
        _audio_frames_processed = 0;
 }
 
+/** Finish off a decode processing run */
 void
 Decoder::process_end ()
 {
-       if (_swr_context) {
-
-               int mop = 0;
-               while (1) {
-                       uint8_t buffer[256 * _fs->bytes_per_sample() * _fs->audio_channels];
-                       uint8_t* out[1] = {
-                               buffer
-                       };
-
-                       int const frames = swr_convert (_swr_context, out, 256, 0, 0);
-
-                       if (frames < 0) {
-                               throw DecodeError ("could not run sample-rate converter");
-                       }
-
-                       if (frames == 0) {
-                               break;
-                       }
-
-                       mop += frames;
-                       int available = _delay_line->feed (buffer, frames * _fs->audio_channels * _fs->bytes_per_sample());
-                       Audio (buffer, available);
-               }
-
-               swr_free (&_swr_context);
-       }
-       
        if (_delay_in_bytes < 0) {
                uint8_t remainder[-_delay_in_bytes];
                _delay_line->get_remaining (remainder);
-               _audio_frames_processed += _delay_in_bytes / (_fs->audio_channels * _fs->bytes_per_sample());
-               Audio (remainder, _delay_in_bytes);
+               _audio_frames_processed += _delay_in_bytes / (_fs->audio_channels() * bytes_per_audio_sample());
+               emit_audio (remainder, -_delay_in_bytes);
        }
 
        /* If we cut the decode off, the audio may be short; push some silence
           in to get it to the right length.
        */
 
-       int const audio_short_by_frames =
-               (decoding_frames() * dcp_audio_sample_rate (_fs->audio_sample_rate) / _fs->frames_per_second)
-               - _audio_frames_processed;
+       int64_t const video_length_in_audio_frames = ((int64_t) _fs->dcp_length() * _fs->target_sample_rate() / _fs->frames_per_second());
+       int64_t const audio_short_by_frames = video_length_in_audio_frames - _audio_frames_processed;
 
-       int bytes = audio_short_by_frames * _fs->audio_channels * _fs->bytes_per_sample();
+       _log->log (
+               String::compose ("DCP length is %1 (%2 audio frames); %3 frames of audio processed.",
+                                _fs->dcp_length(),
+                                video_length_in_audio_frames,
+                                _audio_frames_processed)
+               );
+       
+       if (audio_short_by_frames >= 0 && _opt->decode_audio) {
 
-       int const silence_size = 64 * 1024;
-       uint8_t silence[silence_size];
-       memset (silence, 0, silence_size);
+               _log->log (String::compose ("DCP length is %1; %2 frames of audio processed.", _fs->dcp_length(), _audio_frames_processed));
+               _log->log (String::compose ("Adding %1 frames of silence to the end.", audio_short_by_frames));
 
-       while (bytes) {
-               int const t = min (bytes, silence_size);
-               Audio (silence, t);
-               bytes -= t;
+               int64_t bytes = audio_short_by_frames * _fs->audio_channels() * bytes_per_audio_sample();
+               
+               int64_t const silence_size = 16 * 1024 * _fs->audio_channels() * bytes_per_audio_sample();
+               uint8_t silence[silence_size];
+               memset (silence, 0, silence_size);
+               
+               while (bytes) {
+                       int64_t const t = min (bytes, silence_size);
+                       emit_audio (silence, t);
+                       bytes -= t;
+               }
        }
 }
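
Aside (not part of the patch): the silence padding above is simple arithmetic. The video length expressed in audio frames is dcp_length() * target_sample_rate() / frames_per_second(), and whatever the decode fell short of that is filled with zeros. A minimal standalone sketch of the same calculation, with hypothetical values standing in for the FilmState accessors:

    #include <stdint.h>
    #include <iostream>

    int main ()
    {
            /* Hypothetical values standing in for the FilmState accessors */
            int64_t const dcp_length = 1440;                 /* one minute of video at 24fps */
            int64_t const target_sample_rate = 48000;        /* DCP audio sample rate */
            float const frames_per_second = 24;
            int64_t const audio_frames_processed = 2879000;  /* what the decode actually produced */

            /* Same arithmetic as Decoder::process_end() */
            int64_t const video_length_in_audio_frames = (int64_t) dcp_length * target_sample_rate / frames_per_second;
            int64_t const audio_short_by_frames = video_length_in_audio_frames - audio_frames_processed;

            std::cout << "padding with " << audio_short_by_frames << " audio frames of silence\n";
            return 0;
    }
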
 
@@ -180,24 +153,13 @@ Decoder::go ()
 
        while (pass () == false) {
                if (_job && !_ignore_length) {
-                       _job->set_progress (float (_video_frame) / decoding_frames ());
+                       _job->set_progress (float (_video_frame) / _fs->dcp_length());
                }
        }
 
        process_end ();
 }
 
-/** @return Number of frames that we will be decoding */
-int
-Decoder::decoding_frames () const
-{
-       if (_opt->num_frames > 0) {
-               return _opt->num_frames;
-       }
-       
-       return _fs->length;
-}
-
 /** Run one pass.  This may or may not generate any actual video / audio data;
  *  some decoders may require several passes to generate a single frame.
  *  @return true if we have finished processing all data; otherwise false.
@@ -210,7 +172,7 @@ Decoder::pass ()
                _have_setup_video_filters = true;
        }
        
-       if (_opt->num_frames != 0 && _video_frame >= _opt->num_frames) {
+       if (!_ignore_length && _video_frame >= _fs->dcp_length()) {
                return true;
        }
 
@@ -218,95 +180,91 @@ Decoder::pass ()
 }
 
 /** Called by subclasses to tell the world that some audio data is ready
- *  @param data Interleaved audio data, in FilmState::audio_sample_format.
+ *  @param data Audio data, in FilmState::audio_sample_format.
  *  @param size Number of bytes of data.
  */
 void
 Decoder::process_audio (uint8_t* data, int size)
 {
-       /* Here's samples per channel */
-       int const samples = size / _fs->bytes_per_sample();
+       /* Push into the delay line */
+       size = _delay_line->feed (data, size);
 
-       /* And here's frames (where 1 frame is a collection of samples, 1 for each channel,
-          so for 5.1 a frame would be 6 samples)
-       */
-       int const frames = samples / _fs->audio_channels;
+       emit_audio (data, size);
+}
 
-       /* Maybe apply gain */
-       if (_fs->audio_gain != 0) {
-               float const linear_gain = pow (10, _fs->audio_gain / 20);
-               uint8_t* p = data;
-               switch (_fs->audio_sample_format) {
-               case AV_SAMPLE_FMT_S16:
-                       for (int i = 0; i < samples; ++i) {
-                               /* XXX: assumes little-endian; also we should probably be dithering here */
-
-                               /* unsigned sample */
-                               int const ou = p[0] | (p[1] << 8);
-
-                               /* signed sample */
-                               int const os = ou >= 0x8000 ? (- 0x10000 + ou) : ou;
-
-                               /* signed sample with altered gain */
-                               int const gs = int (os * linear_gain);
-
-                               /* unsigned sample with altered gain */
-                               int const gu = gs > 0 ? gs : (0x10000 + gs);
-
-                               /* write it back */
-                               p[0] = gu & 0xff;
-                               p[1] = (gu & 0xff00) >> 8;
-                               p += 2;
+void
+Decoder::emit_audio (uint8_t* data, int size)
+{
+       /* Deinterleave and convert to float */
+
+       assert ((size % (bytes_per_audio_sample() * _fs->audio_channels())) == 0);
+
+       int const total_samples = size / bytes_per_audio_sample();
+       int const frames = total_samples / _fs->audio_channels();
+       shared_ptr<AudioBuffers> audio (new AudioBuffers (_fs->audio_channels(), frames));
+
+       switch (audio_sample_format()) {
+       case AV_SAMPLE_FMT_S16:
+       {
+               int16_t* p = (int16_t *) data;
+               int sample = 0;
+               int channel = 0;
+               for (int i = 0; i < total_samples; ++i) {
+                       audio->data(channel)[sample] = float(*p++) / (1 << 15);
+
+                       ++channel;
+                       if (channel == _fs->audio_channels()) {
+                               channel = 0;
+                               ++sample;
+                       }
+               }
+       }
+       break;
+
+       case AV_SAMPLE_FMT_S32:
+       {
+               int32_t* p = (int32_t *) data;
+               int sample = 0;
+               int channel = 0;
+               for (int i = 0; i < total_samples; ++i) {
+                       audio->data(channel)[sample] = float(*p++) / 2147483648.0f;  /* 2^31; 1 << 31 overflows int */
+
+                       ++channel;
+                       if (channel == _fs->audio_channels()) {
+                               channel = 0;
+                               ++sample;
                        }
-                       break;
-               default:
-                       assert (false);
                }
        }
+       break;
 
-       /* This is a buffer we might use if we are sample-rate converting;
-          it will need freeing if so.
-       */
-       uint8_t* out_buffer = 0;
-
-       /* Maybe sample-rate convert */
-       if (_swr_context) {
-
-               uint8_t const * in[2] = {
-                       data,
-                       0
-               };
-
-               /* Compute the resampled frame count and add 32 for luck */
-               int const out_buffer_size_frames = ceil (frames * float (dcp_audio_sample_rate (_fs->audio_sample_rate)) / _fs->audio_sample_rate) + 32;
-               int const out_buffer_size_bytes = out_buffer_size_frames * _fs->audio_channels * _fs->bytes_per_sample();
-               out_buffer = new uint8_t[out_buffer_size_bytes];
-
-               uint8_t* out[2] = {
-                       out_buffer, 
-                       0
-               };
-
-               /* Resample audio */
-               int out_frames = swr_convert (_swr_context, out, out_buffer_size_frames, in, frames);
-               if (out_frames < 0) {
-                       throw DecodeError ("could not run sample-rate converter");
+       case AV_SAMPLE_FMT_FLTP:
+       {
+               float* p = reinterpret_cast<float*> (data);
+               for (int i = 0; i < _fs->audio_channels(); ++i) {
+                       memcpy (audio->data(i), p, frames * sizeof(float));
+                       p += frames;
                }
+       }
+       break;
 
-               /* And point our variables at the resampled audio */
-               data = out_buffer;
-               size = out_frames * _fs->audio_channels * _fs->bytes_per_sample();
+       default:
+               assert (false);
        }
-               
-       /* Update the number of audio frames we've pushed to the encoder */
-       _audio_frames_processed += size / (_fs->audio_channels * _fs->bytes_per_sample ());
 
-       /* Push into the delay line and then tell the world what we've got */
-       int available = _delay_line->feed (data, size);
-       Audio (data, available);
+       /* Maybe apply gain */
+       if (_fs->audio_gain() != 0) {
+               float const linear_gain = pow (10, _fs->audio_gain() / 20);
+               for (int i = 0; i < _fs->audio_channels(); ++i) {
+                       for (int j = 0; j < frames; ++j) {
+                               audio->data(i)[j] *= linear_gain;
+                       }
+               }
+       }
 
-       /* Delete the sample-rate conversion buffer, if it exists */
-       delete[] out_buffer;
+       /* Update the number of audio frames we've pushed to the encoder */
+       _audio_frames_processed += frames;
+
+       Audio (audio);
 }
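
For reference (outside the patch): the gain step in emit_audio() turns the film's gain in dB into a linear factor using 10^(dB/20), so +6dB is roughly a doubling of each sample. A minimal sketch of that conversion; the 6dB figure and the 0.25 sample value are made up:

    #include <cmath>
    #include <cassert>

    int main ()
    {
            float const audio_gain = 6;             /* example gain in dB; the real value comes from FilmState::audio_gain() */
            float const linear_gain = pow (10, audio_gain / 20);   /* same formula as emit_audio(); ~1.995 */

            float sample = 0.25;                    /* one deinterleaved float sample in [-1, 1) */
            sample *= linear_gain;                  /* ~0.499 */

            assert (sample > 0.49 && sample < 0.51);
            return 0;
    }
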
 
 /** Called by subclasses to tell the world that some video data is ready.
@@ -325,7 +283,7 @@ Decoder::process_video (AVFrame* frame)
 
        int gap = 0;
        if (_opt->decode_video_frequency != 0) {
-               gap = _fs->length / _opt->decode_video_frequency;
+               gap = _fs->length() / _opt->decode_video_frequency;
        }
 
        if (_opt->decode_video_frequency != 0 && gap != 0 && (_video_frame % gap) != 0) {
@@ -333,16 +291,14 @@ Decoder::process_video (AVFrame* frame)
                return;
        }
 
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 61
+#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 53 && LIBAVFILTER_VERSION_MINOR <= 61
 
        if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0) < 0) {
                throw DecodeError ("could not push buffer into filter chain.");
        }
 
-#else  
+#elif LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
 
-#if 0
-       
        AVRational par;
        par.num = sample_aspect_ratio_numerator ();
        par.den = sample_aspect_ratio_denominator ();
@@ -351,7 +307,7 @@ Decoder::process_video (AVFrame* frame)
                throw DecodeError ("could not push buffer into filter chain.");
        }
 
-#endif
+#else
 
        if (av_buffersrc_write_frame (_buffer_src_context, frame) < 0) {
                throw DecodeError ("could not push buffer into filter chain.");
@@ -359,14 +315,13 @@ Decoder::process_video (AVFrame* frame)
 
 #endif 
        
-//#ifdef DVDOMATIC_FFMPEG_0_8_3
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 61  
+#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 15 && LIBAVFILTER_VERSION_MINOR <= 61       
        while (avfilter_poll_frame (_buffer_sink_context->inputs[0])) {
 #else
        while (av_buffersink_read (_buffer_sink_context, 0)) {
 #endif         
 
-#ifdef DVDOMATIC_FFMPEG_0_8_3
+#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 15
                
                int r = avfilter_request_frame (_buffer_sink_context->inputs[0]);
                if (r < 0) {
@@ -392,7 +347,13 @@ Decoder::process_video (AVFrame* frame)
                                image->make_black ();
                        }
 
-                       Video (image, _video_frame);
+                       shared_ptr<Subtitle> sub;
+                       if (_timed_subtitle && _timed_subtitle->displayed_at (double (last_video_frame()) / rint (_fs->frames_per_second()))) {
+                               sub = _timed_subtitle->subtitle ();
+                       }
+
+                       TIMING ("Decoder emits %1", _video_frame);
+                       Video (image, _video_frame, sub);
                        ++_video_frame;
                }
        }
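
For illustration (not from the patch): displayed_at() above is asked whether the subtitle covers the current video time, computed as frame index divided by the rounded frame rate, so frame 240 at a rounded 24fps is the 10-second mark. A small sketch of that test, with a hypothetical from/to interval standing in for TimedSubtitle:

    #include <cassert>
    #include <cmath>

    /* Hypothetical interval test standing in for TimedSubtitle::displayed_at() */
    static bool displayed_at (double t, double from, double to)
    {
            return t >= from && t <= to;
    }

    int main ()
    {
            float const frames_per_second = 23.976;
            int const video_frame = 240;

            /* Same conversion as process_video(): frame index to seconds, using the rounded frame rate */
            double const t = double (video_frame) / rint (frames_per_second);

            assert (displayed_at (t, 9.5, 12.0));   /* a subtitle shown from 9.5s to 12s would be attached here */
            return 0;
    }
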
@@ -410,13 +371,13 @@ Decoder::setup_video_filters ()
        
        if (_opt->apply_crop) {
                size_after_crop = _fs->cropped_size (native_size ());
-               fs << crop_string (Position (_fs->crop.left, _fs->crop.top), size_after_crop);
+               fs << crop_string (Position (_fs->crop().left, _fs->crop().top), size_after_crop);
        } else {
                size_after_crop = native_size ();
                fs << crop_string (Position (0, 0), size_after_crop);
        }
 
-       string filters = Filter::ffmpeg_strings (_fs->filters).first;
+       string filters = Filter::ffmpeg_strings (_fs->filters()).first;
        if (!filters.empty ()) {
                filters += ",";
        }
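
Aside (not part of the patch, numbers hypothetical): ffmpeg's stock crop filter takes crop=out_w:out_h:x:y, so cropping 140 pixels off the top and bottom of a 1920x1080 source corresponds to a fragment along the lines of

    crop=1920:800:0:140

which the code above joins to any user-selected filters from Filter::ffmpeg_strings() with commas; the exact text comes from crop_string().
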
@@ -447,12 +408,18 @@ Decoder::setup_video_filters ()
          << sample_aspect_ratio_denominator();
 
        int r;
+
        if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
                throw DecodeError ("could not create buffer source");
        }
 
-       enum PixelFormat pixel_formats[] = { pixel_format(), PIX_FMT_NONE };
-       if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, "out", 0, pixel_formats, graph) < 0) {
+       AVBufferSinkParams* sink_params = av_buffersink_params_alloc ();
+       PixelFormat* pixel_fmts = new PixelFormat[2];
+       pixel_fmts[0] = pixel_format ();
+       pixel_fmts[1] = PIX_FMT_NONE;
+       sink_params->pixel_fmts = pixel_fmts;
+       
+       if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, "out", 0, sink_params, graph) < 0) {
                throw DecodeError ("could not create buffer sink.");
        }
 
@@ -469,15 +436,17 @@ Decoder::setup_video_filters ()
        inputs->next = 0;
 
        _log->log ("Using filter chain `" + filters + "'");
-#ifdef DVDOMATIC_FFMPEG_0_8_3  
+
+#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
        if (avfilter_graph_parse (graph, filters.c_str(), inputs, outputs, 0) < 0) {
-#else
+               throw DecodeError ("could not set up filter graph.");
+       }
+#else  
        if (avfilter_graph_parse (graph, filters.c_str(), &inputs, &outputs, 0) < 0) {
-#endif         
-               
                throw DecodeError ("could not set up filter graph.");
        }
-
+#endif 
+       
        if (avfilter_graph_config (graph, 0) < 0) {
                throw DecodeError ("could not configure filter graph.");
        }
@@ -485,3 +454,20 @@ Decoder::setup_video_filters ()
        /* XXX: leaking `inputs' / `outputs' ? */
 }
 
+void
+Decoder::process_subtitle (shared_ptr<TimedSubtitle> s)
+{
+       _timed_subtitle = s;
+       
+       if (_opt->apply_crop) {
+               Position const p = _timed_subtitle->subtitle()->position ();
+               _timed_subtitle->subtitle()->set_position (Position (p.x - _fs->crop().left, p.y - _fs->crop().top));
+       }
+}
+
+
+int
+Decoder::bytes_per_audio_sample () const
+{
+       return av_get_bytes_per_sample (audio_sample_format ());
+}
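
Aside (not part of the patch): process_subtitle() shifts the stored subtitle into the cropped frame's coordinate space by subtracting the left and top crop from its position. A minimal sketch of the same shift, using a stand-in Position struct and made-up crop values:

    #include <cassert>

    /* Hypothetical stand-in for the Position type used by Subtitle */
    struct Position
    {
            Position (int x_, int y_) : x (x_), y (y_) {}
            int x;
            int y;
    };

    int main ()
    {
            Position const original (100, 980);   /* subtitle position in the un-cropped frame */
            int const crop_left = 0;              /* made-up crop values; the real ones come from FilmState::crop() */
            int const crop_top = 140;

            /* Same shift as Decoder::process_subtitle() */
            Position const adjusted (original.x - crop_left, original.y - crop_top);

            assert (adjusted.x == 100 && adjusted.y == 840);
            return 0;
    }
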