Merge master; fix crash on new film.
[dcpomatic.git] / src / lib / encoder.cc
index cff9899acb6151cc19990f372c0a091c7d63968f..c1d1041ae539f9cdab1f087291eb2d8d7104abbb 100644 (file)
@@ -27,7 +27,6 @@
 #include <libdcp/picture_asset.h>
 #include "encoder.h"
 #include "util.h"
-#include "options.h"
 #include "film.h"
 #include "log.h"
 #include "exceptions.h"
@@ -38,6 +37,8 @@
 #include "format.h"
 #include "cross.h"
 #include "writer.h"
+#include "player.h"
+#include "audio_mapping.h"
 
 #include "i18n.h"
 
@@ -48,7 +49,8 @@ using std::vector;
 using std::list;
 using std::cout;
 using std::make_pair;
-using namespace boost;
+using boost::shared_ptr;
+using boost::optional;
 
 int const Encoder::_history_size = 25;
 
@@ -77,22 +79,29 @@ Encoder::~Encoder ()
 void
 Encoder::process_begin ()
 {
-       if (_film->audio_stream() && _film->audio_stream()->sample_rate() != _film->target_audio_sample_rate()) {
+       if (_film->has_audio() && _film->audio_frame_rate() != _film->target_audio_sample_rate()) {
 #ifdef HAVE_SWRESAMPLE
 
                stringstream s;
-               s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_stream()->sample_rate(), _film->target_audio_sample_rate());
+               s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_frame_rate(), _film->target_audio_sample_rate());
                _film->log()->log (s.str ());
 
-               /* We will be using planar float data when we call the resampler */
+               /* We will be using planar float data when we call the
+                  resampler.  As far as I can see, the audio channel
+                  layout is not necessary for our purposes; it seems
+                  only to be used to get the number of channels and
+                  decide if rematrixing is needed.  It won't be, since
+                  input and output layouts are the same.
+               */
+
                _swr_context = swr_alloc_set_opts (
                        0,
-                       _film->audio_stream()->channel_layout(),
+                       av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
                        AV_SAMPLE_FMT_FLTP,
                        _film->target_audio_sample_rate(),
-                       _film->audio_stream()->channel_layout(),
+                       av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
                        AV_SAMPLE_FMT_FLTP,
-                       _film->audio_stream()->sample_rate(),
+                       _film->audio_frame_rate(),
                        0, 0
                        );
                
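An illustrative aside on the hunk above (not part of the patch): both channel layouts passed to swr_alloc_set_opts() now come from the same DCP channel count via av_get_default_channel_layout(), so libswresample only changes the sample rate and never has to rematrix, which is what the new comment argues.  A minimal sketch of that configuration, with the function name and the error handling assumed rather than taken from the source:

extern "C" {
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}
#include <stdexcept>

static SwrContext *
make_rate_converter (int dcp_channels, int input_rate, int output_rate)
{
	/* Same default layout on both sides, so only the sample rate differs */
	int64_t const layout = av_get_default_channel_layout (dcp_channels);

	SwrContext* context = swr_alloc_set_opts (
		0,
		layout, AV_SAMPLE_FMT_FLTP, output_rate,
		layout, AV_SAMPLE_FMT_FLTP, input_rate,
		0, 0
		);

	if (!context || swr_init (context) < 0) {
		throw std::runtime_error ("could not set up sample-rate converter");
	}

	return context;
}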
@@ -126,9 +135,9 @@ void
 Encoder::process_end ()
 {
 #if HAVE_SWRESAMPLE    
-       if (_film->audio_stream() && _film->audio_stream()->channels() && _swr_context) {
+       if (_film->has_audio() && _swr_context) {
 
-               shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_stream()->channels(), 256));
+               shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_mapping().dcp_channels(), 256));
                        
                while (1) {
                        int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
@@ -142,7 +151,7 @@ Encoder::process_end ()
                        }
 
                        out->set_frames (frames);
-                       write_audio (out);
+                       _writer->write (out);
                }
 
                swr_free (&_swr_context);
@@ -193,7 +202,7 @@ Encoder::process_end ()
  *  or 0 if not known.
  */
 float
-Encoder::current_frames_per_second () const
+Encoder::current_encoding_rate () const
 {
        boost::mutex::scoped_lock lock (_history_mutex);
        if (int (_time_history.size()) < _history_size) {
@@ -231,9 +240,9 @@ Encoder::frame_done ()
 }
 
 void
-Encoder::process_video (shared_ptr<const Image> image, bool same, boost::shared_ptr<Subtitle> sub)
+Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub)
 {
-       FrameRateConversion frc (_film->source_frame_rate(), _film->dcp_frame_rate());
+       FrameRateConversion frc (_film->video_frame_rate(), _film->dcp_frame_rate());
        
        if (frc.skip && (_video_frames_in % 2)) {
                ++_video_frames_in;
@@ -269,7 +278,7 @@ Encoder::process_video (shared_ptr<const Image> image, bool same, boost::shared_
                /* Queue this new frame for encoding */
                pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
                TIMING ("adding to queue of %1", _queue.size ());
-               _queue.push_back (boost::shared_ptr<DCPVideoFrame> (
+               _queue.push_back (shared_ptr<DCPVideoFrame> (
                                          new DCPVideoFrame (
                                                  image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
                                                  _film->subtitle_offset(), _film->subtitle_scale(),
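For context on the frc.skip test that this hunk leaves unchanged: when the source video rate is roughly double the DCP rate, every second source frame is dropped before encoding.  A hypothetical numeric illustration (the 50/25 fps figures and the helper are not from the patch):

/* A 50 fps source targeting a 25 fps DCP drops every odd-numbered input
   frame, mirroring the (_video_frames_in % 2) test above. */
static int
frames_after_skip (int source_frames)
{
	int output_frames = 0;
	for (int i = 0; i < source_frames; ++i) {
		if (i % 2) {
			continue;	/* skipped, just like the odd frames above */
		}
		++output_frames;
	}
	return output_frames;
}

/* frames_after_skip (100) == 50: two seconds at 50 fps become two seconds at 25 fps */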
@@ -301,9 +310,9 @@ Encoder::process_audio (shared_ptr<const AudioBuffers> data)
        if (_swr_context) {
 
                /* Compute the resampled frames count and add 32 for luck */
-               int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_stream()->sample_rate()) + 32;
+               int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_frame_rate()) + 32;
 
-               shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_stream()->channels(), max_resampled_frames));
+               shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_mapping().dcp_channels(), max_resampled_frames));
 
                /* Resample audio */
                int const resampled_frames = swr_convert (
@@ -321,7 +330,7 @@ Encoder::process_audio (shared_ptr<const AudioBuffers> data)
        }
 #endif
 
-       write_audio (data);
+       _writer->write (data);
 }
 
 void
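A quick worked check of the buffer sizing above, with illustrative figures not taken from the patch: 2000 input frames at 44100 Hz resampled to a 48000 Hz target need at most ceil(2000 × 48000 / 44100) = 2177 frames.  If the accessors return integers, as sample rates and frame counts usually do, the division in the hunk truncates to 2176 before ceil() sees it, so the 32-frame 'for luck' headroom also covers that rounding as well as libswresample's internal buffering.  A small sketch of the same calculation done in floating point:

#include <cmath>
#include <stdint.h>

/* Hypothetical helper showing the sizing rule: scale the input length by the
   rate ratio, round up, and add 32 frames of headroom.  Dividing in floating
   point lets ceil() actually round up. */
static int
max_resampled_frames (int input_frames, int input_rate, int target_rate)
{
	return ceil ((int64_t) input_frames * target_rate / double (input_rate)) + 32;
}

/* max_resampled_frames (2000, 44100, 48000) == 2209 */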
@@ -362,7 +371,7 @@ Encoder::encoder_thread (ServerDescription* server)
                }
 
                TIMING ("encoder thread %1 wakes with queue of %2", boost::this_thread::get_id(), _queue.size());
-               boost::shared_ptr<DCPVideoFrame> vf = _queue.front ();
+               shared_ptr<DCPVideoFrame> vf = _queue.front ();
                _film->log()->log (String::compose (N_("Encoder thread %1 pops frame %2 from queue"), boost::this_thread::get_id(), vf->frame()), Log::VERBOSE);
                _queue.pop_front ();
                
@@ -416,34 +425,10 @@ Encoder::encoder_thread (ServerDescription* server)
                }
 
                if (remote_backoff > 0) {
-                       dvdomatic_sleep (remote_backoff);
+                       dcpomatic_sleep (remote_backoff);
                }
 
                lock.lock ();
                _condition.notify_all ();
        }
 }
-
-void
-Encoder::write_audio (shared_ptr<const AudioBuffers> data)
-{
-       AudioMapping m (_film->audio_channels ());
-       if (m.dcp_channels() != _film->audio_channels()) {
-
-               /* Remap (currently just for mono -> 5.1) */
-
-               shared_ptr<AudioBuffers> b (new AudioBuffers (m.dcp_channels(), data->frames ()));
-               for (int i = 0; i < m.dcp_channels(); ++i) {
-                       optional<int> s = m.dcp_to_source (static_cast<libdcp::Channel> (i));
-                       if (!s) {
-                               b->make_silent (i);
-                       } else {
-                               memcpy (b->data()[i], data->data()[s.get()], data->frames() * sizeof(float));
-                       }
-               }
-
-               data = b;
-       }
-
-       _writer->write (data);
-}