No need to rebuild pieces when only crop or ratio changes.
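The behavioural change described by the commit message lives in Player::content_changed(): position, length and trim changes still invalidate the piece list, while crop and ratio changes now only emit Changed(). A condensed sketch of the new logic (names follow the hunk further down; this is not the literal patch, which also handles subtitle and frame-type properties):

void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	if (!w.lock ()) {
		return;
	}

	if (
		property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
		) {
		/* Piece video/audio positions are derived from these, so the
		   piece list must be rebuilt on the next pass().
		*/
		_have_valid_pieces = false;
		Changed (frequent);
	} else if (property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO) {
		/* Crop and ratio are applied per-frame in process_video(),
		   so emitting Changed() is enough; no rebuild is needed.
		*/
		Changed (frequent);
	}
}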
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 1ffced4252b6cbce0a3cc333499619ba215b8fcc..d13ae5f3cb8588fbb6b96dab72ed9f54c22745d4 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -24,6 +24,8 @@
 #include "ffmpeg_content.h"
 #include "still_image_decoder.h"
 #include "still_image_content.h"
+#include "moving_image_decoder.h"
+#include "moving_image_content.h"
 #include "sndfile_decoder.h"
 #include "sndfile_content.h"
 #include "subtitle_content.h"
@@ -32,6 +34,7 @@
 #include "image.h"
 #include "ratio.h"
 #include "resampler.h"
+#include "log.h"
 #include "scaler.h"
 
 using std::list;
@@ -45,22 +48,22 @@ using boost::shared_ptr;
 using boost::weak_ptr;
 using boost::dynamic_pointer_cast;
 
-#define DEBUG_PLAYER 1
+//#define DEBUG_PLAYER 1
 
 class Piece
 {
 public:
        Piece (shared_ptr<Content> c)
                : content (c)
-               , video_position (c->start ())
-               , audio_position (c->start ())
+               , video_position (c->position ())
+               , audio_position (c->position ())
        {}
        
        Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
                : content (c)
                , decoder (d)
-               , video_position (c->start ())
-               , audio_position (c->start ())
+               , video_position (c->position ())
+               , audio_position (c->position ())
        {}
        
        shared_ptr<Content> content;
@@ -80,7 +83,7 @@ std::ostream& operator<<(std::ostream& s, Piece const & p)
                s << "\tsndfile    ";
        }
        
-       s << " at " << p.content->start() << " until " << p.content->end();
+       s << " at " << p.content->position() << " until " << p.content->end();
        
        return s;
 }
@@ -94,12 +97,13 @@ Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        , _have_valid_pieces (false)
        , _video_position (0)
        , _audio_position (0)
-       , _audio_buffers (f->dcp_audio_channels(), 0)
+       , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
+       , _last_emit_was_black (false)
 {
-       _playlist->Changed.connect (bind (&Player::playlist_changed, this));
-       _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
-       _film->Changed.connect (bind (&Player::film_changed, this, _1));
-       set_video_container_size (_film->container()->size (_film->full_frame ()));
+       _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
+       _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
+       _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
+       set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
 }
 
 void
@@ -173,7 +177,7 @@ Player::pass ()
                        emit_black ();
                } else {
 #ifdef DEBUG_PLAYER
-                       cout << "Pass " << *earliest << "\n";
+                       cout << "Pass video " << *earliest << "\n";
 #endif                 
                        earliest->decoder->pass ();
                }
@@ -182,27 +186,52 @@ Player::pass ()
        case AUDIO:
                if (earliest_t > _audio_position) {
 #ifdef DEBUG_PLAYER
-                       cout << "no audio here; emitting silence.\n";
+                       cout << "no audio here (none until " << earliest_t << "); emitting silence.\n";
 #endif
                        emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
                } else {
 #ifdef DEBUG_PLAYER
-                       cout << "Pass " << *earliest << "\n";
-#endif                 
+                       cout << "Pass audio " << *earliest << "\n";
+#endif
                        earliest->decoder->pass ();
+
+                       if (earliest->decoder->done()) {
+                               shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
+                               assert (ac);
+                               shared_ptr<Resampler> re = resampler (ac, false);
+                               if (re) {
+                                       shared_ptr<const AudioBuffers> b = re->flush ();
+                                       if (b->frames ()) {
+                                               process_audio (earliest, b, ac->audio_length ());
+                                       }
+                               }
+                       }
                }
                break;
        }
 
+       if (_audio) {
+               Time audio_done_up_to = TIME_MAX;
+               for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+                       if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
+                               audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
+                       }
+               }
+
+               TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
+               Audio (tb.audio, tb.time);
+               _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+       }
+               
 #ifdef DEBUG_PLAYER
-       cout << "\tpost pass " << _video_position << " " << _audio_position << "\n";
+       cout << "\tpost pass _video_position=" << _video_position << " _audio_position=" << _audio_position << "\n";
 #endif 
 
        return false;
 }
 
 void
-Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, bool same, VideoContent::Frame frame)
+Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
 {
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
@@ -212,19 +241,28 @@ Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image
        shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
        assert (content);
 
-       FrameRateConversion frc (content->video_frame_rate(), _film->dcp_video_frame_rate());
+       FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
        if (frc.skip && (frame % 2) == 1) {
                return;
        }
 
-       shared_ptr<Image> work_image = image->crop (content->crop(), true);
+       Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
+       if (content->trimmed (relative_time)) {
+               return;
+       }
+
+       /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
+       shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
 
-       libdcp::Size const image_size = content->ratio()->size (_video_container_size);
-       
-       work_image = work_image->scale_and_convert_to_rgb (image_size, _film->scaler(), true);
+       work_image = work_image->crop (content->crop(), true);
 
-       Time time = content->start() + (frame * frc.factor() * TIME_HZ / _film->dcp_video_frame_rate());
+       float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
+       libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
        
+       work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
+
+       Time time = content->position() + relative_time - content->trim_start ();
+           
        if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
                work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
        }
@@ -240,16 +278,18 @@ Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image
 
 #ifdef DCPOMATIC_DEBUG
        _last_video = piece->content;
-#endif 
+#endif
 
-       Video (work_image, same, time);
-       time += TIME_HZ / _film->dcp_video_frame_rate();
+       Video (work_image, eyes, content->colour_conversion(), same, time);
+       time += TIME_HZ / _film->video_frame_rate();
 
        if (frc.repeat) {
-               Video (work_image, true, time);
-               time += TIME_HZ / _film->dcp_video_frame_rate();
+               Video (work_image, eyes, content->colour_conversion(), true, time);
+               time += TIME_HZ / _film->video_frame_rate();
        }
 
+       _last_emit_was_black = false;
+
        _video_position = piece->video_position = time;
 }
 
@@ -264,14 +304,31 @@ Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers
        shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
        assert (content);
 
+       /* Gain */
+       if (content->audio_gain() != 0) {
+               shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
+               gain->apply_gain (content->audio_gain ());
+               audio = gain;
+       }
+
        /* Resample */
        if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
-               shared_ptr<Resampler> r = resampler (content);
-               audio = r->run (audio);
+               shared_ptr<Resampler> r = resampler (content, true);
+               pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
+               audio = ro.first;
+               frame = ro.second;
+       }
+       
+       Time const relative_time = _film->audio_frames_to_time (frame);
+
+       if (content->trimmed (relative_time)) {
+               return;
        }
 
+       Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
+       
        /* Remap channels */
-       shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->dcp_audio_channels(), audio->frames()));
+       shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
        dcp_mapped->make_silent ();
        list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
        for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
@@ -282,11 +339,9 @@ Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers
 
        audio = dcp_mapped;
 
-       Time time = content->start() + (frame * TIME_HZ / _film->dcp_audio_frame_rate()) + (content->audio_delay() * TIME_HZ / 1000);
-
        /* We must cut off anything that comes before the start of all time */
        if (time < 0) {
-               int const frames = - time * _film->dcp_audio_frame_rate() / TIME_HZ;
+               int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
                if (frames >= audio->frames ()) {
                        return;
                }
@@ -298,48 +353,17 @@ Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers
                time = 0;
        }
 
-       /* The time of this audio may indicate that some of our buffered audio is not going to
-          be added to any more, so it can be emitted.
-       */
-
-       if (time > _audio_position) {
-               /* We can emit some audio from our buffers */
-               OutputAudioFrame const N = _film->time_to_audio_frames (time - _audio_position);
-               if (N > _audio_buffers.frames()) {
-                       /* We need some extra silence before whatever is in the buffers */
-                       _audio_buffers.ensure_size (N);
-                       _audio_buffers.move (0, N - _audio_buffers.frames(), _audio_buffers.frames ());
-                       _audio_buffers.make_silent (0, _audio_buffers.frames());
-                       _audio_buffers.set_frames (N);
-               }
-               assert (N <= _audio_buffers.frames());
-               shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), N));
-               emit->copy_from (&_audio_buffers, N, 0, 0);
-               Audio (emit, _audio_position);
-               _audio_position = piece->audio_position = time + _film->audio_frames_to_time (N);
-
-               /* And remove it from our buffers */
-               if (_audio_buffers.frames() > N) {
-                       _audio_buffers.move (N, 0, _audio_buffers.frames() - N);
-               }
-               _audio_buffers.set_frames (_audio_buffers.frames() - N);
-       }
-
-       /* Now accumulate the new audio into our buffers */
-       _audio_buffers.ensure_size (_audio_buffers.frames() + audio->frames());
-       _audio_buffers.accumulate_frames (audio.get(), 0, 0, audio->frames ());
-       _audio_buffers.set_frames (_audio_buffers.frames() + audio->frames());
+       _audio_merger.push (audio, time);
+       piece->audio_position += _film->audio_frames_to_time (audio->frames ());
 }
 
 void
 Player::flush ()
 {
-       if (_audio_buffers.frames() > 0) {
-               shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), _audio_buffers.frames()));
-               emit->copy_from (&_audio_buffers, _audio_buffers.frames(), 0, 0);
-               Audio (emit, _audio_position);
-               _audio_position += _film->audio_frames_to_time (_audio_buffers.frames ());
-               _audio_buffers.set_frames (0);
+       TimedAudioBuffers<Time> tb = _audio_merger.flush ();
+       if (tb.audio) {
+               Audio (tb.audio, tb.time);
+               _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
        }
 
        while (_video_position < _audio_position) {
@@ -374,18 +398,18 @@ Player::seek (Time t, bool accurate)
                        continue;
                }
                
-               Time s = t - vc->start ();
+               Time s = t - vc->position ();
                s = max (static_cast<Time> (0), s);
-               s = min (vc->length(), s);
+               s = min (vc->length_after_trim(), s);
 
-               (*i)->video_position = (*i)->audio_position = vc->start() + s;
+               (*i)->video_position = (*i)->audio_position = vc->position() + s;
 
-               FrameRateConversion frc (vc->video_frame_rate(), _film->dcp_video_frame_rate());
+               FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
                /* Here we are converting from time (in the DCP) to a frame number in the content.
                   Hence we need to use the DCP's frame rate and the double/skip correction, not
                   the source's rate.
                */
-               VideoContent::Frame f = s * _film->dcp_video_frame_rate() / (frc.factor() * TIME_HZ);
+               VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
                dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
        }
 
@@ -414,7 +438,7 @@ Player::setup_pieces ()
                if (fc) {
                        shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
                        
-                       fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3));
+                       fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
                        fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
                        fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
 
@@ -435,12 +459,24 @@ Player::setup_pieces ()
 
                        if (!id) {
                                id.reset (new StillImageDecoder (_film, ic));
-                               id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3));
+                               id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
                        }
 
                        piece->decoder = id;
                }
 
+               shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
+               if (mc) {
+                       shared_ptr<MovingImageDecoder> md;
+
+                       if (!md) {
+                               md.reset (new MovingImageDecoder (_film, mc));
+                               md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
+                       }
+
+                       piece->decoder = md;
+               }
+
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
@@ -469,16 +505,24 @@ Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
        }
 
        if (
-               property == ContentProperty::START || property == ContentProperty::LENGTH ||
-               property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO
+               property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
+               property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
                ) {
                
                _have_valid_pieces = false;
                Changed (frequent);
 
        } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
+
                update_subtitle ();
                Changed (frequent);
+
+       } else if (
+               property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
+               property == VideoContentProperty::VIDEO_RATIO
+               ) {
+               
+               Changed (frequent);
        }
 }
 
@@ -498,12 +542,16 @@ Player::set_video_container_size (libdcp::Size s)
 }
 
 shared_ptr<Resampler>
-Player::resampler (shared_ptr<AudioContent> c)
+Player::resampler (shared_ptr<AudioContent> c, bool create)
 {
        map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
        if (i != _resamplers.end ()) {
                return i->second;
        }
+
+       if (!create) {
+               return shared_ptr<Resampler> ();
+       }
        
        shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
        _resamplers[c] = r;
@@ -517,16 +565,20 @@ Player::emit_black ()
        _last_video.reset ();
 #endif
        
-       /* XXX: use same here */
-       Video (_black_frame, false, _video_position);
+       Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
        _video_position += _film->video_frames_to_time (1);
+       _last_emit_was_black = true;
 }
 
 void
 Player::emit_silence (OutputAudioFrame most)
 {
-       OutputAudioFrame N = min (most, _film->dcp_audio_frame_rate() / 2);
-       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->dcp_audio_channels(), N));
+       if (most == 0) {
+               return;
+       }
+       
+       OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
+       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
        silence->make_silent ();
        Audio (silence, _audio_position);
        _audio_position += _film->audio_frames_to_time (N);
@@ -597,7 +649,12 @@ Player::update_subtitle ()
        _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
        _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
        
-       _out_subtitle.image = _in_subtitle.image->scale (libdcp::Size (scaled_size.width, scaled_size.height), Scaler::from_id ("bicubic"), true);
-       _out_subtitle.from = _in_subtitle.from + piece->content->start ();
-       _out_subtitle.to = _in_subtitle.to + piece->content->start ();
+       _out_subtitle.image = _in_subtitle.image->scale (
+               scaled_size,
+               Scaler::from_id ("bicubic"),
+               _in_subtitle.image->pixel_format (),
+               true
+               );
+       _out_subtitle.from = _in_subtitle.from + piece->content->position ();
+       _out_subtitle.to = _in_subtitle.to + piece->content->position ();
 }
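Both the container size (in the constructor) and the per-content image size (in process_video()) are now derived with fit_ratio_within() rather than Ratio::size(). A minimal sketch of what that helper is assumed to do, namely return the largest box of the requested aspect ratio that fits inside the given full size (not dcpomatic's actual implementation; the libdcp header path is an assumption):

#include <libdcp/util.h>   /* libdcp::Size; header path assumed */
#include <cmath>

static libdcp::Size
fit_ratio_within (float ratio, libdcp::Size full)
{
	if (full.width / ratio <= full.height) {
		/* Limited by width: keep the full width and derive the height */
		return libdcp::Size (full.width, rint (full.width / ratio));
	}

	/* Limited by height: keep the full height and derive the width */
	return libdcp::Size (rint (full.height * ratio), full.height);
}

For example, fitting a 1.33:1 image inside a 1998x1080 flat container gives roughly 1436x1080, which is the size process_video() scales the cropped frame to before overlaying any subtitle.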