Make a new AudioBuffers constructor and use it to avoid some use of the more complicated...
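
The new constructor referred to here is the one the updated call sites below use: it takes a source buffer, a number of frames to copy and a read offset. A minimal sketch of what it might look like, assuming it delegates to the existing (channels, frames) constructor and the copy_from() method used by the code it replaces (the exact signature and types are assumptions, not part of this diff):

/* Sketch only, not taken from this commit: copy 'frames_to_copy' frames
   from 'other', starting at 'read_offset', into a freshly-allocated buffer
   with the same channel count. */
AudioBuffers::AudioBuffers (shared_ptr<const AudioBuffers> other, int32_t frames_to_copy, int32_t read_offset)
	: AudioBuffers (other->channels(), frames_to_copy)
{
	copy_from (other.get(), frames_to_copy, read_offset, 0);
}

This keeps the allocate-then-copy pattern in one place rather than repeating it at each call site; see the Player::audio and Player::discard_audio hunks below.
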
diff --git a/src/lib/player.cc b/src/lib/player.cc
index fd9ffb9ed4f65cb23e27affa79001e9fd7317609..304f8c723d86daa5b6059b7496c84b4f978b59b2 100644
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -87,7 +87,7 @@ int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
-       , _suspended (false)
+       , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
@@ -243,25 +243,26 @@ Player::setup_pieces_unlocked ()
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
+
+       /* Cached value to save recalculating it on every ::pass */
+       _film_length = _film->length ();
 }
 
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
        if (type == CHANGE_TYPE_PENDING) {
-               boost::mutex::scoped_lock lm (_mutex);
                /* The player content is probably about to change, so we can't carry on
                   until that has happened and we've rebuilt our pieces.  Stop pass()
                   and seek() from working until then.
                */
-               _suspended = true;
+               ++_suspended;
        } else if (type == CHANGE_TYPE_DONE) {
                /* A change in our content has gone through.  Re-build our pieces. */
                setup_pieces ();
-               _suspended = false;
+               --_suspended;
        } else if (type == CHANGE_TYPE_CANCELLED) {
-               boost::mutex::scoped_lock lm (_mutex);
-               _suspended = false;
+               --_suspended;
        }
 
        Change (type, property, frequent);
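
Changing _suspended from a bool to a counter makes overlapping change notifications safe: each CHANGE_TYPE_PENDING increments it and each matching DONE or CANCELLED decrements it, so pass() and seek() stay blocked until every outstanding change has settled. A toy sketch of the idea (a hypothetical helper, not part of DCP-o-matic):

/* Hypothetical illustration of the counting scheme: with a plain bool, the
   first of several overlapping changes to complete would wrongly resume the
   player; with a counter it resumes only when the count returns to zero. */
struct SuspendCount
{
	int outstanding = 0;

	void pending ()   { ++outstanding; }   /* CHANGE_TYPE_PENDING */
	void done ()      { --outstanding; }   /* CHANGE_TYPE_DONE */
	void cancelled () { --outstanding; }   /* CHANGE_TYPE_CANCELLED */

	bool suspended () const { return outstanding > 0; }
};
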
@@ -564,13 +565,14 @@ bool
 Player::pass ()
 {
        boost::mutex::scoped_lock lm (_mutex);
+       DCPOMATIC_ASSERT (_film_length);
 
        if (_suspended) {
                /* We can't pass in this state */
                return false;
        }
 
-       if (_playlist->length(_film) == DCPTime()) {
+       if (*_film_length == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
@@ -648,8 +650,16 @@ Player::pass ()
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
+                          I think this is nothing to worry about since we will just add or
+                          remove a little silence at the end of some content.
                        */
-                       DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
+                       int64_t const error = labs(period.from.get() - _last_audio_time->get());
+                       /* Let's not worry about less than a frame at 24fps */
+                       int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
+                       if (error >= too_much_error) {
+                               _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+                       }
+                       DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_last_audio_time;
                }
                if (period.duration() > one_video_frame()) {
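
For scale: assuming DCPTime counts in 96000ths of a second (DCP-o-matic's usual DCPTime resolution), DCPTime::from_frames(1, 24).get() works out to 96000 / 24 = 4000 units, or about 41.7ms. Drift between the end of the last audio and the start of the silence smaller than one 24fps frame is therefore absorbed silently; anything larger is logged and then trips the assertion.
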
@@ -669,7 +679,7 @@ Player::pass ()
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
-       DCPTime pull_to = _film->length ();
+       DCPTime pull_to = *_film_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
@@ -890,9 +900,7 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
                if (remaining_frames == 0) {
                        return;
                }
-               shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
-               cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
-               content_audio.audio = cut;
+               content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
        }
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -945,8 +953,15 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
 
        PlayerText ps;
        shared_ptr<Image> image = subtitle.sub.image;
+
        /* We will scale the subtitle up to fit _video_container_size */
-       dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
+       int const width = subtitle.sub.rectangle.width * _video_container_size.width;
+       int const height = subtitle.sub.rectangle.height * _video_container_size.height;
+       if (width == 0 || height == 0) {
+               return;
+       }
+
+       dcp::Size scaled_size (width, height);
        ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 
@@ -1178,8 +1193,7 @@ Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTi
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
-       shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
-       cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+       shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
        return make_pair(cut, time + discard_time);
 }