Cleanup: use a variable we already made.
[dcpomatic.git] / src/lib/player.cc
index 74e7480f518166a96666d62b8c40f7a7e21714c6..df10ec14612f0d348cc27b70eea3d0402bbd7f39 100644 (file)
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,6 +18,7 @@
 
 */
 
+
 #include "atmos_decoder.h"
 #include "player.h"
 #include "film.h"
 
 #include "i18n.h"
 
+
 using std::copy;
 using std::cout;
 using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::map;
 using std::max;
 using std::min;
@@ -76,6 +77,7 @@ using std::pair;
 using std::shared_ptr;
 using std::vector;
 using std::weak_ptr;
+using std::make_shared;
 using boost::optional;
 using boost::scoped_ptr;
 #if BOOST_VERSION >= 106100
@@ -83,6 +85,7 @@ using namespace boost::placeholders;
 #endif
 using namespace dcpomatic;
 
+
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
 int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
@@ -90,15 +93,18 @@ int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
-Player::Player (shared_ptr<const Film> film)
+
+Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
        : _film (film)
        , _suspended (0)
        , _tolerant (film->tolerant())
        , _audio_merger (_film->audio_frame_rate())
+       , _subtitle_alignment (subtitle_alignment)
 {
        construct ();
 }
 
+
 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
@@ -109,6 +115,7 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
        construct ();
 }
 
+
 void
 Player::construct ()
 {
@@ -120,16 +127,12 @@ Player::construct ()
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());
 
-       film_change (ChangeType::DONE, Film::AUDIO_PROCESSOR);
+       film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
 
        setup_pieces ();
        seek (DCPTime (), true);
 }
 
-Player::~Player ()
-{
-       delete _shuffler;
-}
 
 void
 Player::setup_pieces ()
@@ -145,12 +148,14 @@ have_video (shared_ptr<const Content> content)
        return static_cast<bool>(content->video) && content->video->use();
 }
 
+
 bool
 have_audio (shared_ptr<const Content> content)
 {
        return static_cast<bool>(content->audio);
 }
 
+
 void
 Player::setup_pieces_unlocked ()
 {
@@ -159,8 +164,7 @@ Player::setup_pieces_unlocked ()
        auto old_pieces = _pieces;
        _pieces.clear ();
 
-       delete _shuffler;
-       _shuffler = new Shuffler();
+       _shuffler.reset (new Shuffler());
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
        for (auto i: playlist()->content()) {
@@ -215,7 +219,7 @@ Player::setup_pieces_unlocked ()
                if (decoder->video) {
                        if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
-                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
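
Holding the Shuffler in a std::unique_ptr (the `_shuffler.reset (new Shuffler())` change above) removes the need for the hand-written destructor deleted earlier in this diff: resetting the pointer destroys any previous instance automatically. A unique_ptr cannot be copied into the binder, so the raw pointer from get() is what gets bound; the Player still owns the Shuffler through the unique_ptr. A minimal sketch of that pattern, using a hypothetical Reorderer/Owner pair rather than the real Shuffler and signal types:

#include <functional>
#include <iostream>
#include <memory>

// Hypothetical stand-in for Shuffler: receives data and forwards it on.
struct Reorderer {
    void video (int frame) { std::cout << "reordering frame " << frame << "\n"; }
};

struct Owner {
    std::unique_ptr<Reorderer> reorderer;
    std::function<void (int)> on_data;   // stands in for the decoder's Data signal

    void setup ()
    {
        // reset() destroys any previous instance, as _shuffler.reset(new Shuffler()) does.
        reorderer.reset (new Reorderer());
        // A unique_ptr is non-copyable, so bind the raw pointer instead,
        // as bind(&Shuffler::video, _shuffler.get(), ...) does.
        on_data = std::bind (&Reorderer::video, reorderer.get(), std::placeholders::_1);
    }
};

int main ()
{
    Owner owner;
    owner.setup ();
    owner.on_data (1);   // prints "reordering frame 1"
    owner.setup ();      // old Reorderer is freed automatically; no manual delete needed
    owner.on_data (2);
    return 0;
}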
@@ -255,23 +259,40 @@ Player::setup_pieces_unlocked ()
                }
        }
 
+       for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
+               if (auto video = (*i)->content->video) {
+                       if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
+                               /* Look for content later in the content list with in-use video that overlaps this */
+                               auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
+                               auto j = i;
+                               ++j;
+                               for (; j != _pieces.end(); ++j) {
+                                       if ((*j)->content->video && (*j)->content->video->use()) {
+                                               (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
+                                       }
+                               }
+                       }
+               }
+       }
+
        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
-       _last_video_time = DCPTime ();
+       _last_video_time = boost::optional<dcpomatic::DCPTime>();
        _last_video_eyes = Eyes::BOTH;
-       _last_audio_time = DCPTime ();
+       _last_audio_time = boost::optional<dcpomatic::DCPTime>();
 }
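
The new loop in setup_pieces_unlocked() marks a piece's video as ignorable when a later piece in the content list has in-use video covering the same DCP period, recording the overlapping period in ignore_video so Player::video() can skip those frames. The core of it is the overlap test between two half-open time periods; a minimal sketch of that test, using plain integers in place of dcpomatic::DCPTime and a simplified Period type standing in for DCPTimePeriod:

#include <algorithm>
#include <iostream>
#include <optional>

// Simplified stand-in for DCPTimePeriod, using int instead of DCPTime.
struct Period {
    int from;
    int to;

    // Overlap of two half-open periods, empty if they do not intersect
    // (the same shape as DCPTimePeriod::overlap()).
    std::optional<Period> overlap (Period const& other) const
    {
        int const max_from = std::max (from, other.from);
        int const min_to = std::min (to, other.to);
        if (max_from >= min_to) {
            return {};
        }
        return Period{max_from, min_to};
    }
};

int main ()
{
    Period first{0, 100};     // e.g. a background piece of video content
    Period second{40, 60};    // e.g. a piece later in the list that overlays it

    if (auto o = first.overlap(second)) {
        // The earlier piece would set ignore_video to this period.
        std::cout << "ignore video from " << o->from << " to " << o->to << "\n";
    }
    return 0;
}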
 
+
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
        if (property == VideoContentProperty::CROP) {
                if (type == ChangeType::DONE) {
-                       dcp::Size const vcs = video_container_size();
+                       auto const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
-                       for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
-                               i->first->reset_metadata (_film, vcs);
+                       for (auto const& i: _delay) {
+                               i.first->reset_metadata (_film, vcs);
                        }
                }
        } else {
@@ -293,6 +314,7 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
        Change (type, property, frequent);
 }
 
+
 void
 Player::set_video_container_size (dcp::Size s)
 {
@@ -309,13 +331,14 @@ Player::set_video_container_size (dcp::Size s)
 
                _video_container_size = s;
 
-               _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
+               _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
                _black_image->make_black ();
        }
 
        Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 }
 
+
 void
 Player::playlist_change (ChangeType type)
 {
@@ -325,6 +348,7 @@ Player::playlist_change (ChangeType type)
        Change (type, PlayerProperty::PLAYLIST, false);
 }
 
+
 void
 Player::film_change (ChangeType type, Film::Property p)
 {
@@ -333,9 +357,9 @@ Player::film_change (ChangeType type, Film::Property p)
           last time we were run.
        */
 
-       if (p == Film::CONTAINER) {
+       if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
-       } else if (p == Film::VIDEO_FRAME_RATE) {
+       } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
@@ -343,12 +367,12 @@ Player::film_change (ChangeType type, Film::Property p)
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
-       } else if (p == Film::AUDIO_PROCESSOR) {
+       } else if (p == Film::Property::AUDIO_PROCESSOR) {
                if (type == ChangeType::DONE && _film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
-       } else if (p == Film::AUDIO_CHANNELS) {
+       } else if (p == Film::Property::AUDIO_CHANNELS) {
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
@@ -356,6 +380,7 @@ Player::film_change (ChangeType type, Film::Property p)
        }
 }
 
+
 shared_ptr<PlayerVideo>
 Player::black_player_video_frame (Eyes eyes) const
 {
@@ -375,10 +400,11 @@ Player::black_player_video_frame (Eyes eyes) const
        );
 }
 
+
 Frame
 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
 {
-       DCPTime s = t - piece->content->position ();
+       auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
 
@@ -392,6 +418,7 @@ Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
 }
 
+
 DCPTime
 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 {
@@ -400,15 +427,17 @@ Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
        return d + piece->content->position();
 }
 
+
 Frame
 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
 {
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
-       return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
+       return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
 }
 
+
 DCPTime
 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 {
@@ -418,6 +447,7 @@ Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
                + piece->content->position();
 }
 
+
 ContentTime
 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
 {
@@ -426,12 +456,14 @@ Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
 }
 
+
 DCPTime
 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
 {
        return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
+
 vector<FontData>
 Player::get_subtitle_fonts ()
 {
@@ -449,6 +481,7 @@ Player::get_subtitle_fonts ()
        return fonts;
 }
 
+
 /** Set this player never to produce any video data */
 void
 Player::set_ignore_video ()
@@ -458,6 +491,7 @@ Player::set_ignore_video ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_ignore_audio ()
 {
@@ -466,6 +500,7 @@ Player::set_ignore_audio ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_ignore_text ()
 {
@@ -474,6 +509,7 @@ Player::set_ignore_text ()
        setup_pieces_unlocked ();
 }
 
+
 /** Set the player to always burn open texts into the image regardless of the content settings */
 void
 Player::set_always_burn_open_subtitles ()
@@ -482,6 +518,7 @@ Player::set_always_burn_open_subtitles ()
        _always_burn_open_subtitles = true;
 }
 
+
 /** Sets up the player to be faster, possibly at the expense of quality */
 void
 Player::set_fast ()
@@ -491,6 +528,7 @@ Player::set_fast ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_play_referenced ()
 {
@@ -499,6 +537,7 @@ Player::set_play_referenced ()
        setup_pieces_unlocked ();
 }
 
+
 static void
 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
 {
@@ -512,66 +551,67 @@ maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Fra
        }
 }
 
+
 list<ReferencedReelAsset>
 Player::get_reel_assets ()
 {
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */
 
-       list<ReferencedReelAsset> a;
+       list<ReferencedReelAsset> reel_assets;
 
-       for (auto i: playlist()->content()) {
-               shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
-               if (!j) {
+       for (auto content: playlist()->content()) {
+               auto dcp = dynamic_pointer_cast<DCPContent>(content);
+               if (!dcp) {
                        continue;
                }
 
                scoped_ptr<DCPDecoder> decoder;
                try {
-                       decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
+                       decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
-                       return a;
+                       return reel_assets;
                }
 
-               DCPOMATIC_ASSERT (j->video_frame_rate ());
-               double const cfr = j->video_frame_rate().get();
-               Frame const trim_start = j->trim_start().frames_round (cfr);
-               Frame const trim_end = j->trim_end().frames_round (cfr);
+               DCPOMATIC_ASSERT (dcp->video_frame_rate());
+               double const cfr = dcp->video_frame_rate().get();
+               Frame const trim_start = dcp->trim_start().frames_round(cfr);
+               Frame const trim_end = dcp->trim_end().frames_round(cfr);
                int const ffr = _film->video_frame_rate ();
 
                /* position in the asset from the start */
                int64_t offset_from_start = 0;
-               /* position in the asset from the end */
+               /* position in the asset from the end */
                int64_t offset_from_end = 0;
                for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               for (auto k: decoder->reels()) {
+               for (auto reel: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
-                       int64_t const reel_duration = k->main_picture()->actual_duration();
+                       int64_t const reel_duration = reel->main_picture()->actual_duration();
 
                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
 
-                       DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
-                       if (j->reference_video ()) {
-                               maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
+                       auto const from = content->position() + DCPTime::from_frames(offset_from_start, ffr);
+                       if (dcp->reference_video()) {
+                               maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_audio ()) {
-                               maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_audio()) {
+                               maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TextType::OPEN_SUBTITLE)) {
-                               maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+                               maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TextType::CLOSED_CAPTION)) {
-                               for (auto l: k->closed_captions()) {
-                                       maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+                               for (auto caption: reel->closed_captions()) {
+                                       maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
 
@@ -580,9 +620,10 @@ Player::get_reel_assets ()
                }
        }
 
-       return a;
+       return reel_assets;
 }
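
In get_reel_assets(), reel_trim_start and reel_trim_end clamp the content-level trims into each reel as the loop walks through the referenced DCP, using offset_from_start and offset_from_end to locate the reel within the whole asset. A small worked sketch of that arithmetic, with made-up reel lengths and trims rather than values from a real DCP; the per-reel bookkeeping at the end of the loop is assumed here, since that part of the function is elided from this view:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main ()
{
    // Two reels of a hypothetical referenced DCP, in frames.
    int64_t const reel_durations[] = { 240, 240 };
    int64_t const trim_start = 300;   // frames trimmed from the start of the content
    int64_t const trim_end = 100;     // frames trimmed from the end of the content

    int64_t offset_from_start = 0;
    int64_t offset_from_end = 0;
    for (auto d: reel_durations) {
        offset_from_end += d;
    }

    for (auto reel_duration: reel_durations) {
        // Same clamping as in Player::get_reel_assets().
        int64_t const reel_trim_start = std::min(reel_duration, std::max(int64_t(0), trim_start - offset_from_start));
        int64_t const reel_trim_end   = std::min(reel_duration, std::max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
        std::cout << "trim " << reel_trim_start << " from the start and " << reel_trim_end << " from the end of this reel\n";

        // Assumed bookkeeping: move the window on to the next reel.
        offset_from_start += reel_duration;
        offset_from_end -= reel_duration;
    }
    return 0;
}

With these numbers the first reel is trimmed away entirely (240 from its start) and the second loses 60 frames at its start and 100 at its end, leaving the 80 frames that survive the content-level trims.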
 
+
 bool
 Player::pass ()
 {
@@ -610,7 +651,7 @@ Player::pass ()
                        continue;
                }
 
-               DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
+               auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        i->done = true;
                } else {
@@ -653,7 +694,7 @@ Player::pass ()
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
-               shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
+               auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
@@ -702,12 +743,40 @@ Player::pass ()
        /* Emit any audio that is ready */
 
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
-          of our streams, or the position of the _silent.
+          of our streams, or the position of the _silent.  First, though, we choose only streams that are less than
+          ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
+          behind, it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
+          that will never come, causing bugs like #2101.
        */
-       DCPTime pull_to = _playback_length;
-       for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
-               if (!i->second.piece->done && i->second.last_push_end < pull_to) {
-                       pull_to = i->second.last_push_end;
+       constexpr int ignore_streams_behind = 5;
+
+       using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+       /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+       auto latest_last_push_end = std::max_element(
+               _stream_states.begin(),
+               _stream_states.end(),
+               [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+               );
+
+       if (latest_last_push_end != _stream_states.end()) {
+               LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+       }
+
+       /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+       std::map<AudioStreamPtr, StreamState> alive_stream_states;
+       for (auto const& i: _stream_states) {
+               if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+                       alive_stream_states.insert(i);
+               } else {
+                       LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+               }
+       }
+
+       auto pull_to = _playback_length;
+       for (auto const& i: alive_stream_states) {
+               if (!i.second.piece->done && i.second.last_push_end < pull_to) {
+                       pull_to = i.second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
@@ -715,11 +784,11 @@ Player::pass ()
        }
 
        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
-       list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
-       for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
+       auto audio = _audio_merger.pull (pull_to);
+       for (auto i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
-                       pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
+                       auto cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
@@ -734,14 +803,15 @@ Player::pass ()
 
        if (done) {
                _shuffler->flush ();
-               for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
-                       do_emit_video(i->first, i->second);
+               for (auto const& i: _delay) {
+                       do_emit_video(i.first, i.second);
                }
        }
 
        return done;
 }
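
The fix for withheld audio in pass() works by first finding the stream whose last_push_end is furthest ahead, using std::max_element with a comparator over the map's values, and then dropping any stream more than ignore_streams_behind seconds behind it before computing pull_to. A minimal sketch of that selection-and-filter pattern, with a map from a name to a plain double position standing in for _stream_states:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>

int main ()
{
    // Stand-in for _stream_states: how far (in seconds) each stream has pushed audio.
    std::map<std::string, double> last_push_end = {
        { "music.wav", 42.0 },
        { "dialogue.wav", 41.5 },
        { "stalled.wav", 3.0 }    // a stream that stopped early and will never catch up
    };

    constexpr double ignore_streams_behind = 5.0;   // seconds, as in Player::pass()

    // Find the 'leading' stream, i.e. the one that pushed data most recently.
    auto const leader = std::max_element(
        last_push_end.begin(),
        last_push_end.end(),
        [](auto const& a, auto const& b) { return a.second < b.second; }
        );

    // Keep only streams close enough to the leader; the rest are assumed finished.
    std::map<std::string, double> alive;
    for (auto const& i: last_push_end) {
        if (leader->second - i.second < ignore_streams_behind) {
            alive.insert (i);
        } else {
            std::cout << "ignoring " << i.first << " because it is too far behind\n";
        }
    }

    // pull_to is now limited only by the streams that are still alive
    // (a large stand-in for _playback_length here).
    double pull_to = 1e9;
    for (auto const& i: alive) {
        pull_to = std::min (pull_to, i.second);
    }
    std::cout << "emit audio up to " << pull_to << "s\n";
    return 0;
}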
 
+
 /** @return Open subtitles for the frame at the given time, converted to images */
 optional<PositionImage>
 Player::open_subtitles_for_frame (DCPTime time) const
@@ -767,31 +837,36 @@ Player::open_subtitles_for_frame (DCPTime time) const
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint (_video_container_size.width * i.rectangle.x),
-                                               lrint (_video_container_size.height * i.rectangle.y)
+                                               lrint(_video_container_size.width * i.rectangle.x),
+                                               lrint(_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }
 
                /* String subtitles (rendered to an image) */
-               if (!j.string.empty ()) {
-                       list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
+               if (!j.string.empty()) {
+                       auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }
 
-       if (captions.empty ()) {
-               return optional<PositionImage> ();
+       if (captions.empty()) {
+               return {};
        }
 
-       return merge (captions);
+       return merge (captions, _subtitle_alignment);
 }
 
+
 void
 Player::video (weak_ptr<Piece> wp, ContentVideo video)
 {
-       shared_ptr<Piece> piece = wp.lock ();
+       if (_suspended) {
+               return;
+       }
+
+       auto piece = wp.lock ();
        if (!piece) {
                return;
        }
@@ -817,6 +892,10 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                return;
        }
 
+       if (piece->ignore_video && piece->ignore_video->contains(time)) {
+               return;
+       }
+
        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
@@ -827,9 +906,9 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       LastVideoMap::const_iterator last = _last_video.find (wp);
+                       auto last = _last_video.find (wp);
                        if (_film->three_d()) {
-                               Eyes fill_to_eyes = video.eyes;
+                               auto fill_to_eyes = video.eyes;
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
@@ -837,15 +916,15 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
-                               DCPTime j = fill_from;
-                               Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+                               auto j = fill_from;
+                               auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
                                                LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
-                                               shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+                                               auto copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
@@ -869,21 +948,26 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                }
        }
 
-       _last_video[wp].reset (
-               new PlayerVideo (
-                       video.image,
-                       piece->content->video->crop (),
-                       piece->content->video->fade (_film, video.frame),
-                       scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
+       auto const content_video = piece->content->video;
+
+       _last_video[wp] = std::make_shared<PlayerVideo>(
+               video.image,
+               content_video->actual_crop(),
+               content_video->fade (_film, video.frame),
+               scale_for_display(
+                       content_video->scaled_size(_film->frame_size()),
                        _video_container_size,
-                       video.eyes,
-                       video.part,
-                       piece->content->video->colour_conversion(),
-                       piece->content->video->range(),
-                       piece->content,
-                       video.frame,
-                       false
-                       )
+                       _film->frame_size(),
+                       content_video->pixel_quanta()
+                       ),
+               _video_container_size,
+               video.eyes,
+               video.part,
+               content_video->colour_conversion(),
+               content_video->range(),
+               piece->content,
+               video.frame,
+               false
                );
 
        DCPTime t = time;
@@ -895,31 +979,36 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        }
 }
 
+
 void
 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
 {
+       if (_suspended) {
+               return;
+       }
+
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
-       shared_ptr<Piece> piece = wp.lock ();
+       auto piece = wp.lock ();
        if (!piece) {
                return;
        }
 
-       shared_ptr<AudioContent> content = piece->content->audio;
+       auto content = piece->content->audio;
        DCPOMATIC_ASSERT (content);
 
        int const rfr = content->resampled_frame_rate (_film);
 
        /* Compute time in the DCP */
-       DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+       auto time = resampled_audio_to_dcp (piece, content_audio.frame);
        LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
 
        /* And the end of this block in the DCP */
-       DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+       auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
 
        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
-               pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+               auto cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
@@ -934,7 +1023,7 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
                if (remaining_frames == 0) {
                        return;
                }
-               content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
+               content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
        }
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -942,8 +1031,8 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
        /* Gain */
 
        if (content->gain() != 0) {
-               shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
-               gain->apply_gain (content->gain ());
+               auto gain = make_shared<AudioBuffers>(content_audio.audio);
+               gain->apply_gain (content->gain());
                content_audio.audio = gain;
        }
 
@@ -964,11 +1053,16 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
 }
 
+
 void
 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
+       if (_suspended) {
+               return;
+       }
+
+       auto piece = wp.lock ();
+       auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }
@@ -986,7 +1080,7 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        subtitle.sub.rectangle.height *= text->y_scale ();
 
        PlayerText ps;
-       shared_ptr<Image> image = subtitle.sub.image;
+       auto image = subtitle.sub.image;
 
        /* We will scale the subtitle up to fit _video_container_size */
        int const width = subtitle.sub.rectangle.width * _video_container_size.width;
@@ -996,17 +1090,22 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        }
 
        dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 
        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
 }
 
+
 void
 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
+       if (_suspended) {
+               return;
+       }
+
+       auto piece = wp.lock ();
+       auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }
@@ -1046,10 +1145,15 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
 }
 
+
 void
 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
 {
-       shared_ptr<const TextContent> text = wc.lock ();
+       if (_suspended) {
+               return;
+       }
+
+       auto text = wc.lock ();
        if (!text) {
                return;
        }
@@ -1069,7 +1173,7 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
+       auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
 
        bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
@@ -1077,6 +1181,7 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
        }
 }
 
+
 void
 Player::seek (DCPTime time, bool accurate)
 {
@@ -1138,9 +1243,20 @@ Player::seek (DCPTime time, bool accurate)
        _last_video.clear ();
 }
 
+
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
+       if (!_film->three_d()) {
+               if (pv->eyes() == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       pv->set_eyes (Eyes::BOTH);
+               } else if (pv->eyes() == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
+       }
+
        /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
           player before the video that requires them.
        */
@@ -1155,11 +1271,12 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
                return;
        }
 
-       pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
+       auto to_do = _delay.front();
        _delay.pop_front();
        do_emit_video (to_do.first, to_do.second);
 }
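
emit_video() does not hand frames straight to the Video signal: it pushes them onto _delay and only emits the front of the queue once enough frames are buffered (the exact threshold is elided from this view), so subtitles decoded slightly later can still reach the player before the frame that needs them; pass() then flushes whatever remains in _delay when everything is done. A minimal sketch of that bounded-delay idea, with an int frame standing in for the PlayerVideo/DCPTime pair and an assumed queue depth of 2:

#include <cstddef>
#include <deque>
#include <iostream>

// Minimal stand-in for the _delay buffer: hold frames back a little so that
// anything which must accompany them (e.g. subtitles) has time to arrive first.
class DelayedEmitter
{
public:
    void emit (int frame)
    {
        _delay.push_back (frame);
        if (_delay.size() <= _depth) {
            return;   // not enough in hand yet; keep buffering
        }
        do_emit (_delay.front());
        _delay.pop_front ();
    }

    // Called once no more frames will arrive, like the flush at the end of Player::pass().
    void flush ()
    {
        for (auto frame: _delay) {
            do_emit (frame);
        }
        _delay.clear ();
    }

private:
    void do_emit (int frame) { std::cout << "emitting frame " << frame << "\n"; }

    std::deque<int> _delay;
    std::size_t _depth = 2;   // assumed depth; the real player uses its own constant
};

int main ()
{
    DelayedEmitter emitter;
    for (int frame = 0; frame < 5; ++frame) {
        emitter.emit (frame);   // frames 0 and 1 are held back at first
    }
    emitter.flush ();           // emits whatever is still buffered
    return 0;
}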
 
+
 void
 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
@@ -1177,6 +1294,7 @@ Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
        Video (pv, time);
 }
 
+
 void
 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 {
@@ -1191,6 +1309,7 @@ Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
        _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
 }
 
+
 void
 Player::fill_audio (DCPTimePeriod period)
 {
@@ -1205,7 +1324,7 @@ Player::fill_audio (DCPTimePeriod period)
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
                Frame const samples = block.frames_round(_film->audio_frame_rate());
                if (samples) {
-                       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
+                       auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
@@ -1213,25 +1332,28 @@ Player::fill_audio (DCPTimePeriod period)
        }
 }
 
+
 DCPTime
 Player::one_video_frame () const
 {
        return DCPTime::from_frames (1, _film->video_frame_rate ());
 }
 
+
 pair<shared_ptr<AudioBuffers>, DCPTime>
 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
 {
-       DCPTime const discard_time = discard_to - time;
-       Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
-       Frame remaining_frames = audio->frames() - discard_frames;
+       auto const discard_time = discard_to - time;
+       auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+       auto remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
-       shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
+       auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
        return make_pair(cut, time + discard_time);
 }
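
discard_audio() converts the gap between the block's start time and discard_to into whole sample frames and returns the remainder of the buffer together with its new start time, or an empty pair if nothing survives. A quick numeric sketch of that arithmetic with plain integers and doubles in place of DCPTime and AudioBuffers:

#include <cstdint>
#include <iostream>

int main ()
{
    int const rate = 48000;                 // audio frame rate
    double const time = 10.0;               // where this block starts, in seconds
    double const discard_to = 10.25;        // everything before this should be dropped
    int64_t const frames_in_block = 24000;  // half a second of audio

    // Same shape as Player::discard_audio(): length to drop, rounded to frames.
    double const discard_time = discard_to - time;
    int64_t const discard_frames = static_cast<int64_t>(discard_time * rate + 0.5);
    int64_t const remaining_frames = frames_in_block - discard_frames;

    if (remaining_frames <= 0) {
        std::cout << "block discarded entirely\n";
    } else {
        // 12000 frames remain, and the block now starts at 10.25s.
        std::cout << remaining_frames << " frames remain, starting at " << (time + discard_time) << "s\n";
    }
    return 0;
}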
 
+
 void
 Player::set_dcp_decode_reduction (optional<int> reduction)
 {
@@ -1253,6 +1375,7 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
        Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
 }
 
+
 optional<DCPTime>
 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 {
@@ -1279,6 +1402,10 @@ Player::playlist () const
 void
 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
 {
+       if (_suspended) {
+               return;
+       }
+
        Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
 }