Move resampling back into AudioDecoder and fix various screw-ups with audio in the...
[dcpomatic.git] / src / lib / player.cc
index db69c66d1377d1ceadd21fdf96776b10dabc9930..1ab164d867f216f80c44bb0dd8c4e5c24fd9b0ad 100644
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
 
 */
 
-#include <sstream>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <signal.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <boost/thread.hpp>
-#include <boost/algorithm/string.hpp>
+#include <stdint.h>
 #include "player.h"
-#include "film_state.h"
-#include "filter.h"
-#include "screen.h"
-#include "exceptions.h"
+#include "film.h"
+#include "ffmpeg_decoder.h"
+#include "ffmpeg_content.h"
+#include "still_image_decoder.h"
+#include "still_image_content.h"
+#include "sndfile_decoder.h"
+#include "sndfile_content.h"
+#include "subtitle_content.h"
+#include "playlist.h"
+#include "job.h"
+#include "image.h"
+#include "ratio.h"
+#include "log.h"
+#include "scaler.h"
 
-using namespace std;
-using namespace boost;
+using std::list;
+using std::cout;
+using std::min;
+using std::max;
+using std::vector;
+using std::pair;
+using std::map;
+using boost::shared_ptr;
+using boost::weak_ptr;
+using boost::dynamic_pointer_cast;
 
-Player::Player (shared_ptr<const FilmState> fs, shared_ptr<const Screen> screen, Split split)
-       : _stdout_reader_should_run (true)
-       , _position (0)
-       , _paused (false)
+//#define DEBUG_PLAYER 1
+
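+/** A piece of content, with its decoder and the DCP times at which its next video and audio will be emitted */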
+class Piece
 {
-       assert (fs->format);
+public:
+       Piece (shared_ptr<Content> c)
+               : content (c)
+               , video_position (c->start ())
+               , audio_position (c->start ())
+       {}
        
-       if (pipe (_mplayer_stdin) < 0) {
-               throw PlayError ("could not create pipe");
-       }
-
-       if (pipe (_mplayer_stdout) < 0) {
-               throw PlayError ("could not create pipe");
-       }
+       Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
+               : content (c)
+               , decoder (d)
+               , video_position (c->start ())
+               , audio_position (c->start ())
+       {}
+       
+       shared_ptr<Content> content;
+       shared_ptr<Decoder> decoder;
+       Time video_position;
+       Time audio_position;
+};
 
-       if (pipe (_mplayer_stderr) < 0) {
-               throw PlayError ("could not create pipe");
+#ifdef DEBUG_PLAYER
+std::ostream& operator<<(std::ostream& s, Piece const & p)
+{
+       if (dynamic_pointer_cast<FFmpegContent> (p.content)) {
+               s << "\tffmpeg     ";
+       } else if (dynamic_pointer_cast<StillImageContent> (p.content)) {
+               s << "\tstill image";
+       } else if (dynamic_pointer_cast<SndfileContent> (p.content)) {
+               s << "\tsndfile    ";
        }
        
-       int const p = fork ();
-       if (p < 0) {
-               throw PlayError ("could not fork for mplayer");
-       } else if (p == 0) {
-               close (_mplayer_stdin[1]);
-               dup2 (_mplayer_stdin[0], STDIN_FILENO);
-               
-               close (_mplayer_stdout[0]);
-               dup2 (_mplayer_stdout[1], STDOUT_FILENO);
-               
-               close (_mplayer_stderr[0]);
-               dup2 (_mplayer_stderr[1], STDERR_FILENO);
+       s << " at " << p.content->start() << " until " << p.content->end();
+       
+       return s;
+}
+#endif 
 
-               char* p[] = { strdup ("TERM=xterm"), strdup ("DISPLAY=:0"), 0 };
-               environ = p;
+Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
+       : _film (f)
+       , _playlist (p)
+       , _video (true)
+       , _audio (true)
+       , _have_valid_pieces (false)
+       , _video_position (0)
+       , _audio_position (0)
+       , _audio_buffers (f->audio_channels(), 0)
+{
+       _playlist->Changed.connect (bind (&Player::playlist_changed, this));
+       _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
+       _film->Changed.connect (bind (&Player::film_changed, this, _1));
+       set_video_container_size (_film->container()->size (_film->full_frame ()));
+}
 
-               stringstream s;
-               s << "/usr/local/bin/mplayer";
+void
+Player::disable_video ()
+{
+       _video = false;
+}
 
-               s << " -vo x11 -noaspect -noautosub -nosub -vo x11 -noborder -slave -quiet -input nodefault-bindings:conf=/dev/null";
-               s << " -sws " << fs->scaler->mplayer_id ();
+void
+Player::disable_audio ()
+{
+       _audio = false;
+}
 
-               stringstream vf;
-               
-               Position position = screen->position (fs->format);
-               Size screen_size = screen->size (fs->format);
-               Size const cropped_size = fs->cropped_size (fs->size);
-               switch (split) {
-               case SPLIT_NONE:
-                       vf << crop_string (Position (fs->left_crop, fs->top_crop), cropped_size);
-                       s << " -geometry " << position.x << ":" << position.y;
-                       break;
-               case SPLIT_LEFT:
-               {
-                       Size split_size = cropped_size;
-                       split_size.width /= 2;
-                       vf << crop_string (Position (fs->left_crop, fs->top_crop), split_size);
-                       screen_size.width /= 2;
-                       s << " -geometry " << position.x << ":" << position.y;
-                       break;
+bool
+Player::pass ()
+{
+       if (!_have_valid_pieces) {
+               setup_pieces ();
+               _have_valid_pieces = true;
+       }
+
+#ifdef DEBUG_PLAYER
+       cout << "= PASS\n";
+#endif 
+
+       Time earliest_t = TIME_MAX;
+       shared_ptr<Piece> earliest;
+       enum {
+               VIDEO,
+               AUDIO
+       } type = VIDEO;
+
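+       /* Find the piece with the earliest un-emitted video or audio (in DCP time) */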
+       for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+               if ((*i)->decoder->done ()) {
+                       continue;
                }
-               case SPLIT_RIGHT:
-               {
-                       Size split_size = cropped_size;
-                       split_size.width /= 2;
-                       vf << crop_string (Position (fs->left_crop + split_size.width, fs->top_crop), split_size);
-                       screen_size.width /= 2;
-                       s << " -geometry " << (position.x + screen_size.width) << ":" << position.y;
-                       break;
+
+               if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
+                       if ((*i)->video_position < earliest_t) {
+                               earliest_t = (*i)->video_position;
+                               earliest = *i;
+                               type = VIDEO;
+                       }
                }
+
+               if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
+                       if ((*i)->audio_position < earliest_t) {
+                               earliest_t = (*i)->audio_position;
+                               earliest = *i;
+                               type = AUDIO;
+                       }
                }
+       }
 
-               vf << ",scale=" << screen_size.width << ":" << screen_size.height;
+       if (!earliest) {
+#ifdef DEBUG_PLAYER
+               cout << "no earliest piece.\n";
+#endif         
                
-               pair<string, string> filters = Filter::ffmpeg_strings (fs->filters);
-               
-               if (!filters.first.empty()) {
-                       vf << "," << filters.first;
+               flush ();
+               return true;
+       }
+
+       switch (type) {
+       case VIDEO:
+               if (earliest_t > _video_position) {
+#ifdef DEBUG_PLAYER
+                       cout << "no video here; emitting black frame (earliest=" << earliest_t << ", video_position=" << _video_position << ").\n";
+#endif
+                       emit_black ();
+               } else {
+#ifdef DEBUG_PLAYER
+                       cout << "Pass " << *earliest << "\n";
+#endif                 
+                       earliest->decoder->pass ();
                }
-               
-               if (!filters.second.empty ()) {
-                       vf << ",pp=" << filters.second;
+               break;
+
+       case AUDIO:
+               if (earliest_t > _audio_position) {
+#ifdef DEBUG_PLAYER
+                       cout << "no audio here; emitting silence.\n";
+#endif
+                       emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
+               } else {
+#ifdef DEBUG_PLAYER
+                       cout << "Pass " << *earliest << "\n";
+#endif                 
+                       earliest->decoder->pass ();
                }
-               
-               s << " -vf " << vf.str();
-               s << " \"" << fs->content_path() << "\" ";
+               break;
+       }
 
-               string cmd (s.str ());
+#ifdef DEBUG_PLAYER
+       cout << "\tpost pass " << _video_position << " " << _audio_position << "\n";
+#endif 
 
-               vector<string> b = split_at_spaces_considering_quotes (cmd);
-               
-               char** cl = new char*[b.size() + 1];
-               for (vector<string>::size_type i = 0; i < b.size(); ++i) {
-                       cl[i] = strdup (b[i].c_str ());
+       return false;
+}
+
+void
+Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
+{
+       shared_ptr<Piece> piece = weak_piece.lock ();
+       if (!piece) {
+               return;
+       }
+
+       shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
+       assert (content);
+
+       FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
+       if (frc.skip && (frame % 2) == 1) {
+               return;
+       }
+
+       shared_ptr<Image> work_image = image->crop (content->crop(), true);
+
+       libdcp::Size const image_size = content->ratio()->size (_video_container_size);
+       
+       work_image = work_image->scale_and_convert_to_rgb (image_size, _film->scaler(), true);
+
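+       /* Convert the content's frame number to a DCP time, allowing for the repeat/skip frame rate conversion */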
+       Time time = content->start() + (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
+       
+       if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
+               work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
+       }
+
+       if (image_size != _video_container_size) {
+               assert (image_size.width <= _video_container_size.width);
+               assert (image_size.height <= _video_container_size.height);
+               shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
+               im->make_black ();
+               im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
+               work_image = im;
+       }
+
+#ifdef DCPOMATIC_DEBUG
+       _last_video = piece->content;
+#endif
+
+       Video (work_image, eyes, same, time);
+       time += TIME_HZ / _film->video_frame_rate();
+
+       if (frc.repeat) {
+               Video (work_image, eyes, true, time);
+               time += TIME_HZ / _film->video_frame_rate();
+       }
+
+       _video_position = piece->video_position = time;
+}
+
+void
+Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
+{
+       shared_ptr<Piece> piece = weak_piece.lock ();
+       if (!piece) {
+               return;
+       }
+
+       shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
+       assert (content);
+
+       /* Remap channels */
+       shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
+       dcp_mapped->make_silent ();
+       list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
+       for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
+               if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
+                       dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
                }
-               cl[b.size()] = 0;
-               
-               execv (cl[0], cl);
+       }
 
-               stringstream e;
-               e << "exec of mplayer failed " << strerror (errno);
-               throw PlayError (e.str ());
-               
-       } else {
-               _mplayer_pid = p;
-               command ("pause");
+       audio = dcp_mapped;
+
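+       /* The DCP time of this audio: the content's start, plus the offset of these frames within the
+          content, plus the content's audio delay (given in milliseconds).
+       */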
+       Time time = content->start()
+               + _film->audio_frames_to_time (frame)
+               + (content->audio_delay() * TIME_HZ / 1000);
+
+       /* We must cut off anything that falls before time 0, i.e. before the start of the DCP */
+       if (time < 0) {
+               int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
+               if (frames >= audio->frames ()) {
+                       return;
+               }
+
+               shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
+               trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
 
-               _stdout_reader = new boost::thread (boost::bind (&Player::stdout_reader, this));
+               audio = trimmed;
+               time = 0;
        }
+
+       /* The time of this audio may indicate that some of our buffered audio will receive no more
+          contributions, so it can be emitted.
+       */
+
+       if (time > _audio_position) {
+               /* We can emit some audio from our buffers; this is how many frames */
+               OutputAudioFrame const N = _film->time_to_audio_frames (time - _audio_position);
+               if (N > _audio_buffers.frames()) {
+                       /* We need some extra silence before whatever is in the buffers */
+                       _audio_buffers.ensure_size (N);
+                       _audio_buffers.move (0, N - _audio_buffers.frames(), _audio_buffers.frames ());
+                       _audio_buffers.make_silent (0, _audio_buffers.frames());
+                       _audio_buffers.set_frames (N);
+               }
+               assert (N <= _audio_buffers.frames());
+
+               /* XXX: not convinced that a copy is necessary here */
+               shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), N));
+               emit->copy_from (&_audio_buffers, N, 0, 0);
+               Audio (emit, _audio_position);
+               _audio_position = piece->audio_position = _audio_position + _film->audio_frames_to_time (N);
+
+               /* And remove it from our buffers */
+               if (_audio_buffers.frames() > N) {
+                       _audio_buffers.move (N, 0, _audio_buffers.frames() - N);
+               }
+               _audio_buffers.set_frames (_audio_buffers.frames() - N);
+       }
+
+       /* Now accumulate the new audio into our buffers */
+       _audio_buffers.ensure_size (_audio_buffers.frames() + audio->frames());
+       _audio_buffers.accumulate_frames (audio.get(), 0, 0, audio->frames ());
+       _audio_buffers.set_frames (_audio_buffers.frames() + audio->frames());
 }
 
-Player::~Player ()
+void
+Player::flush ()
 {
-       _stdout_reader_should_run = false;
-       _stdout_reader->join ();
-       delete _stdout_reader;
+       if (_audio_buffers.frames() > 0) {
+               shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), _audio_buffers.frames()));
+               emit->copy_from (&_audio_buffers, _audio_buffers.frames(), 0, 0);
+               Audio (emit, _audio_position);
+               _audio_position += _film->audio_frames_to_time (_audio_buffers.frames ());
+               _audio_buffers.set_frames (0);
+       }
+
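+       /* Pad with black or silence until the video and audio positions are level */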
+       while (_video_position < _audio_position) {
+               emit_black ();
+       }
+
+       while (_audio_position < _video_position) {
+               emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
+       }
        
-       close (_mplayer_stdin[0]);
-       close (_mplayer_stdout[1]);
-       kill (_mplayer_pid, SIGTERM);
 }
 
+/** Seek so that the next pass() will yield (approximately) video and audio from the requested time.
+ *  Pass accurate = true to try harder to get close to the request.
+ */
 void
-Player::command (string c)
+Player::seek (Time t, bool accurate)
 {
-       char buf[64];
-       snprintf (buf, sizeof (buf), "%s\n", c.c_str ());
-       write (_mplayer_stdin[1], buf, strlen (buf));
+       if (!_have_valid_pieces) {
+               setup_pieces ();
+               _have_valid_pieces = true;
+       }
+
+       if (_pieces.empty ()) {
+               return;
+       }
+
+       for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+               shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
+               if (!vc) {
+                       continue;
+               }
+               
+               Time s = t - vc->start ();
+               s = max (static_cast<Time> (0), s);
+               s = min (vc->length(), s);
+
+               (*i)->video_position = (*i)->audio_position = vc->start() + s;
+
+               FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
+               /* Here we are converting from time (in the DCP) to a frame number in the content.
+                  Hence we need to use the DCP's frame rate and the double/skip correction, not
+                  the source's rate.
+               */
+               VideoContent::Frame f = s * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
+               dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
+       }
+
+       _video_position = _audio_position = t;
+       
+       /* XXX: don't seek audio because we don't need to... */
 }
 
 void
-Player::stdout_reader ()
-{
-       while (_stdout_reader_should_run) {
-               char buf[1024];
-               int r = read (_mplayer_stdout[0], buf, sizeof (buf));
-               if (r > 0) {
-                       stringstream s (buf);
-                       while (s.good ()) {
-                               string line;
-                               getline (s, line);
-
-                               vector<string> b;
-                               split (b, line, is_any_of ("="));
-                               if (b.size() < 2) {
-                                       continue;
-                               }
+Player::setup_pieces ()
+{
+       list<shared_ptr<Piece> > old_pieces = _pieces;
+
+       _pieces.clear ();
+
+       ContentList content = _playlist->content ();
+       sort (content.begin(), content.end(), ContentSorter ());
+
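+       /* Make a Piece for each bit of content, each with a decoder whose output is connected back to us */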
+       for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
+
+               shared_ptr<Piece> piece (new Piece (*i));
+
+               /* XXX: into content? */
+
+               shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
+               if (fc) {
+                       shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
+                       
+                       fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
+                       fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
+                       fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
 
-                               if (b[0] == "ANS_time_pos") {
-                                       set_position (atof (b[1].c_str ()));
-                               } else if (b[0] == "ANS_pause") {
-                                       set_paused (b[1] == "yes");
+                       piece->decoder = fd;
+               }
+               
+               shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
+               if (ic) {
+                       shared_ptr<StillImageDecoder> id;
+                       
+                       /* See if we can re-use an old StillImageDecoder */
+                       for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
+                               shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
+                               if (imd && imd->content() == ic) {
+                                       id = imd;
                                }
                        }
+
+                       if (!id) {
+                               id.reset (new StillImageDecoder (_film, ic));
+                               id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
+                       }
+
+                       piece->decoder = id;
                }
 
-               usleep (5e5);
+               shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
+               if (sc) {
+                       shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
+                       sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
+
+                       piece->decoder = sd;
+               }
+
+               _pieces.push_back (piece);
+       }
+
+#ifdef DEBUG_PLAYER
+       cout << "=== Player setup:\n";
+       for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+               cout << *(i->get()) << "\n";
+       }
+#endif 
+}
+
+void
+Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
+{
+       shared_ptr<Content> c = w.lock ();
+       if (!c) {
+               return;
+       }
+
+       if (
+               property == ContentProperty::START || property == ContentProperty::LENGTH ||
+               property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO
+               ) {
+               
+               _have_valid_pieces = false;
+               Changed (frequent);
 
-               snprintf (buf, sizeof (buf), "pausing_keep_force get_property time_pos\npausing_keep_force get_property pause\n");
-               write (_mplayer_stdin[1], buf, strlen (buf));
+       } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
+               update_subtitle ();
+               Changed (frequent);
+       } else if (property == VideoContentProperty::VIDEO_FRAME_TYPE) {
+               Changed (frequent);
        }
 }
 
 void
-Player::set_position (float p)
+Player::playlist_changed ()
 {
-       /* XXX: could be an atomic */
-       boost::mutex::scoped_lock lm (_state_mutex);
-       _position = p;
+       _have_valid_pieces = false;
+       Changed (false);
 }
 
 void
-Player::set_paused (bool p)
+Player::set_video_container_size (libdcp::Size s)
 {
-       /* XXX: could be an atomic */
-       boost::mutex::scoped_lock lm (_state_mutex);
-       _paused = p;
+       _video_container_size = s;
+       _black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
+       _black_frame->make_black ();
 }
 
-float
-Player::position () const
+void
+Player::emit_black ()
 {
-       boost::mutex::scoped_lock lm (_state_mutex);
-       return _position;
+#ifdef DCPOMATIC_DEBUG
+       _last_video.reset ();
+#endif
+       
+       /* XXX: track and use the `same' flag here rather than always passing false */
+       Video (_black_frame, EYES_BOTH, false, _video_position);
+       _video_position += _film->video_frames_to_time (1);
 }
 
-bool
-Player::paused () const
+void
+Player::emit_silence (OutputAudioFrame most)
+{
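+       /* Emit no more than half a second of silence at a time */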
+       OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
+       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
+       silence->make_silent ();
+       Audio (silence, _audio_position);
+       _audio_position += _film->audio_frames_to_time (N);
+}
+
+void
+Player::film_changed (Film::Property p)
+{
+       /* Here we should notice Film properties that affect our output, and
+          alert listeners that our output would now be different to what it was
+          last time we were run.
+       */
+
+       if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
+               Changed (false);
+       }
+}
+
+void
+Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
+{
+       _in_subtitle.piece = weak_piece;
+       _in_subtitle.image = image;
+       _in_subtitle.rect = rect;
+       _in_subtitle.from = from;
+       _in_subtitle.to = to;
+
+       update_subtitle ();
+}
+
+void
+Player::update_subtitle ()
 {
-       boost::mutex::scoped_lock lm (_state_mutex);
-       return _paused;
+       shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
+       if (!piece) {
+               return;
+       }
+
+       if (!_in_subtitle.image) {
+               _out_subtitle.image.reset ();
+               return;
+       }
+
+       shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
+       assert (sc);
+
+       dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
+       libdcp::Size scaled_size;
+
+       in_rect.y += sc->subtitle_offset ();
+
+       /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
+       scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
+       scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
+
+       /* Then we need a corrective translation, consisting of two parts:
+        *
+        * 1.  the translation that results from scaling the subtitle's position by _video_container_size; this is
+        *     in_rect.x * _video_container_size.width and in_rect.y * _video_container_size.height.
+        *
+        * 2.  the translation needed to shift the origin of the subtitle_scale scaling to the centre of the
+        *     subtitle; this is (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
+        *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
+        *
+        * Combining these two translations gives the expressions below.
+        */
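+       /* For example, for x:
+        *
+        *     position.x = in_rect.x * W + (in_rect.width * W) * (1 - subtitle_scale) / 2
+        *                = W * (in_rect.x + in_rect.width * (1 - subtitle_scale) / 2)
+        *
+        * where W is _video_container_size.width; y is analogous, using the height.
+        */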
+       
+       _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
+       _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
+       
+       _out_subtitle.image = _in_subtitle.image->scale (libdcp::Size (scaled_size.width, scaled_size.height), Scaler::from_id ("bicubic"), true);
+       _out_subtitle.from = _in_subtitle.from + piece->content->start ();
+       _out_subtitle.to = _in_subtitle.to + piece->content->start ();
 }