Merge master.
author     Carl Hetherington <cth@carlh.net>
Tue, 4 Feb 2014 09:59:38 +0000 (09:59 +0000)
committer  Carl Hetherington <cth@carlh.net>
Tue, 4 Feb 2014 09:59:38 +0000 (09:59 +0000)
src/lib/encoder.cc
src/lib/exceptions.h
src/lib/ffmpeg_content.cc
src/lib/film.cc
src/lib/player.cc
src/lib/subrip_content.cc
src/lib/transcode_job.cc
src/wx/film_editor.cc
wscript

diff --combined src/lib/encoder.cc
index d26f776148c74fd34c0ff35abd264857b87e9266,92b4763be5768f5ca04c5f40621191d6ef52cb4b..f1c3e7e6182e2d4dfb9b7145388680738bed1d43
@@@ -180,6 -180,8 +180,8 @@@ Encoder::frame_done (
  void
  Encoder::process_video (shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, bool same)
  {
+       kick ();
+       
        boost::mutex::scoped_lock lock (_mutex);
  
        /* XXX: discard 3D here if required */
                TIMING ("adding to queue of %1", _queue.size ());
                _queue.push_back (shared_ptr<DCPVideoFrame> (
                                          new DCPVideoFrame (
 -                                                image->image(), _video_frames_out, eyes, conversion, _film->video_frame_rate(),
 +                                                image->image(PIX_FMT_RGB24, false), _video_frames_out, eyes, conversion, _film->video_frame_rate(),
                                                  _film->j2k_bandwidth(), _film->resolution(), _film->log()
                                                  )
                                          ));
diff --combined src/lib/exceptions.h
index 61163c8d12403dc492251f3e85668b4fa9a1119e,3423a5754e340e3909b6b59ef617b5785d1a2809..213be6186d523a713a0a3f167f77127e0047164e
@@@ -230,13 -230,6 +230,13 @@@ public
        PixelFormatError (std::string o, AVPixelFormat f);
  };
  
 +/** An error that occurs while parsing a SubRip file */
 +class SubRipError : public FileError
 +{
 +public:
 +      SubRipError (std::string, std::string, boost::filesystem::path);
 +};
 +
  /** A parent class for classes which have a need to catch and
   *  re-throw exceptions.  This is intended for classes
   *  which run their own thread; they should do something like
@@@ -260,6 -253,7 +260,7 @@@ public
                boost::mutex::scoped_lock lm (_mutex);
                if (_exception) {
                        boost::rethrow_exception (_exception);
+                       _exception = boost::exception_ptr ();
                }
        }
  
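For context, a rough sketch of how ExceptionStore is meant to be used by a thread-owning subclass (the worker-side setter, here called store_current(), is an assumption and is not visible in this hunk):

    /* Hypothetical worker thread in a class derived from ExceptionStore */
    void
    Encoder::encoder_thread ()
    try
    {
            while (true) {
                    /* ... work that may throw ... */
            }
    }
    catch (...)
    {
            /* Assumed helper: stores boost::current_exception () under _mutex */
            store_current ();
    }

The owning thread then calls rethrow() periodically to surface anything that the worker stored.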
diff --combined src/lib/ffmpeg_content.cc
index 394c16aa53568c4b55fe4e13be3bf0818bb28504,2c5fcf70e1e09915245711103a1f76081a395680..3bee49146a5304024d083c6138b00e8332012390
@@@ -62,7 -62,7 +62,7 @@@ FFmpegContent::FFmpegContent (shared_pt
        : Content (f, node)
        , VideoContent (f, node)
        , AudioContent (f, node)
-       , SubtitleContent (f, node)
+       , SubtitleContent (f, node, version)
  {
        list<cxml::NodePtr> c = node->node_children ("SubtitleStream");
        for (list<cxml::NodePtr>::const_iterator i = c.begin(); i != c.end(); ++i) {
@@@ -163,7 -163,7 +163,7 @@@ FFmpegContent::examine (shared_ptr<Job
  
        shared_ptr<FFmpegExaminer> examiner (new FFmpegExaminer (shared_from_this ()));
  
 -      VideoContent::Frame video_length = 0;
 +      VideoFrame video_length = 0;
        video_length = examiner->video_length ();
        film->log()->log (String::compose ("Video length obtained from header as %1 frames", video_length));
  
@@@ -262,12 -262,12 +262,12 @@@ FFmpegContent::set_audio_stream (shared
        signal_changed (FFmpegContentProperty::AUDIO_STREAM);
  }
  
 -AudioContent::Frame
 +AudioFrame
  FFmpegContent::audio_length () const
  {
        int const cafr = content_audio_frame_rate ();
        int const vfr  = video_frame_rate ();
 -      VideoContent::Frame const vl = video_length ();
 +      VideoFrame const vl = video_length ();
  
        boost::mutex::scoped_lock lm (_mutex);
        if (!_audio_stream) {
@@@ -310,15 -310,16 +310,15 @@@ FFmpegContent::output_audio_frame_rate 
        /* Resample to a DCI-approved sample rate */
        double t = dcp_audio_frame_rate (content_audio_frame_rate ());
  
 -      FrameRateConversion frc (video_frame_rate(), film->video_frame_rate());
 +      FrameRateChange frc (video_frame_rate(), film->video_frame_rate());
  
        /* Compensate if the DCP is being run at a different frame rate
           to the source; that is, if the video is run such that it will
           look different in the DCP compared to the source (slower or faster).
 -         skip/repeat doesn't come into effect here.
        */
  
        if (frc.change_speed) {
 -              t *= video_frame_rate() * frc.factor() / film->video_frame_rate();
 +              t /= frc.speed_up;
        }
  
        return rint (t);
@@@ -445,13 -446,13 +445,13 @@@ FFmpegSubtitleStream::as_xml (xmlpp::No
        FFmpegStream::as_xml (root);
  }
  
 -Time
 +DCPTime
  FFmpegContent::full_length () const
  {
        shared_ptr<const Film> film = _film.lock ();
        assert (film);
        
 -      FrameRateConversion frc (video_frame_rate (), film->video_frame_rate ());
 +      FrameRateChange frc (video_frame_rate (), film->video_frame_rate ());
        return video_length() * frc.factor() * TIME_HZ / film->video_frame_rate ();
  }
  
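As a worked example of the speed-change compensation above (assuming frc.speed_up is the DCP frame rate divided by the content frame rate): 25 fps content in a 24 fps DCP gives speed_up = 24/25 = 0.96, so the 48 kHz target becomes 48000 / 0.96 = 50000 Hz. One content second then carries 50 000 samples, and when that second is stretched over 25/24 DCP seconds the DCP receives exactly 48 000 samples per second.

    /* Sketch of the arithmetic only, not the real implementation */
    double t = 48000.0;                   /* DCI-approved target sample rate */
    double const speed_up = 24.0 / 25.0;  /* assumed: DCP rate / content rate */
    t /= speed_up;                        /* resample target: 50000 Hz */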
diff --combined src/lib/film.cc
index 099bacfdcb7b5513358f968555e03a864703e338,8690d3ee0b97eb22e1080337ff4d7e1b27c88746..67f795a6c739af5e60b8947bfde2e4657ba9f275
@@@ -83,8 -83,10 +83,10 @@@ using libdcp::Signer
  
  /* 5 -> 6
   * AudioMapping XML changed.
+  * 6 -> 7
+  * Subtitle offset changed to subtitle y offset, and subtitle x offset added.
   */
- int const Film::state_version = 6;
+ int const Film::state_version = 7;
  
  /** Construct a Film object in a given directory.
   *
@@@ -853,7 -855,7 +855,7 @@@ Film::move_content_later (shared_ptr<Co
        _playlist->move_later (c);
  }
  
 -Time
 +DCPTime
  Film::length () const
  {
        return _playlist->length ();
@@@ -865,18 -867,12 +867,18 @@@ Film::has_subtitles () cons
        return _playlist->has_subtitles ();
  }
  
 -OutputVideoFrame
 +VideoFrame
  Film::best_video_frame_rate () const
  {
        return _playlist->best_dcp_frame_rate ();
  }
  
 +FrameRateChange
 +Film::active_frame_rate_change (DCPTime t) const
 +{
 +      return _playlist->active_frame_rate_change (t, video_frame_rate ());
 +}
 +
  void
  Film::playlist_content_changed (boost::weak_ptr<Content> c, int p)
  {
@@@ -895,31 -891,31 +897,31 @@@ Film::playlist_changed (
        signal_changed (CONTENT);
  }     
  
 -OutputAudioFrame
 -Film::time_to_audio_frames (Time t) const
 +AudioFrame
 +Film::time_to_audio_frames (DCPTime t) const
  {
        return t * audio_frame_rate () / TIME_HZ;
  }
  
 -OutputVideoFrame
 -Film::time_to_video_frames (Time t) const
 +VideoFrame
 +Film::time_to_video_frames (DCPTime t) const
  {
        return t * video_frame_rate () / TIME_HZ;
  }
  
 -Time
 -Film::audio_frames_to_time (OutputAudioFrame f) const
 +DCPTime
 +Film::audio_frames_to_time (AudioFrame f) const
  {
        return f * TIME_HZ / audio_frame_rate ();
  }
  
 -Time
 -Film::video_frames_to_time (OutputVideoFrame f) const
 +DCPTime
 +Film::video_frames_to_time (VideoFrame f) const
  {
        return f * TIME_HZ / video_frame_rate ();
  }
  
 -OutputAudioFrame
 +AudioFrame
  Film::audio_frame_rate () const
  {
        /* XXX */
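All of the frame/time conversions above are simple rational scalings through the film's internal clock; a minimal sketch, assuming TIME_HZ is the number of internal time units per second (it must divide evenly by the video and audio rates for the Player's "conversion is exact" assumption, noted later in this diff, to hold):

    /* Minimal sketch of the conversions; the TIME_HZ value is assumed for illustration */
    static int64_t const TIME_HZ = 96000;
    typedef int64_t DCPTime;
    typedef int64_t VideoFrame;

    VideoFrame time_to_video_frames (DCPTime t, int rate) { return t * rate / TIME_HZ; }
    DCPTime    video_frames_to_time (VideoFrame f, int rate) { return f * TIME_HZ / rate; }

    /* e.g. with a 24 fps DCP one video frame is 96000 / 24 = 4000 units */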
diff --combined src/lib/player.cc
index cb6d519842c5c6cfce1ac92d5f1fe83f74a04e0c,e661a7b3693930eac8b0348fd250de185a098706..3e6a1598d17e76559c64b7ca0cde2837ec905b64
@@@ -18,7 -18,6 +18,7 @@@
  */
  
  #include <stdint.h>
 +#include <algorithm>
  #include "player.h"
  #include "film.h"
  #include "ffmpeg_decoder.h"
  #include "sndfile_decoder.h"
  #include "sndfile_content.h"
  #include "subtitle_content.h"
 +#include "subrip_decoder.h"
 +#include "subrip_content.h"
  #include "playlist.h"
  #include "job.h"
  #include "image.h"
  #include "ratio.h"
 -#include "resampler.h"
  #include "log.h"
  #include "scaler.h"
 +#include "render_subtitles.h"
  
  using std::list;
  using std::cout;
@@@ -48,20 -45,69 +48,20 @@@ using std::map
  using boost::shared_ptr;
  using boost::weak_ptr;
  using boost::dynamic_pointer_cast;
 +using boost::optional;
  
  class Piece
  {
  public:
 -      Piece (shared_ptr<Content> c)
 -              : content (c)
 -              , video_position (c->position ())
 -              , audio_position (c->position ())
 -              , repeat_to_do (0)
 -              , repeat_done (0)
 -      {}
 -      
 -      Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
 +      Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
                : content (c)
                , decoder (d)
 -              , video_position (c->position ())
 -              , audio_position (c->position ())
 +              , frc (f)
        {}
  
 -      /** Set this piece to repeat a video frame a given number of times */
 -      void set_repeat (IncomingVideo video, int num)
 -      {
 -              repeat_video = video;
 -              repeat_to_do = num;
 -              repeat_done = 0;
 -      }
 -
 -      void reset_repeat ()
 -      {
 -              repeat_video.image.reset ();
 -              repeat_to_do = 0;
 -              repeat_done = 0;
 -      }
 -
 -      bool repeating () const
 -      {
 -              return repeat_done != repeat_to_do;
 -      }
 -
 -      void repeat (Player* player)
 -      {
 -              player->process_video (
 -                      repeat_video.weak_piece,
 -                      repeat_video.image,
 -                      repeat_video.eyes,
 -                      repeat_done > 0,
 -                      repeat_video.frame,
 -                      (repeat_done + 1) * (TIME_HZ / player->_film->video_frame_rate ())
 -                      );
 -
 -              ++repeat_done;
 -      }
 -      
        shared_ptr<Content> content;
        shared_ptr<Decoder> decoder;
 -      /** Time of the last video we emitted relative to the start of the DCP */
 -      Time video_position;
 -      /** Time of the last audio we emitted relative to the start of the DCP */
 -      Time audio_position;
 -
 -      IncomingVideo repeat_video;
 -      int repeat_to_do;
 -      int repeat_done;
 +      FrameRateChange frc;
  };
  
  Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        , _audio_position (0)
        , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
        , _last_emit_was_black (false)
 +      , _just_did_inaccurate_seek (false)
 +      , _approximate_size (false)
  {
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
@@@ -102,162 -146,111 +102,162 @@@ Player::pass (
                setup_pieces ();
        }
  
 -      Time earliest_t = TIME_MAX;
 -      shared_ptr<Piece> earliest;
 -      enum {
 -              VIDEO,
 -              AUDIO
 -      } type = VIDEO;
 +      /* Interrogate all our pieces to find the one with the earliest decoded data */
 +
 +      shared_ptr<Piece> earliest_piece;
 +      shared_ptr<Decoded> earliest_decoded;
 +      DCPTime earliest_time = TIME_MAX;
 +      DCPTime earliest_audio = TIME_MAX;
  
        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
 -              if ((*i)->decoder->done ()) {
 -                      continue;
 -              }
  
 -              shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> ((*i)->decoder);
 -              shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
 +              DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();
 +              
 +              bool done = false;
 +              shared_ptr<Decoded> dec;
 +              while (!done) {
 +                      dec = (*i)->decoder->peek ();
 +                      if (!dec) {
 +                              /* Decoder has nothing else to give us */
 +                              break;
 +                      }
  
 -              if (_video && vd) {
 -                      if ((*i)->video_position < earliest_t) {
 -                              earliest_t = (*i)->video_position;
 -                              earliest = *i;
 -                              type = VIDEO;
 +                      dec->set_dcp_times (_film->video_frame_rate(), _film->audio_frame_rate(), (*i)->frc, offset);
 +                      DCPTime const t = dec->dcp_time - offset;
 +                      if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
 +                              /* In the end-trimmed part; decoder has nothing else to give us */
 +                              dec.reset ();
 +                              done = true;
 +                      } else if (t >= (*i)->content->trim_start ()) {
 +                              /* Within the un-trimmed part; everything's ok */
 +                              done = true;
 +                      } else {
 +                              /* Within the start-trimmed part; get something else */
 +                              (*i)->decoder->consume ();
                        }
                }
  
 -              if (_audio && ad && ad->has_audio ()) {
 -                      if ((*i)->audio_position < earliest_t) {
 -                              earliest_t = (*i)->audio_position;
 -                              earliest = *i;
 -                              type = AUDIO;
 -                      }
 +              if (!dec) {
 +                      continue;
                }
 -      }
  
 -      if (!earliest) {
 +              if (dec->dcp_time < earliest_time) {
 +                      earliest_piece = *i;
 +                      earliest_decoded = dec;
 +                      earliest_time = dec->dcp_time;
 +              }
 +
 +              if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
 +                      earliest_audio = dec->dcp_time;
 +              }
 +      }
 +              
 +      if (!earliest_piece) {
                flush ();
                return true;
        }
  
 -      switch (type) {
 -      case VIDEO:
 -              if (earliest_t > _video_position) {
 -                      emit_black ();
 -              } else {
 -                      if (earliest->repeating ()) {
 -                              earliest->repeat (this);
 +      if (earliest_audio != TIME_MAX) {
 +              TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (max (int64_t (0), earliest_audio));
 +              Audio (tb.audio, tb.time);
 +              /* This assumes that the audio_frames_to_time conversion is exact
 +                 so that there are no accumulated errors caused by rounding.
 +              */
 +              _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
 +      }
 +
 +      /* Emit the earliest thing */
 +
 +      shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
 +      shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
 +      shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
 +      shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);
 +
 +      /* Will be set to false if we shouldn't consume the peeked DecodedThing */
 +      bool consume = true;
 +
 +      if (dv && _video) {
 +
 +              if (_just_did_inaccurate_seek) {
 +
 +                      /* Just emit; no subtlety */
 +                      emit_video (earliest_piece, dv);
 +                      step_video_position (dv);
 +                      
 +              } else if (dv->dcp_time > _video_position) {
 +
 +                      /* Too far ahead */
 +
 +                      list<shared_ptr<Piece> >::iterator i = _pieces.begin();
 +                      while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
 +                              ++i;
 +                      }
 +
 +                      if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
 +                              /* We're outside all video content */
 +                              emit_black ();
 +                              _statistics.video.black++;
                        } else {
 -                              earliest->decoder->pass ();
 +                              /* We're inside some video; repeat the frame */
 +                              _last_incoming_video.video->dcp_time = _video_position;
 +                              emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
 +                              step_video_position (_last_incoming_video.video);
 +                              _statistics.video.repeat++;
                        }
 -              }
 -              break;
  
 -      case AUDIO:
 -              if (earliest_t > _audio_position) {
 -                      emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
 +                      consume = false;
 +
 +              } else if (dv->dcp_time == _video_position) {
 +                      /* We're ok */
 +                      emit_video (earliest_piece, dv);
 +                      step_video_position (dv);
 +                      _statistics.video.good++;
                } else {
 -                      earliest->decoder->pass ();
 -
 -                      if (earliest->decoder->done()) {
 -                              shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
 -                              assert (ac);
 -                              shared_ptr<Resampler> re = resampler (ac, false);
 -                              if (re) {
 -                                      shared_ptr<const AudioBuffers> b = re->flush ();
 -                                      if (b->frames ()) {
 -                                              process_audio (earliest, b, ac->audio_length ());
 -                                      }
 -                              }
 -                      }
 +                      /* Too far behind: skip */
 +                      _statistics.video.skip++;
                }
 -              break;
 -      }
  
 -      if (_audio) {
 -              boost::optional<Time> audio_done_up_to;
 -              for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
 -                      if ((*i)->decoder->done ()) {
 -                              continue;
 -                      }
 +              _just_did_inaccurate_seek = false;
  
 -                      shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
 -                      if (ad && ad->has_audio ()) {
 -                              audio_done_up_to = min (audio_done_up_to.get_value_or (TIME_MAX), (*i)->audio_position);
 -                      }
 -              }
 +      } else if (da && _audio) {
  
 -              if (audio_done_up_to) {
 -                      TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to.get ());
 -                      Audio (tb.audio, tb.time);
 -                      _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
 +              if (da->dcp_time > _audio_position) {
 +                      /* Too far ahead */
 +                      emit_silence (da->dcp_time - _audio_position);
 +                      consume = false;
 +                      _statistics.audio.silence += (da->dcp_time - _audio_position);
 +              } else if (da->dcp_time == _audio_position) {
 +                      /* We're ok */
 +                      emit_audio (earliest_piece, da);
 +                      _statistics.audio.good += da->data->frames();
 +              } else {
 +                      /* Too far behind: skip */
 +                      _statistics.audio.skip += da->data->frames();
                }
 -      }
                
 +      } else if (dis && _video) {
 +              _image_subtitle.piece = earliest_piece;
 +              _image_subtitle.subtitle = dis;
 +              update_subtitle_from_image ();
 +      } else if (dts && _video) {
 +              _text_subtitle.piece = earliest_piece;
 +              _text_subtitle.subtitle = dts;
 +              update_subtitle_from_text ();
 +      }
 +
 +      if (consume) {
 +              earliest_piece->decoder->consume ();
 +      }                       
 +      
        return false;
  }
  
 -/** @param extra Amount of extra time to add to the content frame's time (for repeat) */
  void
 -Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame, Time extra)
 +Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
  {
        /* Keep a note of what came in so that we can repeat it if required */
        _last_incoming_video.weak_piece = weak_piece;
 -      _last_incoming_video.image = image;
 -      _last_incoming_video.eyes = eyes;
 -      _last_incoming_video.same = same;
 -      _last_incoming_video.frame = frame;
 -      _last_incoming_video.extra = extra;
 +      _last_incoming_video.video = video;
        
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
        shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
        assert (content);
  
 -      FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
 -      if (frc.skip && (frame % 2) == 1) {
 -              return;
 -      }
 +      FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());
  
 -      Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
 -      if (content->trimmed (relative_time)) {
 -              return;
 -      }
 -
 -      Time const time = content->position() + relative_time + extra - content->trim_start ();
        float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
 -      libdcp::Size const image_size = fit_ratio_within (ratio, _video_container_size);
 +      libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
 +      if (_approximate_size) {
 +              image_size.width &= ~3;
 +              image_size.height &= ~3;
 +      }
  
        shared_ptr<PlayerImage> pi (
                new PlayerImage (
 -                      image,
 +                      video->image,
                        content->crop(),
                        image_size,
                        _video_container_size,
                        )
                );
        
 -      if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
 +      if (
 +              _film->with_subtitles () &&
 +              _out_subtitle.image &&
 +              video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
 +              ) {
  
                Position<int> const container_offset (
                        (_video_container_size.width - image_size.width) / 2,
 -                      (_video_container_size.height - image_size.width) / 2
 +                      (_video_container_size.height - image_size.height) / 2
                        );
  
                pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
        _last_video = piece->content;
  #endif
  
 -      Video (pi, eyes, content->colour_conversion(), same, time);
 -
 +      Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);
 +      
        _last_emit_was_black = false;
 -      _video_position = piece->video_position = (time + TIME_HZ / _film->video_frame_rate());
 +}
  
 -      if (frc.repeat > 1 && !piece->repeating ()) {
 -              piece->set_repeat (_last_incoming_video, frc.repeat - 1);
 +void
 +Player::step_video_position (shared_ptr<DecodedVideo> video)
 +{
 +      /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
 +      if (video->eyes != EYES_LEFT) {
 +              /* This assumes that the video_frames_to_time conversion is exact
 +                 so that there are no accumulated errors caused by rounding.
 +              */
 +              _video_position += _film->video_frames_to_time (1);
        }
  }
  
  void
 -Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
 +Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
  {
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
  
        /* Gain */
        if (content->audio_gain() != 0) {
 -              shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
 +              shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
                gain->apply_gain (content->audio_gain ());
 -              audio = gain;
 -      }
 -
 -      /* Resample */
 -      if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
 -              shared_ptr<Resampler> r = resampler (content, true);
 -              pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
 -              audio = ro.first;
 -              frame = ro.second;
 +              audio->data = gain;
        }
 -      
 -      Time const relative_time = _film->audio_frames_to_time (frame);
  
 -      if (content->trimmed (relative_time)) {
 -              return;
 -      }
 -
 -      Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time - content->trim_start ();
 -      
        /* Remap channels */
 -      shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
 +      shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
        dcp_mapped->make_silent ();
 -
        AudioMapping map = content->audio_mapping ();
        for (int i = 0; i < map.content_channels(); ++i) {
                for (int j = 0; j < _film->audio_channels(); ++j) {
                        if (map.get (i, static_cast<libdcp::Channel> (j)) > 0) {
                                dcp_mapped->accumulate_channel (
 -                                      audio.get(),
 +                                      audio->data.get(),
                                        i,
                                        static_cast<libdcp::Channel> (j),
                                        map.get (i, static_cast<libdcp::Channel> (j))
                }
        }
  
 -      audio = dcp_mapped;
 +      audio->data = dcp_mapped;
  
 -      /* We must cut off anything that comes before the start of all time */
 -      if (time < 0) {
 -              int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
 -              if (frames >= audio->frames ()) {
 +      /* Delay */
 +      audio->dcp_time += content->audio_delay() * TIME_HZ / 1000;
 +      if (audio->dcp_time < 0) {
 +              int const frames = - audio->dcp_time * _film->audio_frame_rate() / TIME_HZ;
 +              if (frames >= audio->data->frames ()) {
                        return;
                }
  
 -              shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
 -              trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
 +              shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
 +              trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);
  
 -              audio = trimmed;
 -              time = 0;
 +              audio->data = trimmed;
 +              audio->dcp_time = 0;
        }
  
 -      _audio_merger.push (audio, time);
 -      piece->audio_position += _film->audio_frames_to_time (audio->frames ());
 +      _audio_merger.push (audio->data, audio->dcp_time);
  }
  
  void
  Player::flush ()
  {
 -      TimedAudioBuffers<Time> tb = _audio_merger.flush ();
 +      TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
        if (_audio && tb.audio) {
                Audio (tb.audio, tb.time);
                _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
        }
  
        while (_audio && _audio_position < _video_position) {
 -              emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
 +              emit_silence (_video_position - _audio_position);
        }
        
  }
   *  @return true on error
   */
  void
 -Player::seek (Time t, bool accurate)
 +Player::seek (DCPTime t, bool accurate)
  {
        if (!_have_valid_pieces) {
                setup_pieces ();
        }
  
        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
 -              shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
 -              if (!vc) {
 -                      continue;
 -              }
 -
                /* s is the offset of t from the start position of this content */
 -              Time s = t - vc->position ();
 -              s = max (static_cast<Time> (0), s);
 -              s = min (vc->length_after_trim(), s);
 +              DCPTime s = t - (*i)->content->position ();
 +              s = max (static_cast<DCPTime> (0), s);
 +              s = min ((*i)->content->length_after_trim(), s);
  
 -              /* Hence set the piece positions to the `global' time */
 -              (*i)->video_position = (*i)->audio_position = vc->position() + s;
 +              /* Convert this to the content time */
 +              ContentTime ct = (s + (*i)->content->trim_start()) * (*i)->frc.speed_up;
  
                /* And seek the decoder */
 -              dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (
 -                      vc->time_to_content_video_frames (s + vc->trim_start ()), accurate
 -                      );
 -
 -              (*i)->reset_repeat ();
 +              (*i)->decoder->seek (ct, accurate);
        }
  
 -      _video_position = _audio_position = t;
 +      _video_position = time_round_up (t, TIME_HZ / _film->video_frame_rate());
 +      _audio_position = time_round_up (t, TIME_HZ / _film->audio_frame_rate());
 +
 +      _audio_merger.clear (_audio_position);
  
 -      /* XXX: don't seek audio because we don't need to... */
 +      if (!accurate) {
 +              /* We just did an inaccurate seek, so it's likely that the next thing seen
 +                 out of pass() will be a fair distance from _{video,audio}_position.  Setting
 +                 this flag stops pass() from trying to fix that: we assume that if it
 +                 was an inaccurate seek then the caller does not care too much about
 +                 inserting black/silence to keep the time tidy.
 +              */
 +              _just_did_inaccurate_seek = true;
 +      }
  }
  
  void
  Player::setup_pieces ()
  {
        list<shared_ptr<Piece> > old_pieces = _pieces;
 -
        _pieces.clear ();
  
        ContentList content = _playlist->content ();
 -      sort (content.begin(), content.end(), ContentSorter ());
  
        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
  
 -              shared_ptr<Piece> piece (new Piece (*i));
 +              shared_ptr<Decoder> decoder;
 +              optional<FrameRateChange> frc;
  
 -              /* XXX: into content? */
 +              /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
 +              DCPTime best_overlap_t = 0;
 +              shared_ptr<VideoContent> best_overlap;
 +              for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
 +                      shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
 +                      if (!vc) {
 +                              continue;
 +                      }
 +                      
 +                      DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end());
 +                      if (overlap > best_overlap_t) {
 +                              best_overlap = vc;
 +                              best_overlap_t = overlap;
 +                      }
 +              }
  
 +              optional<FrameRateChange> best_overlap_frc;
 +              if (best_overlap) {
 +                      best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
 +              } else {
 +                      /* No video overlap; e.g. if the DCP is just audio */
 +                      best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
 +              }
 +
 +              /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
 -                      shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
 -                      
 -                      fd->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, 0));
 -                      fd->Audio.connect (bind (&Player::process_audio, this, weak_ptr<Piece> (piece), _1, _2));
 -                      fd->Subtitle.connect (bind (&Player::process_subtitle, this, weak_ptr<Piece> (piece), _1, _2, _3, _4));
 -
 -                      fd->seek (fc->time_to_content_video_frames (fc->trim_start ()), true);
 -                      piece->decoder = fd;
 +                      decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio));
 +                      frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }
 -              
 +
 +              /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
 -                      bool reusing = false;
 -                      
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
 -                                      piece = *j;
 -                                      reusing = true;
 +                                      decoder = imd;
                                }
                        }
  
 -                      if (!reusing) {
 -                              shared_ptr<ImageDecoder> id (new ImageDecoder (_film, ic));
 -                              id->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, 0));
 -                              piece->decoder = id;
 +                      if (!decoder) {
 +                              decoder.reset (new ImageDecoder (_film, ic));
                        }
 +
 +                      frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }
  
 +              /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
 -                      shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
 -                      sd->Audio.connect (bind (&Player::process_audio, this, weak_ptr<Piece> (piece), _1, _2));
 +                      decoder.reset (new SndfileDecoder (_film, sc));
 +                      frc = best_overlap_frc;
 +              }
  
 -                      piece->decoder = sd;
 +              /* SubRipContent */
 +              shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
 +              if (rc) {
 +                      decoder.reset (new SubRipDecoder (_film, rc));
 +                      frc = best_overlap_frc;
                }
  
 -              _pieces.push_back (piece);
 +              ContentTime st = (*i)->trim_start() * frc->speed_up;
 +              decoder->seek (st, true);
 +              
 +              _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }
  
        _have_valid_pieces = true;
 +
 +      /* The Piece for the _last_incoming_video will no longer be valid */
 +      _last_incoming_video.video.reset ();
 +
 +      _video_position = _audio_position = 0;
  }
  
  void
@@@ -547,10 -521,13 +547,14 @@@ Player::content_changed (weak_ptr<Conte
                _have_valid_pieces = false;
                Changed (frequent);
  
-       } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
+       } else if (
+               property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
+               property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
+               property == SubtitleContentProperty::SUBTITLE_SCALE
+               ) {
  
 -              update_subtitle ();
 +              update_subtitle_from_image ();
 +              update_subtitle_from_text ();
                Changed (frequent);
  
        } else if (
@@@ -592,6 -569,29 +596,6 @@@ Player::set_video_container_size (libdc
                );
  }
  
 -shared_ptr<Resampler>
 -Player::resampler (shared_ptr<AudioContent> c, bool create)
 -{
 -      map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
 -      if (i != _resamplers.end ()) {
 -              return i->second;
 -      }
 -
 -      if (!create) {
 -              return shared_ptr<Resampler> ();
 -      }
 -
 -      _film->log()->log (
 -              String::compose (
 -                      "Creating new resampler for %1 to %2 with %3 channels", c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()
 -                      )
 -              );
 -      
 -      shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
 -      _resamplers[c] = r;
 -      return r;
 -}
 -
  void
  Player::emit_black ()
  {
  }
  
  void
 -Player::emit_silence (OutputAudioFrame most)
 +Player::emit_silence (DCPTime most)
  {
        if (most == 0) {
                return;
        }
        
 -      OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
 -      shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
 +      DCPTime t = min (most, TIME_HZ / 2);
 +      shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t * _film->audio_frame_rate() / TIME_HZ));
        silence->make_silent ();
        Audio (silence, _audio_position);
 -      _audio_position += _film->audio_frames_to_time (N);
 +      
 +      _audio_position += t;
  }
  
  void
@@@ -633,14 -632,26 +637,14 @@@ Player::film_changed (Film::Property p
  }
  
  void
 -Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
 +Player::update_subtitle_from_image ()
  {
 -      _in_subtitle.piece = weak_piece;
 -      _in_subtitle.image = image;
 -      _in_subtitle.rect = rect;
 -      _in_subtitle.from = from;
 -      _in_subtitle.to = to;
 -
 -      update_subtitle ();
 -}
 -
 -void
 -Player::update_subtitle ()
 -{
 -      shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
 +      shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
        if (!piece) {
                return;
        }
  
 -      if (!_in_subtitle.image) {
 +      if (!_image_subtitle.subtitle->image) {
                _out_subtitle.image.reset ();
                return;
        }
        shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
        assert (sc);
  
 -      dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
 +      dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
        libdcp::Size scaled_size;
  
-       in_rect.y += sc->subtitle_offset ();
+       in_rect.x += sc->subtitle_x_offset ();
+       in_rect.y += sc->subtitle_y_offset ();
  
        /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
        scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
        _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
        _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
        
 -      _out_subtitle.image = _in_subtitle.image->scale (
 +      _out_subtitle.image = _image_subtitle.subtitle->image->scale (
                scaled_size,
                Scaler::from_id ("bicubic"),
 -              _in_subtitle.image->pixel_format (),
 +              _image_subtitle.subtitle->image->pixel_format (),
                true
                );
 -
 -      /* XXX: hack */
 -      Time from = _in_subtitle.from;
 -      Time to = _in_subtitle.to;
 -      shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (piece->content);
 -      if (vc) {
 -              from = rint (from * vc->video_frame_rate() / _film->video_frame_rate());
 -              to = rint (to * vc->video_frame_rate() / _film->video_frame_rate());
 -      }
        
-       _out_subtitle.from = _image_subtitle.subtitle->dcp_time;
-       _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to;
 -      _out_subtitle.from = from + piece->content->position ();
 -      _out_subtitle.to = to + piece->content->position ();
++      _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
++      _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
  }
  
  /** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
  bool
  Player::repeat_last_video ()
  {
 -      if (!_last_incoming_video.image || !_have_valid_pieces) {
 +      if (!_last_incoming_video.video || !_have_valid_pieces) {
                return false;
        }
  
 -      process_video (
 +      emit_video (
                _last_incoming_video.weak_piece,
 -              _last_incoming_video.image,
 -              _last_incoming_video.eyes,
 -              _last_incoming_video.same,
 -              _last_incoming_video.frame,
 -              _last_incoming_video.extra
 +              _last_incoming_video.video
                );
  
        return true;
  }
  
 +void
 +Player::update_subtitle_from_text ()
 +{
 +      if (_text_subtitle.subtitle->subs.empty ()) {
 +              _out_subtitle.image.reset ();
 +              return;
 +      }
 +
 +      render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
 +}
 +
 +void
 +Player::set_approximate_size ()
 +{
 +      _approximate_size = true;
 +}
 +                            
  PlayerImage::PlayerImage (
        shared_ptr<const Image> in,
        Crop crop,
@@@ -742,10 -750,10 +747,10 @@@ PlayerImage::set_subtitle (shared_ptr<c
  }
  
  shared_ptr<Image>
 -PlayerImage::image ()
 +PlayerImage::image (AVPixelFormat format, bool aligned)
  {
 -      shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, PIX_FMT_RGB24, false);
 -
 +      shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);
 +      
        Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.width) / 2);
  
        if (_subtitle_image) {
  
        return out;
  }
 +
 +void
 +PlayerStatistics::dump (shared_ptr<Log> log) const
 +{
 +      log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
 +      log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence));
 +}
 +
 +PlayerStatistics const &
 +Player::statistics () const
 +{
 +      return _statistics;
 +}
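The reworked pass() above drives each Piece's decoder through peek()/consume() rather than the old signal-driven pass(); the interface it relies on is roughly the following (a sketch reconstructed from the calls in this hunk, not the actual header):

    /* Sketch of the decoder interface used by Player::pass(); assumed, not shown in this diff */
    class Decoder
    {
    public:
            /* Return the next decoded item without discarding it, or a null pointer
               when there is nothing left to decode. */
            virtual shared_ptr<Decoded> peek () = 0;
            /* Discard the item last returned by peek (). */
            virtual void consume () = 0;
            /* Seek to a content time; accurate seeks must land exactly, inaccurate
               ones may land nearby. */
            virtual void seek (ContentTime time, bool accurate) = 0;
    };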
diff --combined src/lib/subrip_content.cc
index 48d3528e14d881265dd93466b622038c207492a1,0000000000000000000000000000000000000000..73499a5f69c384a17bc5e2ed39787afd31882af3
mode 100644,000000..100644
--- /dev/null
@@@ -1,99 -1,0 +1,100 @@@
- SubRipContent::SubRipContent (shared_ptr<const Film> film, shared_ptr<const cxml::Node> node, int)
 +/*
 +    Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
 +
 +    This program is free software; you can redistribute it and/or modify
 +    it under the terms of the GNU General Public License as published by
 +    the Free Software Foundation; either version 2 of the License, or
 +    (at your option) any later version.
 +
 +    This program is distributed in the hope that it will be useful,
 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 +    GNU General Public License for more details.
 +
 +    You should have received a copy of the GNU General Public License
 +    along with this program; if not, write to the Free Software
 +    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 +
 +*/
 +
 +#include "subrip_content.h"
 +#include "util.h"
 +#include "subrip.h"
 +
 +#include "i18n.h"
 +
 +using std::stringstream;
 +using std::string;
 +using boost::shared_ptr;
 +
 +SubRipContent::SubRipContent (shared_ptr<const Film> film, boost::filesystem::path path)
 +      : Content (film, path)
 +      , SubtitleContent (film, path)
 +{
 +
 +}
 +
-       , SubtitleContent (film, node)
++SubRipContent::SubRipContent (shared_ptr<const Film> film, shared_ptr<const cxml::Node> node, int version)
 +      : Content (film, node)
-         << "_" << subtitle_offset();
++      , SubtitleContent (film, node, version)
 +{
 +
 +}
 +
 +void
 +SubRipContent::examine (boost::shared_ptr<Job> job)
 +{
 +      Content::examine (job);
 +      SubRip s (shared_from_this ());
 +      boost::mutex::scoped_lock lm (_mutex);
 +      _length = s.length ();
 +}
 +
 +string
 +SubRipContent::summary () const
 +{
 +      return path_summary() + " " + _("[subtitles]");
 +}
 +
 +string
 +SubRipContent::technical_summary () const
 +{
 +      return Content::technical_summary() + " - " + _("SubRip subtitles");
 +}
 +
 +string
 +SubRipContent::information () const
 +{
 +      
 +}
 +      
 +void
 +SubRipContent::as_xml (xmlpp::Node* node)
 +{
 +      node->add_child("Type")->add_child_text ("SubRip");
 +      Content::as_xml (node);
 +      SubtitleContent::as_xml (node);
 +}
 +
 +DCPTime
 +SubRipContent::full_length () const
 +{
 +      /* XXX: this assumes that the timing of the SubRip file is appropriate
 +         for the DCP's frame rate.
 +      */
 +      return _length;
 +}
 +
 +string
 +SubRipContent::identifier () const
 +{
 +      LocaleGuard lg;
 +
 +      stringstream s;
 +      s << Content::identifier()
 +        << "_" << subtitle_scale()
++        << "_" << subtitle_x_offset()
++        << "_" << subtitle_y_offset();
 +
 +      return s.str ();
 +}
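The XML constructor above now passes the metadata version through to SubtitleContent (as does FFmpegContent's, earlier in this diff), matching the state_version bump to 7 in film.cc, where the single subtitle offset becomes a y offset and an x offset is added. A hypothetical sketch of what such a version-aware loader could look like; the element and member names are assumptions, not taken from this diff:

    /* Hypothetical sketch of version-aware subtitle offset loading */
    SubtitleContent::SubtitleContent (shared_ptr<const Film> f, shared_ptr<const cxml::Node> node, int version)
            : Content (f, node)
    {
            if (version >= 7) {
                    _subtitle_x_offset = node->number_child<double> ("SubtitleXOffset");
                    _subtitle_y_offset = node->number_child<double> ("SubtitleYOffset");
            } else {
                    /* Films written before version 7 had only a single, vertical, offset */
                    _subtitle_x_offset = 0;
                    _subtitle_y_offset = node->number_child<double> ("SubtitleOffset");
            }
    }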
diff --combined src/lib/transcode_job.cc
index 8820726895446e449b12090b8f7e221a45f1b619,f0e22da64483d2166bf0adfe08e37d7946df393e..289259369d028652aa2fc684f7aabc4997c30971
@@@ -63,6 -63,7 +63,7 @@@ TranscodeJob::run (
                set_state (FINISHED_OK);
  
                _film->log()->log (N_("Transcode job completed successfully"));
+               _transcoder.reset ();
  
        } catch (...) {
                set_progress (1);
@@@ -110,6 -111,6 +111,6 @@@ TranscodeJob::remaining_time () cons
        }
  
        /* Compute approximate proposed length here, as it's only here that we need it */
 -      OutputVideoFrame const left = _film->time_to_video_frames (_film->length ()) - _transcoder->video_frames_out();
 +      VideoFrame const left = _film->time_to_video_frames (_film->length ()) - _transcoder->video_frames_out();
        return left / fps;
  }
diff --combined src/wx/film_editor.cc
index f246e99e90e8f1633819af11c29839aa9ae8bbc9,061305436a3003563fe93d8f82d46f3b9ff576e6..831a57a0286c6678fc048dcfbb9987f56828a2e4
@@@ -177,7 -177,7 +177,7 @@@ FilmEditor::make_dcp_panel (
                wxSizer* s = new wxBoxSizer (wxHORIZONTAL);
                _j2k_bandwidth = new wxSpinCtrl (_dcp_panel, wxID_ANY);
                s->Add (_j2k_bandwidth, 1);
-               add_label_to_sizer (s, _dcp_panel, _("MBps"), false);
+               add_label_to_sizer (s, _dcp_panel, _("Mbit/s"), false);
                grid->Add (s, wxGBPosition (r, 1));
        }
        ++r;
@@@ -841,7 -841,7 +841,7 @@@ FilmEditor::setup_content_sensitivity (
  
        _video_panel->Enable    (video_selection.size() > 0 && _generally_sensitive);
        _audio_panel->Enable    (audio_selection.size() > 0 && _generally_sensitive);
 -      _subtitle_panel->Enable (selection.size() == 1 && dynamic_pointer_cast<FFmpegContent> (selection.front()) && _generally_sensitive);
 +      _subtitle_panel->Enable (selection.size() == 1 && dynamic_pointer_cast<SubtitleContent> (selection.front()) && _generally_sensitive);
        _timing_panel->Enable   (selection.size() == 1 && _generally_sensitive);
  }
  
@@@ -936,7 -936,7 +936,7 @@@ FilmEditor::content_timeline_clicked (
                _timeline_dialog = 0;
        }
        
 -      _timeline_dialog = new TimelineDialog (this, _film);
 +      _timeline_dialog = new DCPTimelineDialog (this, _film);
        _timeline_dialog->Show ();
  }
  
diff --combined wscript
index 571d2b0897f90774325b1bb59f758e30aaa3b64e,1bf81bfb4214fed76153f18b2c4537c0a47e7e0d..6c97047fcdf763ef7fceb72e424928d692801159
+++ b/wscript
@@@ -3,7 -3,7 +3,7 @@@ import o
  import sys
  
  APPNAME = 'dcpomatic'
- VERSION = '1.63.5devel'
+ VERSION = '1.64.0devel'
  
  def options(opt):
      opt.load('compiler_cxx')
@@@ -145,8 -145,6 +145,8 @@@ def configure(conf)
      conf.check_cfg(package='libxml++-2.6', args='--cflags --libs', uselib_store='XML++', mandatory=True)
      conf.check_cfg(package='libcurl', args='--cflags --libs', uselib_store='CURL', mandatory=True)
      conf.check_cfg(package='libzip', args='--cflags --libs', uselib_store='ZIP', mandatory=True)
 +    conf.check_cfg(package='pangomm-1.4', args='--cflags --libs', uselib_store='PANGOMM', mandatory=True)
 +    conf.check_cfg(package='cairomm-1.0', args='--cflags --libs', uselib_store='CAIROMM', mandatory=True)
  
      conf.check_cxx(fragment="""
                              #include <boost/version.hpp>\n