Merge master.
author  Carl Hetherington <cth@carlh.net>
Tue, 20 May 2014 12:23:26 +0000 (13:23 +0100)
committer  Carl Hetherington <cth@carlh.net>
Tue, 20 May 2014 12:23:26 +0000 (13:23 +0100)
28 files changed:
ChangeLog
src/lib/colour_conversion.cc
src/lib/content_video.h
src/lib/dcp_video_frame.cc
src/lib/dcp_video_frame.h
src/lib/encoder.cc
src/lib/encoder.h
src/lib/ffmpeg_decoder.cc
src/lib/image.cc
src/lib/image.h
src/lib/image_decoder.cc
src/lib/image_decoder.h
src/lib/image_proxy.cc
src/lib/player.cc
src/lib/player.h
src/lib/player_video_frame.cc
src/lib/player_video_frame.h
src/lib/server.cc
src/lib/transcoder.cc
src/lib/types.h
src/lib/video_content.cc
src/lib/video_decoder.cc
src/lib/video_decoder.h
src/lib/wscript
src/tools/server_test.cc
src/wx/film_viewer.cc
src/wx/film_viewer.h
test/client_server_test.cc

diff --cc ChangeLog
index 33b7e2e2122049f2fdb9bc98245bce6f6b3d9c6b,da42e7ba1e65b1873ce39b9549c8e396895d10cd..6f31dd5d24fbcd839fa07968a0e32c77d0e27977
+++ b/ChangeLog
@@@ -1,7 -1,12 +1,16 @@@
 +2014-03-07  Carl Hetherington  <cth@carlh.net>
 +
 +      * Add subtitle view.
 +
+ 2014-05-19  Carl Hetherington  <cth@carlh.net>
+ 
+       * Version 1.69.9 released.
+ 
+ 2014-05-19  Carl Hetherington  <cth@carlh.net>
+ 
+       * Decode image sources in the multi-threaded part of the
+       transcoder, rather than the single-threaded part.
  2014-05-16  Carl Hetherington  <cth@carlh.net>
  
        * Version 1.69.8 released.
Simple merge
index 20b5b8dec310a066eeb17e8c4a273ec32a826f2e,0000000000000000000000000000000000000000..a7f73597c866495b0df60a2f3036bd8066d0f1f0
mode 100644,000000..100644
--- /dev/null
@@@ -1,46 -1,0 +1,48 @@@
- class Image;
 +/*
 +    Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
 +
 +    This program is free software; you can redistribute it and/or modify
 +    it under the terms of the GNU General Public License as published by
 +    the Free Software Foundation; either version 2 of the License, or
 +    (at your option) any later version.
 +
 +    This program is distributed in the hope that it will be useful,
 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 +    GNU General Public License for more details.
 +
 +    You should have received a copy of the GNU General Public License
 +    along with this program; if not, write to the Free Software
 +    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 +
 +*/
 +
 +#ifndef DCPOMATIC_CONTENT_VIDEO_H
 +#define DCPOMATIC_CONTENT_VIDEO_H
 +
-       ContentVideo (boost::shared_ptr<const Image> i, Eyes e, VideoFrame f)
++class ImageProxy;
 +
 +/** @class ContentVideo
 + *  @brief A frame of video straight out of some content.
 + */
 +class ContentVideo
 +{
 +public:
 +      ContentVideo ()
 +              : eyes (EYES_BOTH)
 +      {}
 +
-       boost::shared_ptr<const Image> image;
++      ContentVideo (boost::shared_ptr<const ImageProxy> i, Eyes e, Part p, VideoFrame f)
 +              : image (i)
 +              , eyes (e)
++              , part (p)
 +              , frame (f)
 +      {}
 +      
++      boost::shared_ptr<const ImageProxy> image;
 +      Eyes eyes;
++      Part part;
 +      VideoFrame frame;
 +};
 +
 +#endif
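
A ContentVideo now carries an ImageProxy rather than a decoded Image, together with the eye and the part of the frame it represents. As a rough sketch (not part of the patch) of how a decoder fills these in for side-by-side 3D content; decoded_image and frame_index are assumed names for illustration:

// Sketch only: tag an already-decoded frame as each eye's half of the image.
// RawImageProxy comes from the new image_proxy code elsewhere in this merge;
// decoded_image is an existing shared_ptr<Image>, frame_index a VideoFrame.
boost::shared_ptr<const ImageProxy> proxy (new RawImageProxy (decoded_image));
ContentVideo left (proxy, EYES_LEFT, PART_LEFT_HALF, frame_index);
ContentVideo right (proxy, EYES_RIGHT, PART_RIGHT_HALF, frame_index);
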
index d860c319542bd52296ea0ec37f949d11e64d06dd,5cd6a118e6256987646734b0a2d47b4187872ac3..d154ba96b074298824da3713de768e892e6e5ed2
  #include <boost/array.hpp>
  #include <boost/asio.hpp>
  #include <boost/filesystem.hpp>
 -#include <libdcp/rec709_linearised_gamma_lut.h>
 -#include <libdcp/srgb_linearised_gamma_lut.h>
 -#include <libdcp/gamma_lut.h>
 -#include <libdcp/xyz_frame.h>
 -#include <libdcp/rgb_xyz.h>
 -#include <libdcp/colour_matrix.h>
 -#include <libdcp/raw_convert.h>
 +#include <boost/lexical_cast.hpp>
+ #include <openssl/md5.h>
 +#include <dcp/gamma_lut.h>
 +#include <dcp/xyz_frame.h>
 +#include <dcp/rgb_xyz.h>
 +#include <dcp/colour_matrix.h>
 +#include <dcp/raw_convert.h>
  #include <libcxml/cxml.h>
  #include "film.h"
  #include "dcp_video_frame.h"
@@@ -120,28 -109,47 +109,44 @@@ DCPVideoFrame::DCPVideoFrame (shared_pt
  shared_ptr<EncodedData>
  DCPVideoFrame::encode_locally ()
  {
-       shared_ptr<dcp::GammaLUT> in_lut;
-       in_lut = dcp::GammaLUT::cache.get (12, _conversion.input_gamma, _conversion.input_gamma_linearised);
 -      shared_ptr<libdcp::LUT> in_lut;
 -      if (_frame->colour_conversion().input_gamma_linearised) {
 -              in_lut = libdcp::SRGBLinearisedGammaLUT::cache.get (12, _frame->colour_conversion().input_gamma);
 -      } else {
 -              in_lut = libdcp::GammaLUT::cache.get (12, _frame->colour_conversion().input_gamma);
 -      }
--
++      shared_ptr<dcp::GammaLUT> in_lut = dcp::GammaLUT::cache.get (
++              12, _frame->colour_conversion().input_gamma, _frame->colour_conversion().input_gamma_linearised
++              );
++      
        /* XXX: libdcp should probably use boost */
        
        double matrix[3][3];
        for (int i = 0; i < 3; ++i) {
                for (int j = 0; j < 3; ++j) {
-                       matrix[i][j] = _conversion.matrix (i, j);
+                       matrix[i][j] = _frame->colour_conversion().matrix (i, j);
                }
        }
-       
 -      shared_ptr<libdcp::XYZFrame> xyz = libdcp::rgb_to_xyz (
 +      shared_ptr<dcp::XYZFrame> xyz = dcp::rgb_to_xyz (
-               _image,
+               _frame->image(),
                in_lut,
-               dcp::GammaLUT::cache.get (16, 1 / _conversion.output_gamma, false),
 -              libdcp::GammaLUT::cache.get (16, 1 / _frame->colour_conversion().output_gamma),
++              dcp::GammaLUT::cache.get (16, 1 / _frame->colour_conversion().output_gamma, false),
                matrix
                );
+       {
+               MD5_CTX md5_context;
+               MD5_Init (&md5_context);
+               MD5_Update (&md5_context, xyz->data(0), 1998 * 1080 * 4);
+               MD5_Update (&md5_context, xyz->data(1), 1998 * 1080 * 4);
+               MD5_Update (&md5_context, xyz->data(2), 1998 * 1080 * 4);
+               unsigned char digest[MD5_DIGEST_LENGTH];
+               MD5_Final (digest, &md5_context);
                
+               stringstream s;
+               for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
+                       s << std::hex << std::setfill('0') << std::setw(2) << ((int) digest[i]);
+               }
+       }
        /* Set the max image and component sizes based on frame_rate */
        int max_cs_len = ((float) _j2k_bandwidth) / 8 / _frames_per_second;
-       if (_eyes == EYES_LEFT || _eyes == EYES_RIGHT) {
+       if (_frame->eyes() == EYES_LEFT || _frame->eyes() == EYES_RIGHT) {
                /* In 3D we have only half the normal bandwidth per eye */
                max_cs_len /= 2;
        }
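
To make the bandwidth arithmetic above concrete: the maximum codestream length is simply bandwidth / 8 / frame rate, halved per eye for 3D. A worked sketch with assumed numbers (250 Mbit/s at 24 fps), not values taken from the patch:

// Illustrative values only.
int const j2k_bandwidth = 250000000;    // bits per second
int const frames_per_second = 24;
int max_cs_len = ((float) j2k_bandwidth) / 8 / frames_per_second;   // ~1302083 bytes per frame
bool const three_d = true;              // assumed: we are encoding a left or right eye
if (three_d) {
	max_cs_len /= 2;                // ~651041 bytes per eye
}
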
index c51a3f02ba615c0aebdff6530577b2abe71f711f,e4006d98683af8c9ff58e9d0eafc9040686d9c56..7393efde699d1a4ad1f6560c50a102dddcb0eb8e
@@@ -100,8 -103,8 +101,8 @@@ public
  class DCPVideoFrame : public boost::noncopyable
  {
  public:
-       DCPVideoFrame (boost::shared_ptr<const Image>, int, Eyes, ColourConversion, int, int, Resolution, boost::shared_ptr<Log>);
-       DCPVideoFrame (boost::shared_ptr<const Image>, cxml::ConstNodePtr, boost::shared_ptr<Log>);
+       DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, int, int, int, Resolution, boost::shared_ptr<Log>);
 -      DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, boost::shared_ptr<const cxml::Node>, boost::shared_ptr<Log>);
++      DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, cxml::ConstNodePtr, boost::shared_ptr<Log>);
  
        boost::shared_ptr<EncodedData> encode_locally ();
        boost::shared_ptr<EncodedData> encode_remotely (ServerDescription);
index b83cbc10a71a46ad434615d2bc83a0d8e303e384,4fc2d7f81c152b760d00b0a8e0db79c2dd307c0c..2364b67a7ceccb5a1ced874efad51b83228fdcaa
@@@ -178,7 -180,7 +178,7 @@@ Encoder::frame_done (
  }
  
  void
- Encoder::process_video (shared_ptr<DCPVideo> frame)
 -Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf, bool same)
++Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf)
  {
        _waker.nudge ();
        
        rethrow ();
  
        if (_writer->can_fake_write (_video_frames_out)) {
-               _writer->fake_write (_video_frames_out, frame->eyes ());
+               _writer->fake_write (_video_frames_out, pvf->eyes ());
 -              _have_a_real_frame[pvf->eyes()] = false;
 -              frame_done ();
 -      } else if (same && _have_a_real_frame[pvf->eyes()]) {
 -              /* Use the last frame that we encoded. */
 -              _writer->repeat (_video_frames_out, pvf->eyes());
                frame_done ();
        } else {
                /* Queue this new frame for encoding */
                TIMING ("adding to queue of %1", _queue.size ());
                _queue.push_back (shared_ptr<DCPVideoFrame> (
                                          new DCPVideoFrame (
-                                                 frame->image(PIX_FMT_RGB24, false),
 -                                                pvf, _video_frames_out, _film->video_frame_rate(),
 -                                                _film->j2k_bandwidth(), _film->resolution(), _film->log()
++                                                pvf,
 +                                                _video_frames_out,
-                                                 frame->eyes(),
-                                                 frame->conversion(),
 +                                                _film->video_frame_rate(),
 +                                                _film->j2k_bandwidth(),
 +                                                _film->resolution(),
 +                                                _film->log()
                                                  )
                                          ));
                
                _condition.notify_all ();
 -              _have_a_real_frame[pvf->eyes()] = true;
        }
  
-       if (frame->eyes() != EYES_LEFT) {
+       if (pvf->eyes() != EYES_LEFT) {
                ++_video_frames_out;
        }
  }
index 6c465f816773d12635f1b11dd020a807c7a1b756,a8ee220aaac8cdea58ec3f0cd7c493dddeff93c0..ac1d74c57b39af3aa449e43fd628651108701823
@@@ -67,9 -67,10 +67,9 @@@ public
        void process_begin ();
  
        /** Call with a frame of video.
 -       *  @param pvf Video frame image.
 -       *  @param same true if pvf is the same as the last time we were called.
 +       *  @param f Video frame.
         */
-       void process_video (boost::shared_ptr<DCPVideo> f);
 -      void process_video (boost::shared_ptr<PlayerVideoFrame> pvf, bool same);
++      void process_video (boost::shared_ptr<PlayerVideoFrame> f);
  
        /** Call with some audio data */
        void process_audio (boost::shared_ptr<const AudioBuffers>);
index 9ae5f0485246d2f88b42432adcfcf479a1eb711a,7a5bf8ba832f51a538ea57775147ba20296a3c06..d251a37446ff96e2e49d514575b7719e53f44b6c
@@@ -483,10 -473,49 +484,10 @@@ FFmpegDecoder::decode_video_packet (
                shared_ptr<Image> image = i->first;
                
                if (i->second != AV_NOPTS_VALUE) {
 -
 -                      double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset;
 -
 -                      if (_just_sought) {
 -                              /* We just did a seek, so disable any attempts to correct for where we
 -                                 are / should be.
 -                              */
 -                              _video_position = rint (pts * _ffmpeg_content->video_frame_rate ());
 -                              _just_sought = false;
 -                      }
 -
 -                      double const next = _video_position / _ffmpeg_content->video_frame_rate();
 -                      double const one_frame = 1 / _ffmpeg_content->video_frame_rate ();
 -                      double delta = pts - next;
 -
 -                      while (delta > one_frame) {
 -                              /* This PTS is more than one frame forward in time of where we think we should be; emit
 -                                 a black frame.
 -                              */
 -
 -                              /* XXX: I think this should be a copy of the last frame... */
 -                              boost::shared_ptr<Image> black (
 -                                      new Image (
 -                                              static_cast<AVPixelFormat> (_frame->format),
 -                                              libdcp::Size (video_codec_context()->width, video_codec_context()->height),
 -                                              true
 -                                              )
 -                                      );
 -                              
 -                              black->make_black ();
 -                              video (shared_ptr<ImageProxy> (new RawImageProxy (image)), false, _video_position);
 -                              delta -= one_frame;
 -                      }
 -
 -                      if (delta > -one_frame) {
 -                              /* This PTS is within a frame of being right; emit this (otherwise it will be dropped) */
 -                              video (shared_ptr<ImageProxy> (new RawImageProxy (image)), false, _video_position);
 -                      }
 -                              
 +                      double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
-                       video (image, rint (pts * _ffmpeg_content->video_frame_rate ()));
++                      video (shared_ptr<ImageProxy> (new RawImageProxy (image)), rint (pts * _ffmpeg_content->video_frame_rate ()));
                } else {
 -                      shared_ptr<const Film> film = _film.lock ();
 -                      assert (film);
 -                      film->log()->log ("Dropping frame without PTS");
 +                      _log->log ("Dropping frame without PTS");
                }
        }
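
With the old seek/gap correction removed, the decoder now maps a packet's PTS straight to a content frame index. As a worked example with assumed numbers (not from the patch), a corrected PTS of 2.5 s in 24 fps content lands on frame 60:

#include <math.h>

// Sketch of the PTS -> frame index mapping used above; the values are assumed.
double const pts = 2.5;                    // seconds, after the _pts_offset correction
double const video_frame_rate = 24.0;
int const frame = rint (pts * video_frame_rate);   // 60
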
  
index 432cfbd54b9279e0b2631cc417b511066dc2ab2f,1fa55e24209ed099ba43bf03cda8693c13e57067..d4ec6f99a6381070835b2fd54a875f414e5d7f87
@@@ -39,9 -38,9 +40,10 @@@ using std::string
  using std::min;
  using std::cout;
  using std::cerr;
 +using std::list;
+ using std::stringstream;
  using boost::shared_ptr;
 -using libdcp::Size;
 +using dcp::Size;
  
  int
  Image::line_factor (int n) const
@@@ -642,23 -621,24 +644,44 @@@ Image::aligned () cons
        return _aligned;
  }
  
 -      
 +PositionImage
 +merge (list<PositionImage> images)
 +{
 +      if (images.empty ()) {
 +              return PositionImage ();
 +      }
 +
 +      dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
 +      for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
 +              all.extend (dcpomatic::Rect<int> (i->position, i->image->size().width, i->image->size().height));
 +      }
 +
 +      shared_ptr<Image> merged (new Image (images.front().image->pixel_format (), dcp::Size (all.width, all.height), true));
 +      merged->make_transparent ();
 +      for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
 +              merged->alpha_blend (i->image, i->position);
 +      }
 +
 +      return PositionImage (merged, all.position ());
 +}
++
+ string
+ Image::digest () const
+ {
+       MD5_CTX md5_context;
+       MD5_Init (&md5_context);
+       for (int i = 0; i < components(); ++i) {
+               MD5_Update (&md5_context, data()[i], line_size()[i]);
+       }
+       
+       unsigned char digest[MD5_DIGEST_LENGTH];
+       MD5_Final (digest, &md5_context);
+       
+       stringstream s;
+       for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
+               s << std::hex << std::setfill('0') << std::setw(2) << ((int) digest[i]);
+       }
+       return s.str ();
+ }
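
Image::digest() above fingerprints each component by running line_size() bytes of each plane through OpenSSL's MD5 and hex-encoding the result. A self-contained sketch of the same hex-encoding idiom (not the project code itself; note it relies on <sstream> and <iomanip>):

#include <openssl/md5.h>
#include <iomanip>
#include <sstream>
#include <string>
#include <cstddef>

// Stand-alone sketch of the hex-digest idiom above; `data` and `length`
// describe whatever buffer we want to fingerprint.
std::string
md5_hex (unsigned char const * data, size_t length)
{
	MD5_CTX ctx;
	MD5_Init (&ctx);
	MD5_Update (&ctx, data, length);

	unsigned char digest[MD5_DIGEST_LENGTH];
	MD5_Final (digest, &ctx);

	std::stringstream s;
	for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
		s << std::hex << std::setfill ('0') << std::setw (2) << int (digest[i]);
	}
	return s.str ();
}
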
diff --cc src/lib/image.h
Simple merge
index 5de0c8582facb1aca80ed8d03479bdf2928331d4,d33b64cd446086a96c092bf9c9e64e6b8292ee7a..9f83d1d896d1444579bcd157b214b389468b9f30
@@@ -39,53 -41,20 +40,21 @@@ ImageDecoder::ImageDecoder (shared_ptr<
  
  }
  
 -void
 +bool
  ImageDecoder::pass ()
  {
 -      if (_video_position >= _image_content->video_length ()) {
 -              return;
 +      if (_video_position >= _image_content->video_length().frames (_image_content->video_frame_rate ())) {
 +              return true;
        }
  
--      if (_image && _image_content->still ()) {
-               video (_image, _video_position);
-               ++_video_position;
-               return false;
 -              video (_image, true, _video_position);
 -              return;
++      if (!_image_content->still() || !_image) {
++              /* Either we need an image or we are using moving images, so load one */
++              _image.reset (new MagickImageProxy (_image_content->path (_image_content->still() ? 0 : _video_position)));
        }
-       Magick::Image* magick_image = 0;
-       boost::filesystem::path const path = _image_content->path (_image_content->still() ? 0 : _video_position);
-       
-       try {
-               magick_image = new Magick::Image (path.string ());
-       } catch (...) {
-               throw OpenFileError (path);
-       }
-       
-       dcp::Size size (magick_image->columns(), magick_image->rows());
-       _image.reset (new Image (PIX_FMT_RGB24, size, true));
-       using namespace MagickCore;
-       
-       uint8_t* p = _image->data()[0];
-       for (int y = 0; y < size.height; ++y) {
-               uint8_t* q = p;
-               for (int x = 0; x < size.width; ++x) {
-                       Magick::Color c = magick_image->pixelColor (x, y);
-                       *q++ = c.redQuantum() * 255 / QuantumRange;
-                       *q++ = c.greenQuantum() * 255 / QuantumRange;
-                       *q++ = c.blueQuantum() * 255 / QuantumRange;
-               }
-               p += _image->stride()[0];
-       }
-       delete magick_image;
--
 -      _image.reset (new MagickImageProxy (_image_content->path (_image_content->still() ? 0 : _video_position)));
 -      video (_image, false, _video_position);
++              
 +      video (_image, _video_position);
 +      ++_video_position;
 +      return false;
  }
  
  void
index 8d88df3de44eda5536e97e9a02e5560e32084c80,5b82dd85c161eedd07ccfb0c09cdc90c6dc96708..242f69477826a499d915505cd0b9486808aba68d
@@@ -34,13 -34,14 +34,13 @@@ public
                return _image_content;
        }
  
 -      /* Decoder */
 -
 -      void pass ();
 -      void seek (VideoContent::Frame, bool);
 -      bool done () const;
 +      void seek (ContentTime, bool);
  
  private:
 +      bool pass ();
 +      
        boost::shared_ptr<const ImageContent> _image_content;
-       boost::shared_ptr<Image> _image;
+       boost::shared_ptr<ImageProxy> _image;
 +      VideoFrame _video_position;
  };
  
index 0000000000000000000000000000000000000000,47ac5d3728eee1bc249d680d8b0aa94706d190fb..c74e846c99680b1e0e3f4ab0319ad17c109f1c9a
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,161 +1,161 @@@
 -#include <libdcp/util.h>
 -#include <libdcp/raw_convert.h>
+ /*
+     Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+ 
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation; either version 2 of the License, or
+     (at your option) any later version.
+ 
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+ 
+     You should have received a copy of the GNU General Public License
+     along with this program; if not, write to the Free Software
+     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ 
+ */
+ #include <Magick++.h>
 -      libdcp::Size size (
++#include <dcp/util.h>
++#include <dcp/raw_convert.h>
+ #include "image_proxy.h"
+ #include "image.h"
+ #include "exceptions.h"
+ #include "cross.h"
+ #include "i18n.h"
+ using std::cout;
+ using std::string;
+ using std::stringstream;
+ using boost::shared_ptr;
+ RawImageProxy::RawImageProxy (shared_ptr<Image> image)
+       : _image (image)
+ {
+ }
+ RawImageProxy::RawImageProxy (shared_ptr<cxml::Node> xml, shared_ptr<Socket> socket)
+ {
 -      node->add_child("Width")->add_child_text (libdcp::raw_convert<string> (_image->size().width));
 -      node->add_child("Height")->add_child_text (libdcp::raw_convert<string> (_image->size().height));
++      dcp::Size size (
+               xml->number_child<int> ("Width"), xml->number_child<int> ("Height")
+               );
+       _image.reset (new Image (PIX_FMT_RGB24, size, true));
+       _image->read_from_socket (socket);
+ }
+ shared_ptr<Image>
+ RawImageProxy::image () const
+ {
+       return _image;
+ }
+ void
+ RawImageProxy::add_metadata (xmlpp::Node* node) const
+ {
+       node->add_child("Type")->add_child_text (N_("Raw"));
 -      libdcp::Size size (magick_image->columns(), magick_image->rows());
++      node->add_child("Width")->add_child_text (dcp::raw_convert<string> (_image->size().width));
++      node->add_child("Height")->add_child_text (dcp::raw_convert<string> (_image->size().height));
+ }
+ void
+ RawImageProxy::send_binary (shared_ptr<Socket> socket) const
+ {
+       _image->write_to_socket (socket);
+ }
+ MagickImageProxy::MagickImageProxy (boost::filesystem::path path)
+ {
+       /* Read the file into a Blob */
+       
+       boost::uintmax_t const size = boost::filesystem::file_size (path);
+       FILE* f = fopen_boost (path, "rb");
+       if (!f) {
+               throw OpenFileError (path);
+       }
+               
+       uint8_t* data = new uint8_t[size];
+       if (fread (data, 1, size, f) != size) {
+               delete[] data;
+               throw ReadFileError (path);
+       }
+       
+       fclose (f);
+       _blob.update (data, size);
+       delete[] data;
+ }
+ MagickImageProxy::MagickImageProxy (shared_ptr<cxml::Node>, shared_ptr<Socket> socket)
+ {
+       uint32_t const size = socket->read_uint32 ();
+       uint8_t* data = new uint8_t[size];
+       socket->read (data, size);
+       _blob.update (data, size);
+       delete[] data;
+ }
+ shared_ptr<Image>
+ MagickImageProxy::image () const
+ {
+       if (_image) {
+               return _image;
+       }
+       Magick::Image* magick_image = 0;
+       try {
+               magick_image = new Magick::Image (_blob);
+       } catch (...) {
+               throw DecodeError (_("Could not decode image file"));
+       }
++      dcp::Size size (magick_image->columns(), magick_image->rows());
+       _image.reset (new Image (PIX_FMT_RGB24, size, true));
+       using namespace MagickCore;
+       
+       uint8_t* p = _image->data()[0];
+       for (int y = 0; y < size.height; ++y) {
+               uint8_t* q = p;
+               for (int x = 0; x < size.width; ++x) {
+                       Magick::Color c = magick_image->pixelColor (x, y);
+                       *q++ = c.redQuantum() * 255 / QuantumRange;
+                       *q++ = c.greenQuantum() * 255 / QuantumRange;
+                       *q++ = c.blueQuantum() * 255 / QuantumRange;
+               }
+               p += _image->stride()[0];
+       }
+       delete magick_image;
+       return _image;
+ }
+ void
+ MagickImageProxy::add_metadata (xmlpp::Node* node) const
+ {
+       node->add_child("Type")->add_child_text (N_("Magick"));
+ }
+ void
+ MagickImageProxy::send_binary (shared_ptr<Socket> socket) const
+ {
+       socket->write (_blob.length ());
+       socket->write ((uint8_t *) _blob.data (), _blob.length ());
+ }
+ shared_ptr<ImageProxy>
+ image_proxy_factory (shared_ptr<cxml::Node> xml, shared_ptr<Socket> socket)
+ {
+       if (xml->string_child("Type") == N_("Raw")) {
+               return shared_ptr<ImageProxy> (new RawImageProxy (xml, socket));
+       } else if (xml->string_child("Type") == N_("Magick")) {
+               return shared_ptr<MagickImageProxy> (new MagickImageProxy (xml, socket));
+       }
+       throw NetworkError (_("Unexpected image type received by server"));
+ }
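
This new file is where the ChangeLog entry about decoding image sources in the multi-threaded part of the transcoder takes effect: MagickImageProxy's constructor only reads the file into a Blob, and the expensive Magick++ decode is deferred until image() is called, which can now happen on an encoder thread or on the encode server. A minimal sketch of that usage; the path is assumed for illustration:

// Constructing the proxy is cheap: it just slurps the file into a Blob.
boost::shared_ptr<ImageProxy> proxy (new MagickImageProxy ("frame_0001.png"));   // assumed path

// ... the proxy can be queued for encoding or sent to a server ...

// The actual decode to RGB24 happens here, on whichever thread asks for pixels.
boost::shared_ptr<Image> pixels = proxy->image ();
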
index 75b5500936f9bf56fd7cb5a6bcd41ce61983bd2f,9f0f380e377c253eb1944e6d268fd7183155450b..ab0d8f3566ecb484ec491ba6314ff9cbbdd5f24b
  #include "playlist.h"
  #include "job.h"
  #include "image.h"
+ #include "image_proxy.h"
  #include "ratio.h"
 -#include "resampler.h"
  #include "log.h"
  #include "scaler.h"
- #include "dcp_video.h"
 +#include "render_subtitles.h"
 +#include "config.h"
 +#include "content_video.h"
+ #include "player_video_frame.h"
  
  using std::list;
  using std::cout;
@@@ -282,167 -340,79 +283,167 @@@ Player::process_content_text_subtitles 
  }
  
  void
 -Player::flush ()
 +Player::set_approximate_size ()
  {
 -      TimedAudioBuffers<Time> tb = _audio_merger.flush ();
 -      if (_audio && tb.audio) {
 -              Audio (tb.audio, tb.time);
 -              _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
 -      }
 +      _approximate_size = true;
 +}
  
- shared_ptr<DCPVideo>
- Player::black_dcp_video (DCPTime time) const
 -      while (_video && _video_position < _audio_position) {
 -              emit_black ();
 -      }
++shared_ptr<PlayerVideoFrame>
++Player::black_player_video_frame () const
 +{
-       return shared_ptr<DCPVideo> (
-               new DCPVideo (
-                       _black_image,
-                       EYES_BOTH,
++      return shared_ptr<PlayerVideoFrame> (
++              new PlayerVideoFrame (
++                      shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
 +                      Crop (),
 +                      _video_container_size,
 +                      _video_container_size,
 +                      Scaler::from_id ("bicubic"),
-                       Config::instance()->colour_conversions().front().conversion,
-                       time
++                      EYES_BOTH,
++                      PART_WHOLE,
++                      Config::instance()->colour_conversions().front().conversion
 +              )
 +      );
 +}
  
- shared_ptr<DCPVideo>
- Player::content_to_dcp (
 -      while (_audio && _audio_position < _video_position) {
 -              emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
++shared_ptr<PlayerVideoFrame>
++Player::content_to_player_video_frame (
 +      shared_ptr<VideoContent> content,
 +      ContentVideo content_video,
 +      list<shared_ptr<Piece> > subs,
 +      DCPTime time,
 +      dcp::Size image_size) const
 +{
-       shared_ptr<DCPVideo> dcp_video (
-               new DCPVideo (
++      shared_ptr<PlayerVideoFrame> pvf (
++              new PlayerVideoFrame (
 +                      content_video.image,
-                       content_video.eyes,
 +                      content->crop (),
 +                      image_size,
 +                      _video_container_size,
 +                      _film->scaler(),
-                       content->colour_conversion (),
-                       time
++                      content_video.eyes,
++                      content_video.part,
++                      content->colour_conversion ()
 +                      )
 +              );
 +      
 +      
 +      /* Add subtitles */
 +      
 +      list<PositionImage> sub_images;
 +      
 +      for (list<shared_ptr<Piece> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
 +              shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*i)->decoder);
 +              shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*i)->content);
 +              ContentTime const from = dcp_to_content_subtitle (*i, time);
 +              ContentTime const to = from + ContentTime::from_frames (1, content->video_frame_rate ());
 +              
 +              list<shared_ptr<ContentImageSubtitle> > image_subtitles = subtitle_decoder->get_image_subtitles (from, to);
 +              if (!image_subtitles.empty ()) {
 +                      list<PositionImage> im = process_content_image_subtitles (
 +                              subtitle_content,
 +                              image_subtitles
 +                              );
 +                      
 +                      copy (im.begin(), im.end(), back_inserter (sub_images));
 +              }
 +              
 +              if (_burn_subtitles) {
 +                      list<shared_ptr<ContentTextSubtitle> > text_subtitles = subtitle_decoder->get_text_subtitles (from, to);
 +                      if (!text_subtitles.empty ()) {
 +                              list<PositionImage> im = process_content_text_subtitles (text_subtitles);
 +                              copy (im.begin(), im.end(), back_inserter (sub_images));
 +                      }
 +              }
        }
        
-               dcp_video->set_subtitle (merge (sub_images));
 +      if (!sub_images.empty ()) {
-       return dcp_video;
++              pvf->set_subtitle (merge (sub_images));
 +      }
 +
++      return pvf;
  }
  
- /** @return All DCPVideo at the given time (there may be two frames for 3D) */
- list<shared_ptr<DCPVideo> >
 -/** Seek so that the next pass() will yield (approximately) the requested frame.
 - *  Pass accurate = true to try harder to get close to the request.
 - *  @return true on error
 - */
 -void
 -Player::seek (Time t, bool accurate)
++/** @return All PlayerVideoFrames at the given time (there may be two frames for 3D) */
++list<shared_ptr<PlayerVideoFrame> >
 +Player::get_video (DCPTime time, bool accurate)
  {
        if (!_have_valid_pieces) {
                setup_pieces ();
        }
 +      
 +      list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
 +              time,
 +              time + DCPTime::from_frames (1, _film->video_frame_rate ())
 +              );
  
-       list<shared_ptr<DCPVideo> > dcp_video;
 -      if (_pieces.empty ()) {
 -              return;
++      list<shared_ptr<PlayerVideoFrame> > pvf;
 +              
 +      if (ov.empty ()) {
 +              /* No video content at this time */
-               dcp_video.push_back (black_dcp_video (time));
-               return dcp_video;
++              pvf.push_back (black_player_video_frame ());
++              return pvf;
        }
  
-       /* Create a DCPVideo from the content's video at this time */
 -      for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
 -              shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
 -              if (!vc) {
 -                      continue;
 -              }
 -
 -              /* s is the offset of t from the start position of this content */
 -              Time s = t - vc->position ();
 -              s = max (static_cast<Time> (0), s);
 -              s = min (vc->length_after_trim(), s);
++      /* Create a PlayerVideoFrame from the content's video at this time */
  
 -              /* Hence set the piece positions to the `global' time */
 -              (*i)->video_position = (*i)->audio_position = vc->position() + s;
 -
 -              /* And seek the decoder */
 -              dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (
 -                      vc->time_to_content_video_frames (s + vc->trim_start ()), accurate
 -                      );
 +      shared_ptr<Piece> piece = ov.back ();
 +      shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
 +      assert (decoder);
 +      shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
 +      assert (content);
  
 -              (*i)->reset_repeat ();
 +      list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
 +      if (content_video.empty ()) {
-               dcp_video.push_back (black_dcp_video (time));
-               return dcp_video;
++              pvf.push_back (black_player_video_frame ());
++              return pvf;
        }
  
 -      _video_position = _audio_position = t;
 +      dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
 +      if (_approximate_size) {
 +              image_size.width &= ~3;
 +              image_size.height &= ~3;
 +      }
  
 -      /* XXX: don't seek audio because we don't need to... */
 +      for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
 +              list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (
 +                      time,
 +                      time + DCPTime::from_frames (1, _film->video_frame_rate ())
 +                      );
 +              
-               dcp_video.push_back (content_to_dcp (content, *i, subs, time, image_size));
++              pvf.push_back (content_to_player_video_frame (content, *i, subs, time, image_size));
 +      }
 +              
-       return dcp_video;
++      return pvf;
  }
  
 -void
 -Player::setup_pieces ()
 +shared_ptr<AudioBuffers>
 +Player::get_audio (DCPTime time, DCPTime length, bool accurate)
  {
 -      list<shared_ptr<Piece> > old_pieces = _pieces;
 +      if (!_have_valid_pieces) {
 +              setup_pieces ();
 +      }
  
 -      _pieces.clear ();
 +      AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());
  
 -      ContentList content = _playlist->content ();
 -      sort (content.begin(), content.end(), ContentSorter ());
 +      shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
 +      audio->make_silent ();
 +      
 +      list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
 +      if (ov.empty ()) {
 +              return audio;
 +      }
  
 -      for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
 +      for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {
  
 -              if (!(*i)->paths_valid ()) {
 +              shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
 +              assert (content);
 +              shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
 +              assert (decoder);
 +
 +              if (content->audio_frame_rate() == 0) {
 +                      /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
 +                       * audio stream).
 +                       */
                        continue;
                }
  
index e47cf53a1f00df21baed32027d65c50d675cee47,bf6260c0a912260d71a7203a0a6f845e20182db2..a96c93404e171276b7a00e0e3fdb44cb048bfba2
@@@ -40,83 -38,42 +40,62 @@@ class Playlist
  class AudioContent;
  class Piece;
  class Image;
- class DCPVideo;
 +class Decoder;
+ class Resampler;
+ class PlayerVideoFrame;
+ class ImageProxy;
+  
 +class PlayerStatistics
 +{
 +public:
 +      struct Video {
 +              Video ()
 +                      : black (0)
 +                      , repeat (0)
 +                      , good (0)
 +                      , skip (0)
 +              {}
 +              
 +              int black;
 +              int repeat;
 +              int good;
 +              int skip;
 +      } video;
 +
 +      struct Audio {
 +              Audio ()
 +                      : silence (0)
 +                      , good (0)
 +                      , skip (0)
 +              {}
 +              
 +              DCPTime silence;
 +              int64_t good;
 +              int64_t skip;
 +      } audio;
 +
 +      void dump (boost::shared_ptr<Log>) const;
 +};
 +
- /** @class PlayerImage
-  *  @brief A wrapper for an Image which contains some pending operations; these may
-  *  not be necessary if the receiver of the PlayerImage throws it away.
-  */
- class PlayerImage
- {
- public:
-       PlayerImage (boost::shared_ptr<const Image>, Crop, dcp::Size, dcp::Size, Scaler const *);
-       void set_subtitle (boost::shared_ptr<const Image>, Position<int>);
-       
-       boost::shared_ptr<Image> image ();
-       
- private:
-       boost::shared_ptr<const Image> _in;
-       Crop _crop;
-       dcp::Size _inter_size;
-       dcp::Size _out_size;
-       Scaler const * _scaler;
-       boost::shared_ptr<const Image> _subtitle_image;
-       Position<int> _subtitle_position;
- };
  /** @class Player
 - *  @brief A class which can `play' a Playlist; emitting its audio and video.
 + *  @brief A class which can `play' a Playlist.
   */
  class Player : public boost::enable_shared_from_this<Player>, public boost::noncopyable
  {
  public:
        Player (boost::shared_ptr<const Film>, boost::shared_ptr<const Playlist>);
  
-       std::list<boost::shared_ptr<DCPVideo> > get_video (DCPTime time, bool accurate);
 -      void disable_video ();
 -      void disable_audio ();
 -
 -      bool pass ();
 -      void seek (Time, bool);
++      std::list<boost::shared_ptr<PlayerVideoFrame> > get_video (DCPTime time, bool accurate);
 +      boost::shared_ptr<AudioBuffers> get_audio (DCPTime time, DCPTime length, bool accurate);
  
 -      Time video_position () const {
 -              return _video_position;
 +      void set_video_container_size (dcp::Size);
 +      void set_approximate_size ();
 +      void set_burn_subtitles (bool burn) {
 +              _burn_subtitles = burn;
        }
  
 -      void set_video_container_size (libdcp::Size);
 -
 -      bool repeat_last_video ();
 -
 -      /** Emitted when a video frame is ready.
 -       *  First parameter is the video image.
 -       *  Second parameter is true if the frame is the same as the last one that was emitted.
 -       *  Third parameter is the time.
 -       */
 -      boost::signals2::signal<void (boost::shared_ptr<PlayerVideoFrame>, bool, Time)> Video;
 +      PlayerStatistics const & statistics () const;
        
 -      /** Emitted when some audio data is ready */
 -      boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, Time)> Audio;
 -
        /** Emitted when something has changed such that if we went back and emitted
         *  the last frame again it would look different.  This is not emitted after
         *  a seek.
@@@ -133,50 -89,22 +112,50 @@@ private
        void setup_pieces ();
        void playlist_changed ();
        void content_changed (boost::weak_ptr<Content>, int, bool);
 -      void do_seek (Time, bool);
        void flush ();
 -      void emit_black ();
 -      void emit_silence (OutputAudioFrame);
 -      boost::shared_ptr<Resampler> resampler (boost::shared_ptr<AudioContent>, bool);
        void film_changed (Film::Property);
 -      void update_subtitle ();
 -
 +      std::list<PositionImage> process_content_image_subtitles (
 +              boost::shared_ptr<SubtitleContent>, std::list<boost::shared_ptr<ContentImageSubtitle> >
 +              ) const;
 +      std::list<PositionImage> process_content_text_subtitles (std::list<boost::shared_ptr<ContentTextSubtitle> >) const;
 +      void update_subtitle_from_text ();
 +      VideoFrame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
 +      AudioFrame dcp_to_content_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
 +      ContentTime dcp_to_content_subtitle (boost::shared_ptr<const Piece> piece, DCPTime t) const;
-       boost::shared_ptr<DCPVideo> black_dcp_video (DCPTime) const;
-       boost::shared_ptr<DCPVideo> content_to_dcp (
++      boost::shared_ptr<PlayerVideoFrame> black_player_video_frame () const;
++      boost::shared_ptr<PlayerVideoFrame> content_to_player_video_frame (
 +              boost::shared_ptr<VideoContent> content,
 +              ContentVideo content_video,
 +              std::list<boost::shared_ptr<Piece> > subs,
 +              DCPTime time,
 +              dcp::Size image_size
 +              ) const;
 +
 +      /** @return Pieces of content type C that overlap a specified time range in the DCP */
 +      template<class C>
 +      std::list<boost::shared_ptr<Piece> >
 +      overlaps (DCPTime from, DCPTime to)
 +      {
 +              if (!_have_valid_pieces) {
 +                      setup_pieces ();
 +              }
 +
 +              std::list<boost::shared_ptr<Piece> > overlaps;
 +              for (typename std::list<boost::shared_ptr<Piece> >::const_iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
 +                      if (!boost::dynamic_pointer_cast<C> ((*i)->content)) {
 +                              continue;
 +                      }
 +
 +                      if ((*i)->content->position() <= to && (*i)->content->end() >= from) {
 +                              overlaps.push_back (*i);
 +                      }
 +              }
 +              
 +              return overlaps;
 +      }
 +      
        boost::shared_ptr<const Film> _film;
        boost::shared_ptr<const Playlist> _playlist;
 -      
 -      bool _video;
 -      bool _audio;
  
        /** Our pieces are ready to go; if this is false the pieces must be (re-)created before they are used */
        bool _have_valid_pieces;
index 0000000000000000000000000000000000000000,c96ed3a33d9e14753dcccdbd55aa2a3dfd87a230..4258c63614cf25fd6585090b329b4af06cb73c2f
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,148 +1,146 @@@
 -#include <libdcp/raw_convert.h>
+ /*
+     Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ 
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation; either version 2 of the License, or
+     (at your option) any later version.
+ 
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+ 
+     You should have received a copy of the GNU General Public License
+     along with this program; if not, write to the Free Software
+     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ 
+ */
 -using libdcp::raw_convert;
++#include <dcp/raw_convert.h>
+ #include "player_video_frame.h"
+ #include "image.h"
+ #include "image_proxy.h"
+ #include "scaler.h"
+ using std::string;
+ using std::cout;
+ using boost::shared_ptr;
 -      libdcp::Size inter_size,
 -      libdcp::Size out_size,
++using dcp::raw_convert;
+ PlayerVideoFrame::PlayerVideoFrame (
+       shared_ptr<const ImageProxy> in,
+       Crop crop,
 -      _inter_size = libdcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
 -      _out_size = libdcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
++      dcp::Size inter_size,
++      dcp::Size out_size,
+       Scaler const * scaler,
+       Eyes eyes,
+       Part part,
+       ColourConversion colour_conversion
+       )
+       : _in (in)
+       , _crop (crop)
+       , _inter_size (inter_size)
+       , _out_size (out_size)
+       , _scaler (scaler)
+       , _eyes (eyes)
+       , _part (part)
+       , _colour_conversion (colour_conversion)
+ {
+ }
+ PlayerVideoFrame::PlayerVideoFrame (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket)
+ {
+       _crop = Crop (node);
 -              _subtitle_position = Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY"));
++      _inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
++      _out_size = dcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
+       _scaler = Scaler::from_id (node->string_child ("Scaler"));
+       _eyes = (Eyes) node->number_child<int> ("Eyes");
+       _part = (Part) node->number_child<int> ("Part");
+       _colour_conversion = ColourConversion (node);
+       _in = image_proxy_factory (node->node_child ("In"), socket);
+       if (node->optional_number_child<int> ("SubtitleX")) {
+               
 -              shared_ptr<Image> image (
 -                      new Image (PIX_FMT_RGBA, libdcp::Size (node->number_child<int> ("SubtitleWidth"), node->number_child<int> ("SubtitleHeight")), true)
++              _subtitle.position = Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY"));
 -              image->read_from_socket (socket);
 -              _subtitle_image = image;
++              _subtitle.image.reset (
++                      new Image (PIX_FMT_RGBA, dcp::Size (node->number_child<int> ("SubtitleWidth"), node->number_child<int> ("SubtitleHeight")), true)
+                       );
+               
 -PlayerVideoFrame::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
++              _subtitle.image->read_from_socket (socket);
+       }
+ }
+ void
 -      _subtitle_image = image;
 -      _subtitle_position = pos;
++PlayerVideoFrame::set_subtitle (PositionImage image)
+ {
 -      if (_subtitle_image) {
 -              out->alpha_blend (_subtitle_image, _subtitle_position);
++      _subtitle = image;
+ }
+ shared_ptr<Image>
+ PlayerVideoFrame::image () const
+ {
+       shared_ptr<Image> im = _in->image ();
+       
+       Crop total_crop = _crop;
+       switch (_part) {
+       case PART_LEFT_HALF:
+               total_crop.right += im->size().width / 2;
+               break;
+       case PART_RIGHT_HALF:
+               total_crop.left += im->size().width / 2;
+               break;
+       case PART_TOP_HALF:
+               total_crop.bottom += im->size().height / 2;
+               break;
+       case PART_BOTTOM_HALF:
+               total_crop.top += im->size().height / 2;
+               break;
+       default:
+               break;
+       }
+               
+       shared_ptr<Image> out = im->crop_scale_window (total_crop, _inter_size, _out_size, _scaler, PIX_FMT_RGB24, false);
+       Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);
 -      if (_subtitle_image) {
 -              node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_subtitle_image->size().width));
 -              node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_subtitle_image->size().height));
 -              node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_subtitle_position.x));
 -              node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_subtitle_position.y));
++      if (_subtitle.image) {
++              out->alpha_blend (_subtitle.image, _subtitle.position);
+       }
+       return out;
+ }
+ void
+ PlayerVideoFrame::add_metadata (xmlpp::Node* node) const
+ {
+       _crop.as_xml (node);
+       _in->add_metadata (node->add_child ("In"));
+       node->add_child("InterWidth")->add_child_text (raw_convert<string> (_inter_size.width));
+       node->add_child("InterHeight")->add_child_text (raw_convert<string> (_inter_size.height));
+       node->add_child("OutWidth")->add_child_text (raw_convert<string> (_out_size.width));
+       node->add_child("OutHeight")->add_child_text (raw_convert<string> (_out_size.height));
+       node->add_child("Scaler")->add_child_text (_scaler->id ());
+       node->add_child("Eyes")->add_child_text (raw_convert<string> (_eyes));
+       node->add_child("Part")->add_child_text (raw_convert<string> (_part));
+       _colour_conversion.as_xml (node);
 -      if (_subtitle_image) {
 -              _subtitle_image->write_to_socket (socket);
++      if (_subtitle.image) {
++              node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_subtitle.image->size().width));
++              node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_subtitle.image->size().height));
++              node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_subtitle.position.x));
++              node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_subtitle.position.y));
+       }
+ }
+ void
+ PlayerVideoFrame::send_binary (shared_ptr<Socket> socket) const
+ {
+       _in->send_binary (socket);
++      if (_subtitle.image) {
++              _subtitle.image->write_to_socket (socket);
+       }
+ }
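
To make the Part handling in image() concrete: for an assumed 1920-pixel-wide side-by-side 3D source, PART_LEFT_HALF adds half the width to the right crop, so crop_scale_window() only ever sees the left eye's 960-pixel-wide region. A small sketch of just that arithmetic:

// Assumed source width for illustration.
int const width = 1920;
Crop total_crop;                 // Crop () is assumed to mean no crop, as in the black frame in player.cc
total_crop.right += width / 2;   // PART_LEFT_HALF: discard the right-hand 960 pixels
// crop_scale_window() then works on an effective 960-wide left-eye image.
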
index 0000000000000000000000000000000000000000,6461134a9c60649cb106292c1633d4fd36ec3c1d..225b0a4bafe43c8dee4be008b8e5bdb5b519fb1b
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,66 +1,66 @@@
 -      PlayerVideoFrame (boost::shared_ptr<const ImageProxy>, Crop, libdcp::Size, libdcp::Size, Scaler const *, Eyes, Part, ColourConversion);
+ /*
+     Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ 
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation; either version 2 of the License, or
+     (at your option) any later version.
+ 
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+ 
+     You should have received a copy of the GNU General Public License
+     along with this program; if not, write to the Free Software
+     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ 
+ */
+ #include <boost/shared_ptr.hpp>
+ #include "types.h"
+ #include "position.h"
+ #include "colour_conversion.h"
++#include "position_image.h"
+ class Image;
+ class ImageProxy;
+ class Scaler;
+ class Socket;
+ /** Everything needed to describe a video frame coming out of the player, but with the
+  *  bits still in their raw form.  We may want to combine the bits on a remote machine,
+  *  or maybe not even bother to combine them at all.
+  */
+ class PlayerVideoFrame
+ {
+ public:
 -      void set_subtitle (boost::shared_ptr<const Image>, Position<int>);
++      PlayerVideoFrame (boost::shared_ptr<const ImageProxy>, Crop, dcp::Size, dcp::Size, Scaler const *, Eyes, Part, ColourConversion);
+       PlayerVideoFrame (boost::shared_ptr<cxml::Node>, boost::shared_ptr<Socket>);
 -      libdcp::Size _inter_size;
 -      libdcp::Size _out_size;
++      void set_subtitle (PositionImage);
+       
+       boost::shared_ptr<Image> image () const;
+       void add_metadata (xmlpp::Node* node) const;
+       void send_binary (boost::shared_ptr<Socket> socket) const;
+       Eyes eyes () const {
+               return _eyes;
+       }
+       ColourConversion colour_conversion () const {
+               return _colour_conversion;
+       }
+ private:
+       boost::shared_ptr<const ImageProxy> _in;
+       Crop _crop;
 -      boost::shared_ptr<const Image> _subtitle_image;
 -      Position<int> _subtitle_position;
++      dcp::Size _inter_size;
++      dcp::Size _out_size;
+       Scaler const * _scaler;
+       Eyes _eyes;
+       Part _part;
+       ColourConversion _colour_conversion;
++      PositionImage _subtitle;
+ };
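
The reason for keeping the frame in this raw form shows up in the add_metadata()/send_binary() pair: a client can describe a frame as XML plus a binary payload, and a server can rebuild it and do the cropping, scaling and subtitle blending there. A hedged sketch of that round trip; the socket, the parsed cxml node and the root element name are all assumed here rather than taken from server.cc:

// Client side (sketch): `frame` is an existing shared_ptr<PlayerVideoFrame>.
xmlpp::Document doc;
xmlpp::Element* root = doc.create_root_node ("EncodingRequest");   // element name assumed
frame->add_metadata (root);
// ... write the XML document to `socket`, then ...
frame->send_binary (socket);

// Server side (sketch): `xml` is the received document parsed into a cxml::Node.
boost::shared_ptr<PlayerVideoFrame> received (new PlayerVideoFrame (xml, socket));
boost::shared_ptr<Image> combined = received->image ();   // crop/scale/subtitle happen here
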
Simple merge
index cc41b4256e2861630a312bd968fec4137e1dd9e9,b11ce8be599aaab0d8c3aa63f210199066fdd279..a4cd36a4fb509f17ba6f58eae2729e0bb2901060
@@@ -58,15 -74,7 +58,15 @@@ voi
  Transcoder::go ()
  {
        _encoder->process_begin ();
 -      while (!_player->pass ()) {}
 +
 +      DCPTime const frame = DCPTime::from_frames (1, _film->video_frame_rate ());
 +      for (DCPTime t; t < _film->length(); t += frame) {
-               list<shared_ptr<DCPVideo> > v = _player->get_video (t, true);
-               for (list<shared_ptr<DCPVideo> >::const_iterator i = v.begin(); i != v.end(); ++i) {
++              list<shared_ptr<PlayerVideoFrame> > v = _player->get_video (t, true);
++              for (list<shared_ptr<PlayerVideoFrame> >::const_iterator i = v.begin(); i != v.end(); ++i) {
 +                      _encoder->process_video (*i);
 +              }
 +              _encoder->process_audio (_player->get_audio (t, frame, true));
 +      }
  
        _finishing = true;
        _encoder->process_end ();
diff --cc src/lib/types.h
index 35c7a91f9785865e1dada8d2449fa725f4149ae4,3fab302fc4fe4a26ababa059e4ad3b263a333d16..e858d1e1feb6b7827ce09d79822e5817b4fa3df0
@@@ -38,8 -44,13 +46,8 @@@ namespace xmlpp 
   *  with servers.  Intended to be bumped when incompatibilities
   *  are introduced.
   */
- #define SERVER_LINK_VERSION 1
+ #define SERVER_LINK_VERSION 2
  
 -typedef int64_t Time;
 -#define TIME_MAX INT64_MAX
 -#define TIME_HZ        ((Time) 96000)
 -typedef int64_t OutputAudioFrame;
 -typedef int   OutputVideoFrame;
  typedef std::vector<boost::shared_ptr<Content> > ContentList;
  typedef std::vector<boost::shared_ptr<VideoContent> > VideoContentList;
  typedef std::vector<boost::shared_ptr<AudioContent> > AudioContentList;
Simple merge
index 1b6da8a91133a387b62813054cfd8bb2baf506eb,5867ac9257aacc31f8c712039c093f531e636771..43b1049ccaf5f89920147f040cf5e96635f66675
@@@ -39,139 -33,32 +39,134 @@@ VideoDecoder::VideoDecoder (shared_ptr<
  
  }
  
 +list<ContentVideo>
 +VideoDecoder::decoded_video (VideoFrame frame)
 +{
 +      list<ContentVideo> output;
 +      
 +      for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
 +              if (i->frame == frame) {
 +                      output.push_back (*i);
 +              }
 +      }
 +
 +      return output;
 +}
 +
 +/** Get all frames which exist in the content at a given frame index.
 + *  @param frame Frame index.
 + *  @param accurate true to try hard to return frames at the precise time that was requested, otherwise frames nearby may be returned.
 + *  @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
 + */
 +list<ContentVideo>
 +VideoDecoder::get_video (VideoFrame frame, bool accurate)
 +{
 +      if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
 +              /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
 +              seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
 +      }
 +
 +      list<ContentVideo> dec;
 +
 +      /* Now enough pass() calls should either:
 +       *  (a) give us what we want, or
 +       *  (b) hit the end of the decoder.
 +       */
 +      if (accurate) {
 +              /* We are being accurate, so we want the right frame.
 +               * This could all be one statement but it's split up for clarity.
 +               */
 +              while (true) {
 +                      if (!decoded_video(frame).empty ()) {
 +                              /* We got what we want */
 +                              break;
 +                      }
 +
 +                      if (pass ()) {
 +                              /* The decoder has nothing more for us */
 +                              break;
 +                      }
 +
 +                      if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
 +                              /* We're never going to get the frame we want.  Perhaps the caller is asking
 +                               * for a video frame before the content's video starts (if its audio
 +                               * begins before its video, for example).
 +                               */
 +                              break;
 +                      }
 +              }
 +
 +              dec = decoded_video (frame);
 +      } else {
 +              /* Any frame will do: use the first one that comes out of pass() */
 +              while (_decoded_video.empty() && !pass ()) {}
 +              if (!_decoded_video.empty ()) {
 +                      dec.push_back (_decoded_video.front ());
 +              }
 +      }
 +
 +      /* Clean up decoded_video */
 +      while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
 +              _decoded_video.pop_front ();
 +      }
 +
 +      return dec;
 +}
 +
 +
 +/** Called by subclasses when they have a video frame ready */
  void
- VideoDecoder::video (shared_ptr<const Image> image, VideoFrame frame)
 -VideoDecoder::video (shared_ptr<const ImageProxy> image, bool same, VideoContent::Frame frame)
++VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
  {
 +      /* We should not receive the same thing twice */
 +      assert (_decoded_video.empty() || frame != _decoded_video.back().frame);
 +
 +      /* Fill in any gap since the last frame we were given by repeating that frame */
 +      /* XXX: this gap-filling is not yet correct for 3D content */
 +
 +      while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
 +#ifdef DCPOMATIC_DEBUG
 +              test_gaps++;
 +#endif
 +              _decoded_video.push_back (
 +                      ContentVideo (
 +                              _decoded_video.back().image,
 +                              _decoded_video.back().eyes,
++                              _decoded_video.back().part,
 +                              _decoded_video.back().frame + 1
 +                              )
 +                      );
 +      }
 +      
        switch (_video_content->video_frame_type ()) {
        case VIDEO_FRAME_TYPE_2D:
-               _decoded_video.push_back (ContentVideo (image, EYES_BOTH, frame));
 -              Video (image, EYES_BOTH, PART_WHOLE, same, frame);
++              _decoded_video.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
                break;
        case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-               _decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame));
 -              Video (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, same, frame / 2);
++              _decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
                break;
        case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-       {
-               int const half = image->size().width / 2;
-               _decoded_video.push_back (ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame));
-               _decoded_video.push_back (ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame));
 -              Video (image, EYES_LEFT, PART_LEFT_HALF, same, frame);
 -              Video (image, EYES_RIGHT, PART_RIGHT_HALF, same, frame);
++              _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
++              _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
                break;
-       }
        case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-       {
-               int const half = image->size().height / 2;
-               _decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame));
-               _decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame));
 -              Video (image, EYES_LEFT, PART_TOP_HALF, same, frame);
 -              Video (image, EYES_RIGHT, PART_BOTTOM_HALF, same, frame);
++              _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
++              _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
                break;
-       }
        case VIDEO_FRAME_TYPE_3D_LEFT:
-               _decoded_video.push_back (ContentVideo (image, EYES_LEFT, frame));
 -              Video (image, EYES_LEFT, PART_WHOLE, same, frame);
++              _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
                break;
        case VIDEO_FRAME_TYPE_3D_RIGHT:
-               _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, frame));
 -              Video (image, EYES_RIGHT, PART_WHOLE, same, frame);
++              _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
                break;
 +      default:
 +              assert (false);
        }
 -      
 -      _video_position = frame + 1;
 +}
 +
 +void
 +VideoDecoder::seek (ContentTime, bool)
 +{
 +      _decoded_video.clear ();
  }
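
A minimal sketch of how a caller might drive the pull interface added above.  Only
get_video() and the ContentVideo fields come from this change; the way the decoder is
constructed and the use_frame() consumer are placeholders for illustration.

      // 'decoder' is any concrete VideoDecoder subclass, obtained elsewhere.
      boost::shared_ptr<VideoDecoder> decoder = make_video_decoder (content);  // placeholder helper

      // Ask for frame 48, trying hard to get exactly that frame.
      std::list<ContentVideo> v = decoder->get_video (48, true);

      // 2D content yields at most one ContentVideo; 3D content yields one per eye.
      for (std::list<ContentVideo>::const_iterator i = v.begin(); i != v.end(); ++i) {
              // i->image is an ImageProxy rather than a decoded Image, so the pixel
              // data can be fetched later by whoever consumes the frame.
              use_frame (i->image, i->eyes, i->part, i->frame);  // placeholder consumer
      }
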
  
index 145baa40b0ca115acda97f8b33376d8712890193,42add42aacc547c3be5e2bced87882dfdacecd0b..2c0028fd1bcfa4ce6c6a8c831452608b89467041
  #include "decoder.h"
  #include "video_content.h"
  #include "util.h"
 +#include "content_video.h"
  
  class VideoContent;
- class Image;
+ class ImageProxy;
  
 +/** @class VideoDecoder
 + *  @brief Parent for classes which decode video.
 + */
  class VideoDecoder : public virtual Decoder
  {
  public:
 -      VideoDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const VideoContent>);
 -
 -      /** Seek so that the next pass() will yield (approximately) the requested frame.
 -       *  Pass accurate = true to try harder to get close to the request.
 -       */
 -      virtual void seek (VideoContent::Frame frame, bool accurate) = 0;
 -
 -      /** Emitted when a video frame is ready.
 -       *  First parameter is the video image.
 -       *  Second parameter is the eye(s) which should see this image.
 -       *  Third parameter is the part of this image that should be used.
 -       *  Fourth parameter is true if the image is the same as the last one that was emitted for this Eyes value.
 -       *  Fourth parameter is the frame within our source.
 -       */
 -      boost::signals2::signal<void (boost::shared_ptr<const ImageProxy>, Eyes, Part, bool, VideoContent::Frame)> Video;
 -      
 +      VideoDecoder (boost::shared_ptr<const VideoContent> c);
 +
 +      std::list<ContentVideo> get_video (VideoFrame frame, bool accurate);
 +
 +      boost::shared_ptr<const VideoContent> video_content () const {
 +              return _video_content;
 +      }
 +
 +#ifdef DCPOMATIC_DEBUG
 +      int test_gaps;
 +#endif
 +
  protected:
  
 -      void video (boost::shared_ptr<const ImageProxy>, bool, VideoContent::Frame);
 +      void seek (ContentTime time, bool accurate);
-       void video (boost::shared_ptr<const Image>, VideoFrame frame);
++      void video (boost::shared_ptr<const ImageProxy>, VideoFrame frame);
 +      std::list<ContentVideo> decoded_video (VideoFrame frame);
 +
        boost::shared_ptr<const VideoContent> _video_content;
 -      /** This is in frames without taking 3D into account (e.g. if we are doing 3D alternate,
 -       *  this would equal 2 on the left-eye second frame (not 1)).
 -       */
 -      VideoContent::Frame _video_position;
 +      std::list<ContentVideo> _decoded_video;
  };
  
  #endif
diff --cc src/lib/wscript
index 433f50b3fdca4565902baf3b4b7e7246a59514d4,72e149879196a20873dbfdb1bc698b06a1320d85..8f26c53c6e9f6c4456ee4d3123c38b2491d89e0c
@@@ -17,9 -16,8 +17,8 @@@ sources = ""
            cross.cc
            dci_metadata.cc
            dcp_content_type.cc
-           dcp_video.cc
            dcp_video_frame.cc
 -          decoder.cc
 +          dcpomatic_time.cc
            dolby_cp750.cc
            encoder.cc
            examine_content_job.cc
            kdm.cc
            json_server.cc
            log.cc
 -          piece.cc
            player.cc
+           player_video_frame.cc
            playlist.cc
            ratio.cc
 +          render_subtitles.cc
            resampler.cc
            scp_dcp_job.cc
            scaler.cc
index ba16697564705fe25afe03dc2458522e4de9899a,896517b270ae0bfc293f3505d208abb491a213fd..3c2ea4b36068ecbde2eae65658cc04b75d513536
@@@ -45,27 -45,18 +45,18 @@@ using boost::shared_ptr
  static shared_ptr<Film> film;
  static ServerDescription* server;
  static shared_ptr<FileLog> log_ (new FileLog ("servomatictest.log"));
 -static int frame = 0;
 +static int frame_count = 0;
  
  void
- process_video (shared_ptr<DCPVideo> frame)
+ process_video (shared_ptr<PlayerVideoFrame> pvf)
  {
-       shared_ptr<DCPVideoFrame> local  (
-               new DCPVideoFrame (
-                       frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
-                       )
-               );
-       
-       shared_ptr<DCPVideoFrame> remote (
-               new DCPVideoFrame (
-                       frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
-                       )
-               );
 -      shared_ptr<DCPVideoFrame> local  (new DCPVideoFrame (pvf, frame, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
 -      shared_ptr<DCPVideoFrame> remote (new DCPVideoFrame (pvf, frame, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
++      shared_ptr<DCPVideoFrame> local  (new DCPVideoFrame (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
++      shared_ptr<DCPVideoFrame> remote (new DCPVideoFrame (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
  
 -      cout << "Frame " << frame << ": ";
 +      cout << "Frame " << frame_count << ": ";
        cout.flush ();
  
 -      ++frame;
 +      ++frame_count;
  
        shared_ptr<EncodedData> local_encoded = local->encode_locally ();
        shared_ptr<EncodedData> remote_encoded;
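
A sketch of how the local/remote comparison might then be completed.  The
encode_remotely() signature, the EncodedData size()/data() accessors and the use of
memcmp (which needs <cstring>) are assumptions for illustration; only encode_locally()
and the DCPVideoFrame construction appear in the hunk above.

      remote_encoded = remote->encode_remotely (*server);  // assumed signature

      // The locally- and remotely-encoded JPEG2000 data should be identical.
      bool const same =
              local_encoded->size() == remote_encoded->size()
              && memcmp (local_encoded->data(), remote_encoded->data(), local_encoded->size()) == 0;

      cout << (same ? "ok" : "MISMATCH") << "\n";
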
index e517c9ccaf55c2cc79c293b7a8211bbc3453271a,ed1a2ce4109293b3d932be8fabacb2bc4477878b..c848fe09b6ffa5a3f99d52f17090e837bc8e9ce3
  #include "lib/examine_content_job.h"
  #include "lib/filter.h"
  #include "lib/player.h"
+ #include "lib/player_video_frame.h"
  #include "lib/video_content.h"
  #include "lib/video_decoder.h"
- #include "lib/dcp_video.h"
 +#include "lib/timer.h"
  #include "film_viewer.h"
  #include "wx_util.h"
  
@@@ -150,17 -151,15 +150,17 @@@ FilmViewer::get (DCPTime p, bool accura
                return;
        }
  
-       list<shared_ptr<DCPVideo> > dcp_video = _player->get_video (p, accurate);
-       if (!dcp_video.empty ()) {
-               _frame = dcp_video.front()->image (PIX_FMT_BGRA, true);
 -      /* We could do this with a seek and a fetch_next_frame, but this is
 -         a shortcut to make it quicker.
 -      */
 -
 -      _got_frame = false;
 -      if (!_player->repeat_last_video ()) {
 -              fetch_next_frame ();
++      list<shared_ptr<PlayerVideoFrame> > pvf = _player->get_video (p, accurate);
++      if (!pvf.empty ()) {
++              _frame = pvf.front()->image ();
 +              _frame = _frame->scale (_frame->size(), Scaler::from_id ("fastbilinear"), PIX_FMT_RGB24, false);
 +      } else {
 +              _frame.reset ();
        }
 +
 +      _position = p;
        
 +      set_position_text ();
        _panel->Refresh ();
        _panel->Update ();
  }
Simple merge
index a459e6c71449721620c61ea473c8287b4facb446,c8a2b49efdcd88f83133ec6e25d81cc3175a5225..8e4fb0e1867e845e644864012eae8812b2468183
@@@ -75,8 -70,20 +78,20 @@@ BOOST_AUTO_TEST_CASE (client_server_tes
                p += sub_image->stride()[0];
        }
  
-       /* XXX */
- //    shared_ptr<Subtitle> subtitle (new Subtitle (Position<int> (50, 60), sub_image));
+       shared_ptr<PlayerVideoFrame> pvf (
+               new PlayerVideoFrame (
+                       shared_ptr<ImageProxy> (new RawImageProxy (image)),
+                       Crop (),
 -                      libdcp::Size (1998, 1080),
 -                      libdcp::Size (1998, 1080),
++                      dcp::Size (1998, 1080),
++                      dcp::Size (1998, 1080),
+                       Scaler::from_id ("bicubic"),
+                       EYES_BOTH,
+                       PART_WHOLE,
+                       ColourConversion ()
+                       )
+               );
 -      pvf->set_subtitle (sub_image, Position<int> (50, 60));
++      pvf->set_subtitle (PositionImage (sub_image, Position<int> (50, 60)));
  
        shared_ptr<FileLog> log (new FileLog ("build/test/client_server_test.log"));