+2014-03-07 Carl Hetherington <cth@carlh.net>
+
+ * Add subtitle view.
+
+2014-05-19 Carl Hetherington <cth@carlh.net>
+
+ * Version 1.69.9 released.
+
+2014-05-19 Carl Hetherington <cth@carlh.net>
+
+ * Decode image sources in the multi-threaded part
+ of the transcoder, rather than the single-threaded.
+
2014-05-16 Carl Hetherington <cth@carlh.net>
* Version 1.69.8 released.
*/
#include <libxml++/libxml++.h>
-#include <libdcp/colour_matrix.h>
-#include <libdcp/raw_convert.h>
+#include <dcp/colour_matrix.h>
+#include <dcp/raw_convert.h>
#include <libcxml/cxml.h>
#include "config.h"
#include "colour_conversion.h"
using std::list;
using std::string;
+ using std::stringstream;
using std::cout;
using std::vector;
using boost::shared_ptr;
using boost::optional;
-using libdcp::raw_convert;
+using dcp::raw_convert;
ColourConversion::ColourConversion ()
: input_gamma (2.4)
{
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
- matrix (i, j) = libdcp::colour_matrix::srgb_to_xyz[i][j];
+ matrix (i, j) = dcp::colour_matrix::srgb_to_xyz[i][j];
}
}
}
--- /dev/null
- class Image;
+/*
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_CONTENT_VIDEO_H
+#define DCPOMATIC_CONTENT_VIDEO_H
+
- ContentVideo (boost::shared_ptr<const Image> i, Eyes e, VideoFrame f)
++class ImageProxy;
+
+/** @class ContentVideo
+ * @brief A frame of video straight out of some content.
+ */
+class ContentVideo
+{
+public:
+ ContentVideo ()
+ : eyes (EYES_BOTH)
+ {}
+
- boost::shared_ptr<const Image> image;
++ ContentVideo (boost::shared_ptr<const ImageProxy> i, Eyes e, Part p, VideoFrame f)
+ : image (i)
+ , eyes (e)
++ , part (p)
+ , frame (f)
+ {}
+
++ boost::shared_ptr<const ImageProxy> image;
+ Eyes eyes;
++ Part part;
+ VideoFrame frame;
+};
+
+#endif
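/* A minimal usage sketch (assuming the Eyes/Part enumerators EYES_BOTH and
   PART_WHOLE and the RawImageProxy class seen elsewhere in this change): a
   decoder wraps its decoded Image in an ImageProxy and stamps it with the
   frame index.

	boost::shared_ptr<Image> image (new Image (PIX_FMT_RGB24, dcp::Size (1998, 1080), true));
	boost::shared_ptr<const ImageProxy> proxy (new RawImageProxy (image));
	ContentVideo cv (proxy, EYES_BOTH, PART_WHOLE, 0);
*/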
#include <boost/array.hpp>
#include <boost/asio.hpp>
#include <boost/filesystem.hpp>
-#include <libdcp/rec709_linearised_gamma_lut.h>
-#include <libdcp/srgb_linearised_gamma_lut.h>
-#include <libdcp/gamma_lut.h>
-#include <libdcp/xyz_frame.h>
-#include <libdcp/rgb_xyz.h>
-#include <libdcp/colour_matrix.h>
-#include <libdcp/raw_convert.h>
+#include <boost/lexical_cast.hpp>
+ #include <openssl/md5.h>
+#include <dcp/gamma_lut.h>
+#include <dcp/xyz_frame.h>
+#include <dcp/rgb_xyz.h>
+#include <dcp/colour_matrix.h>
+#include <dcp/raw_convert.h>
#include <libcxml/cxml.h>
#include "film.h"
#include "dcp_video_frame.h"
#include "image.h"
#include "log.h"
#include "cross.h"
+ #include "player_video_frame.h"
#include "i18n.h"
using std::stringstream;
using std::cout;
using boost::shared_ptr;
-using libdcp::Size;
-using libdcp::raw_convert;
+using boost::lexical_cast;
+using dcp::Size;
+using dcp::raw_convert;
#define DCI_COEFFICENT (48.0 / 52.37)
/** Construct a DCP video frame.
- * @param input Input image.
- * @param f Index of the frame within the DCP.
+ * @param frame Input frame.
+ * @param index Index of the frame within the DCP.
+ * @param dcp_fps Frames per second of the DCP.
 * @param bw J2K bandwidth to use (see Config::j2k_bandwidth ())
+ * @param r Resolution of the DCP (2K or 4K).
 * @param l Log to write to.
 */
DCPVideoFrame::DCPVideoFrame (
- shared_ptr<const Image> image, int f, Eyes eyes, ColourConversion c, int dcp_fps, int bw, Resolution r, shared_ptr<Log> l
+ shared_ptr<const PlayerVideoFrame> frame, int index, int dcp_fps, int bw, Resolution r, shared_ptr<Log> l
)
- : _image (image)
- , _frame (f)
- , _eyes (eyes)
- , _conversion (c)
+ : _frame (frame)
+ , _index (index)
, _frames_per_second (dcp_fps)
, _j2k_bandwidth (bw)
, _resolution (r)
}
- DCPVideoFrame::DCPVideoFrame (shared_ptr<const Image> image, cxml::ConstNodePtr node, shared_ptr<Log> log)
- : _image (image)
+ DCPVideoFrame::DCPVideoFrame (shared_ptr<const PlayerVideoFrame> frame, shared_ptr<const cxml::Node> node, shared_ptr<Log> log)
+ : _frame (frame)
, _log (log)
{
- _frame = node->number_child<int> ("Frame");
- string const eyes = node->string_child ("Eyes");
- if (eyes == "Both") {
- _eyes = EYES_BOTH;
- } else if (eyes == "Left") {
- _eyes = EYES_LEFT;
- } else if (eyes == "Right") {
- _eyes = EYES_RIGHT;
- } else {
- assert (false);
- }
- _conversion = ColourConversion (node->node_child ("ColourConversion"));
+ _index = node->number_child<int> ("Index");
_frames_per_second = node->number_child<int> ("FramesPerSecond");
_j2k_bandwidth = node->number_child<int> ("J2KBandwidth");
_resolution = Resolution (node->optional_number_child<int>("Resolution").get_value_or (RESOLUTION_2K));
shared_ptr<EncodedData>
DCPVideoFrame::encode_locally ()
{
- shared_ptr<dcp::GammaLUT> in_lut;
- in_lut = dcp::GammaLUT::cache.get (12, _conversion.input_gamma, _conversion.input_gamma_linearised);
- shared_ptr<libdcp::LUT> in_lut;
- if (_frame->colour_conversion().input_gamma_linearised) {
- in_lut = libdcp::SRGBLinearisedGammaLUT::cache.get (12, _frame->colour_conversion().input_gamma);
- } else {
- in_lut = libdcp::GammaLUT::cache.get (12, _frame->colour_conversion().input_gamma);
- }
--
++ shared_ptr<dcp::GammaLUT> in_lut = dcp::GammaLUT::cache.get (
++ 12, _frame->colour_conversion().input_gamma, _frame->colour_conversion().input_gamma_linearised
++ );
++
/* XXX: libdcp should probably use boost */
double matrix[3][3];
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
- matrix[i][j] = _conversion.matrix (i, j);
+ matrix[i][j] = _frame->colour_conversion().matrix (i, j);
}
}
-
+
- shared_ptr<libdcp::XYZFrame> xyz = libdcp::rgb_to_xyz (
+ shared_ptr<dcp::XYZFrame> xyz = dcp::rgb_to_xyz (
- _image,
+ _frame->image(),
in_lut,
- dcp::GammaLUT::cache.get (16, 1 / _conversion.output_gamma, false),
- libdcp::GammaLUT::cache.get (16, 1 / _frame->colour_conversion().output_gamma),
++ dcp::GammaLUT::cache.get (16, 1 / _frame->colour_conversion().output_gamma, false),
matrix
);
+
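+ /* Apparently a debugging aid: compute an MD5 digest of the three XYZ
+ planes so that identical frames can be compared; the hex string built
+ in `s' is not otherwise used. The byte counts assume a full 2K frame
+ (1998x1080) at 4 bytes per sample.
+ */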
+ {
+ MD5_CTX md5_context;
+ MD5_Init (&md5_context);
+ MD5_Update (&md5_context, xyz->data(0), 1998 * 1080 * 4);
+ MD5_Update (&md5_context, xyz->data(1), 1998 * 1080 * 4);
+ MD5_Update (&md5_context, xyz->data(2), 1998 * 1080 * 4);
+ unsigned char digest[MD5_DIGEST_LENGTH];
+ MD5_Final (digest, &md5_context);
+ stringstream s;
+ for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
+ s << std::hex << std::setfill('0') << std::setw(2) << ((int) digest[i]);
+ }
+ }
+
/* Set the max image and component sizes based on frame_rate */
int max_cs_len = ((float) _j2k_bandwidth) / 8 / _frames_per_second;
- if (_eyes == EYES_LEFT || _eyes == EYES_RIGHT) {
+ if (_frame->eyes() == EYES_LEFT || _frame->eyes() == EYES_RIGHT) {
/* In 3D we have only half the normal bandwidth per eye */
max_cs_len /= 2;
}
throw EncodeError (N_("JPEG2000 encoding failed"));
}
- switch (_eyes) {
+ switch (_frame->eyes()) {
case EYES_BOTH:
- _log->log (String::compose (N_("Finished locally-encoded frame %1 for mono"), _frame));
+ _log->log (String::compose (N_("Finished locally-encoded frame %1 for mono"), _index));
break;
case EYES_LEFT:
- _log->log (String::compose (N_("Finished locally-encoded frame %1 for L"), _frame));
+ _log->log (String::compose (N_("Finished locally-encoded frame %1 for L"), _index));
break;
case EYES_RIGHT:
- _log->log (String::compose (N_("Finished locally-encoded frame %1 for R"), _frame));
+ _log->log (String::compose (N_("Finished locally-encoded frame %1 for R"), _index));
break;
default:
break;
socket->connect (*endpoint_iterator);
+ /* Collect all XML metadata */
xmlpp::Document doc;
xmlpp::Element* root = doc.create_root_node ("EncodingRequest");
-
root->add_child("Version")->add_child_text (raw_convert<string> (SERVER_LINK_VERSION));
- root->add_child("Width")->add_child_text (raw_convert<string> (_image->size().width));
- root->add_child("Height")->add_child_text (raw_convert<string> (_image->size().height));
add_metadata (root);
+ _log->log (String::compose (N_("Sending frame %1 to remote"), _index));
+
+ /* Send XML metadata */
stringstream xml;
doc.write_to_stream (xml, "UTF-8");
-
- _log->log (String::compose (N_("Sending frame %1 to remote"), _frame));
-
socket->write (xml.str().length() + 1);
socket->write ((uint8_t *) xml.str().c_str(), xml.str().length() + 1);
- _image->write_to_socket (socket);
+ /* Send binary data */
+ _frame->send_binary (socket);
+ /* Read the response (JPEG2000-encoded data); this blocks until the data
+ is ready and sent back.
+ */
shared_ptr<EncodedData> e (new RemotelyEncodedData (socket->read_uint32 ()));
socket->read (e->data(), e->size());
- _log->log (String::compose (N_("Finished remotely-encoded frame %1"), _frame));
+ _log->log (String::compose (N_("Finished remotely-encoded frame %1"), _index));
return e;
}
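/* For illustration only: the matching server-side read might look roughly
   like this, reusing the Socket calls and the XML-taking DCPVideoFrame
   constructor from this change. handle_request and the buffer handling are
   assumptions, not the actual server code.

	void
	handle_request (boost::shared_ptr<Socket> socket)
	{
		// Read the length-prefixed XML metadata
		uint32_t const length = socket->read_uint32 ();
		std::vector<char> buffer (length);
		socket->read ((uint8_t *) &buffer[0], length);

		std::stringstream s (std::string (&buffer[0]));
		boost::shared_ptr<cxml::Document> xml (new cxml::Document ("EncodingRequest"));
		xml->read_stream (s);

		// Reconstruct the frame and encode it as if it were local
		boost::shared_ptr<PlayerVideoFrame> pvf; // built from xml and socket (elided)
		DCPVideoFrame frame (pvf, xml, boost::shared_ptr<Log> ());
		frame.encode_locally()->send (socket);
	}
*/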
void
DCPVideoFrame::add_metadata (xmlpp::Element* el) const
{
- el->add_child("Frame")->add_child_text (raw_convert<string> (_frame));
-
- switch (_eyes) {
- case EYES_BOTH:
- el->add_child("Eyes")->add_child_text ("Both");
- break;
- case EYES_LEFT:
- el->add_child("Eyes")->add_child_text ("Left");
- break;
- case EYES_RIGHT:
- el->add_child("Eyes")->add_child_text ("Right");
- break;
- default:
- assert (false);
- }
-
- _conversion.as_xml (el->add_child("ColourConversion"));
-
+ el->add_child("Index")->add_child_text (raw_convert<string> (_index));
el->add_child("FramesPerSecond")->add_child_text (raw_convert<string> (_frames_per_second));
el->add_child("J2KBandwidth")->add_child_text (raw_convert<string> (_j2k_bandwidth));
el->add_child("Resolution")->add_child_text (raw_convert<string> (int (_resolution)));
+ _frame->add_metadata (el);
+ }
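+
+ /* The request metadata now looks like, for example (the frame-specific
+ children, such as eyes and colour conversion, are appended by
+ PlayerVideoFrame::add_metadata; values here are illustrative):
+
+ <EncodingRequest>
+ <Version>...</Version>
+ <Index>42</Index>
+ <FramesPerSecond>24</FramesPerSecond>
+ <J2KBandwidth>200000000</J2KBandwidth>
+ <Resolution>0</Resolution>
+ ...
+ </EncodingRequest>
+ */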
+
+ Eyes
+ DCPVideoFrame::eyes () const
+ {
+ return _frame->eyes ();
}
EncodedData::EncodedData (int s)
}
void
-EncodedData::write_info (shared_ptr<const Film> film, int frame, Eyes eyes, libdcp::FrameInfo fin) const
+EncodedData::write_info (shared_ptr<const Film> film, int frame, Eyes eyes, dcp::FrameInfo fin) const
{
boost::filesystem::path const info = film->info_path (frame, eyes);
FILE* h = fopen_boost (info, "w");
*/
-#include <openjpeg.h>
-#include <libdcp/picture_asset.h>
-#include <libdcp/picture_asset_writer.h>
+#include <dcp/picture_mxf_writer.h>
#include "util.h"
/** @file src/dcp_video_frame.h
class Image;
class Log;
class Subtitle;
+ class PlayerVideoFrame;
/** @class EncodedData
* @brief Container for J2K-encoded data.
void send (boost::shared_ptr<Socket> socket);
void write (boost::shared_ptr<const Film>, int, Eyes) const;
- void write_info (boost::shared_ptr<const Film>, int, Eyes, libdcp::FrameInfo) const;
+ void write_info (boost::shared_ptr<const Film>, int, Eyes, dcp::FrameInfo) const;
/** @return data */
uint8_t* data () const {
class DCPVideoFrame : public boost::noncopyable
{
public:
- DCPVideoFrame (boost::shared_ptr<const Image>, int, Eyes, ColourConversion, int, int, Resolution, boost::shared_ptr<Log>);
- DCPVideoFrame (boost::shared_ptr<const Image>, cxml::ConstNodePtr, boost::shared_ptr<Log>);
+ DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, int, int, int, Resolution, boost::shared_ptr<Log>);
- DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, boost::shared_ptr<const cxml::Node>, boost::shared_ptr<Log>);
++ DCPVideoFrame (boost::shared_ptr<const PlayerVideoFrame>, cxml::ConstNodePtr, boost::shared_ptr<Log>);
boost::shared_ptr<EncodedData> encode_locally ();
boost::shared_ptr<EncodedData> encode_remotely (ServerDescription);
- Eyes eyes () const {
- return _eyes;
- }
-
- int frame () const {
- return _frame;
+ int index () const {
+ return _index;
}
+
+ Eyes eyes () const;
private:
void add_metadata (xmlpp::Element *) const;
- boost::shared_ptr<const Image> _image;
- int _frame; ///< frame index within the DCP's intrinsic duration
- Eyes _eyes;
- ColourConversion _conversion;
+ boost::shared_ptr<const PlayerVideoFrame> _frame;
+ int _index; ///< frame index within the DCP's intrinsic duration
int _frames_per_second; ///< Frames per second that we will use for the DCP
int _j2k_bandwidth; ///< J2K bandwidth to use
Resolution _resolution; ///< Resolution (2K or 4K)
#include "writer.h"
#include "server_finder.h"
#include "player.h"
- #include "dcp_video.h"
+ #include "player_video_frame.h"
#include "i18n.h"
, _video_frames_out (0)
, _terminate (false)
{
- _have_a_real_frame[EYES_BOTH] = false;
- _have_a_real_frame[EYES_LEFT] = false;
- _have_a_real_frame[EYES_RIGHT] = false;
+
}
Encoder::~Encoder ()
*/
for (list<shared_ptr<DCPVideoFrame> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
- _film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->frame ()));
+ _film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->index ()));
try {
- _writer->write ((*i)->encode_locally(), (*i)->frame (), (*i)->eyes ());
+ _writer->write ((*i)->encode_locally(), (*i)->index (), (*i)->eyes ());
frame_done ();
} catch (std::exception& e) {
_film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
void
- Encoder::process_video (shared_ptr<DCPVideo> frame)
-Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf, bool same)
++Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf)
{
_waker.nudge ();
rethrow ();
if (_writer->can_fake_write (_video_frames_out)) {
- _writer->fake_write (_video_frames_out, frame->eyes ());
+ _writer->fake_write (_video_frames_out, pvf->eyes ());
- _have_a_real_frame[pvf->eyes()] = false;
- frame_done ();
- } else if (same && _have_a_real_frame[pvf->eyes()]) {
- /* Use the last frame that we encoded. */
- _writer->repeat (_video_frames_out, pvf->eyes());
frame_done ();
} else {
/* Queue this new frame for encoding */
TIMING ("adding to queue of %1", _queue.size ());
_queue.push_back (shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
- frame->image(PIX_FMT_RGB24, false),
- pvf, _video_frames_out, _film->video_frame_rate(),
- _film->j2k_bandwidth(), _film->resolution(), _film->log()
++ pvf,
+ _video_frames_out,
- frame->eyes(),
- frame->conversion(),
+ _film->video_frame_rate(),
+ _film->j2k_bandwidth(),
+ _film->resolution(),
+ _film->log()
)
));
_condition.notify_all ();
- _have_a_real_frame[pvf->eyes()] = true;
}
- if (frame->eyes() != EYES_LEFT) {
+ if (pvf->eyes() != EYES_LEFT) {
++_video_frames_out;
}
}
TIMING ("encoder thread %1 wakes with queue of %2", boost::this_thread::get_id(), _queue.size());
shared_ptr<DCPVideoFrame> vf = _queue.front ();
- TIMING ("encoder thread %1 pops frame %2 (%3) from queue", boost::this_thread::get_id(), vf->frame(), vf->eyes ());
+ TIMING ("encoder thread %1 pops frame %2 (%3) from queue", boost::this_thread::get_id(), vf->index(), vf->eyes ());
_queue.pop_front ();
lock.unlock ();
_film->log()->log (
String::compose (
N_("Remote encode of %1 on %2 failed (%3); thread sleeping for %4s"),
- vf->frame(), server->host_name(), e.what(), remote_backoff)
+ vf->index(), server->host_name(), e.what(), remote_backoff)
);
}
} else {
try {
- TIMING ("encoder thread %1 begins local encode of %2", boost::this_thread::get_id(), vf->frame());
+ TIMING ("encoder thread %1 begins local encode of %2", boost::this_thread::get_id(), vf->index());
encoded = vf->encode_locally ();
- TIMING ("encoder thread %1 finishes local encode of %2", boost::this_thread::get_id(), vf->frame());
+ TIMING ("encoder thread %1 finishes local encode of %2", boost::this_thread::get_id(), vf->index());
} catch (std::exception& e) {
_film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
}
if (encoded) {
- _writer->write (encoded, vf->frame (), vf->eyes ());
+ _writer->write (encoded, vf->index (), vf->eyes ());
frame_done ();
} else {
lock.lock ();
_film->log()->log (
- String::compose (N_("Encoder thread %1 pushes frame %2 back onto queue after failure"), boost::this_thread::get_id(), vf->frame())
+ String::compose (N_("Encoder thread %1 pushes frame %2 back onto queue after failure"), boost::this_thread::get_id(), vf->index())
);
_queue.push_front (vf);
lock.unlock ();
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
class Writer;
class Job;
class ServerFinder;
- class DCPVideo;
+ class PlayerVideoFrame;
/** @class Encoder
* @brief Encoder to J2K and WAV for DCP.
void process_begin ();
/** Call with a frame of video.
- * @param pvf Video frame image.
- * @param same true if pvf is the same as the last time we were called.
+ * @param f Video frame.
*/
- void process_video (boost::shared_ptr<DCPVideo> f);
- void process_video (boost::shared_ptr<PlayerVideoFrame> pvf, bool same);
++ void process_video (boost::shared_ptr<PlayerVideoFrame> f);
/** Call with some audio data */
void process_audio (boost::shared_ptr<const AudioBuffers>);
/** Number of video frames written for the DCP so far */
int _video_frames_out;
- bool _have_a_real_frame[EYES_COUNT];
bool _terminate;
std::list<boost::shared_ptr<DCPVideoFrame> > _queue;
std::list<boost::thread *> _threads;
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
-#include "film.h"
#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
+ #include "image_proxy.h"
#include "i18n.h"
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
-using libdcp::Size;
+using dcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio)
- : Decoder (f)
- , VideoDecoder (f, c)
- , AudioDecoder (f, c)
- , SubtitleDecoder (f)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
+ : VideoDecoder (c)
+ , AudioDecoder (c)
, FFmpeg (c)
+ , _log (log)
, _subtitle_codec_context (0)
, _subtitle_codec (0)
- , _decode_video (video)
- , _decode_audio (audio)
- , _pts_offset (0)
- , _just_sought (false)
{
setup_subtitle ();
Then we remove big initial gaps in PTS and we allow our
insertion of black frames to work.
- We will do:
- audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
- video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
+ We will do pts_to_use = pts_from_ffmpeg + pts_offset;
*/
- bool const have_video = video && c->first_video();
- bool const have_audio = audio && c->audio_stream() && c->audio_stream()->first_audio;
+ bool const have_video = c->first_video();
+ bool const have_audio = c->audio_stream () && c->audio_stream()->first_audio;
/* First, make one of them start at 0 */
/* Now adjust both so that the video pts starts on a frame */
if (have_video && have_audio) {
- double first_video = c->first_video().get() + _pts_offset;
- double const old_first_video = first_video;
-
- /* Round the first video up to a frame boundary */
- if (fabs (rint (first_video * c->video_frame_rate()) - first_video * c->video_frame_rate()) > 1e-6) {
- first_video = ceil (first_video * c->video_frame_rate()) / c->video_frame_rate ();
- }
-
- _pts_offset += first_video - old_first_video;
+ ContentTime first_video = c->first_video().get() + _pts_offset;
+ ContentTime const old_first_video = first_video;
+ _pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
}
}
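/* Worked example: at 24 fps a first video PTS of 4.51s is rounded up to the
   next frame boundary, 109/24 ~= 4.5417s, so _pts_offset grows by ~0.0317s
   and the first video frame then lands exactly on a frame boundary.
*/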
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
- if (_decode_video) {
- while (decode_video_packet ()) {}
- }
+ while (decode_video_packet ()) {}
- if (_ffmpeg_content->audio_stream() && _decode_audio) {
+ if (_ffmpeg_content->audio_stream()) {
decode_audio_packet ();
+ AudioDecoder::flush ();
}
-
- /* Stop us being asked for any more data */
- _video_position = _ffmpeg_content->video_length_after_3d_combine ();
- _audio_position = _ffmpeg_content->audio_length ();
}
-void
+bool
FFmpegDecoder::pass ()
{
int r = av_read_frame (_format_context, &_packet);
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
- film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
+ _log->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
flush ();
- return;
+ return true;
}
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
int const si = _packet.stream_index;
- if (si == _video_stream && _decode_video) {
+ if (si == _video_stream) {
decode_video_packet ();
- } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si) && _decode_audio) {
+ } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si)) {
decode_audio_packet ();
- } else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si) && film->with_subtitles ()) {
+ } else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
}
av_free_packet (&_packet);
+ return false;
}
/** @param data pointer to array of pointers to buffers.
return av_get_bytes_per_sample (audio_sample_format ());
}
-void
-FFmpegDecoder::seek (VideoContent::Frame frame, bool accurate)
+int
+FFmpegDecoder::minimal_run (boost::function<bool (optional<ContentTime>, optional<ContentTime>, int)> finished)
{
- double const time_base = av_q2d (_format_context->streams[_video_stream]->time_base);
+ int frames_read = 0;
+ optional<ContentTime> last_video;
+ optional<ContentTime> last_audio;
- /* If we are doing an accurate seek, our initial shot will be 5 frames (5 being
- a number plucked from the air) earlier than we want to end up. The loop below
- will hopefully then step through to where we want to be.
- */
- int initial = frame;
+ while (!finished (last_video, last_audio, frames_read)) {
+ int r = av_read_frame (_format_context, &_packet);
+ if (r < 0) {
+ /* We should flush our decoders here, possibly yielding a few more frames,
+ but the consequence of having to do that is too hideous to contemplate.
+ Instead we give up and say that you can't seek too close to the end
+ of a file.
+ */
+ return frames_read;
+ }
+
+ ++frames_read;
+
+ double const time_base = av_q2d (_format_context->streams[_packet.stream_index]->time_base);
+
+ if (_packet.stream_index == _video_stream) {
+
+ avcodec_get_frame_defaults (_frame);
+
+ int got_picture = 0;
+ r = avcodec_decode_video2 (video_codec_context(), _frame, &got_picture, &_packet);
+ if (r >= 0 && got_picture) {
+ last_video = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
+ }
+
+ } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, _packet.stream_index)) {
+ AVPacket copy_packet = _packet;
+ while (copy_packet.size > 0) {
- if (accurate) {
- initial -= 5;
+ int got_frame;
+ r = avcodec_decode_audio4 (audio_codec_context(), _frame, &got_frame, &copy_packet);
+ if (r >= 0 && got_frame) {
+ last_audio = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
+ }
+
+ copy_packet.data += r;
+ copy_packet.size -= r;
+ }
+ }
+
+ av_free_packet (&_packet);
}
- if (initial < 0) {
- initial = 0;
+ return frames_read;
+}
+
+bool
+FFmpegDecoder::seek_overrun_finished (ContentTime seek, optional<ContentTime> last_video, optional<ContentTime> last_audio) const
+{
+ return (last_video && last_video.get() >= seek) || (last_audio && last_audio.get() >= seek);
+}
+
+bool
+FFmpegDecoder::seek_final_finished (int n, int done) const
+{
+ return n == done;
+}
+
+void
+FFmpegDecoder::seek_and_flush (ContentTime t)
+{
+ ContentTime const u = t - _pts_offset;
+ int64_t s = u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base);
+
+ if (_ffmpeg_content->audio_stream ()) {
+ s = min (
+ s, int64_t (u.seconds() / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
+ );
}
- /* Initial seek time in the stream's timebase */
- int64_t const initial_vt = ((initial / _ffmpeg_content->video_frame_rate()) - _pts_offset) / time_base;
+ /* Ridiculous empirical hack */
+ s--;
+ if (s < 0) {
+ s = 0;
+ }
- av_seek_frame (_format_context, _video_stream, initial_vt, AVSEEK_FLAG_BACKWARD);
+ av_seek_frame (_format_context, _video_stream, s, 0);
avcodec_flush_buffers (video_codec_context());
+ if (audio_codec_context ()) {
+ avcodec_flush_buffers (audio_codec_context ());
+ }
if (_subtitle_codec_context) {
avcodec_flush_buffers (_subtitle_codec_context);
}
+}
- /* This !accurate is piling hack upon hack; setting _just_sought to true
- even with accurate == true defeats our attempt to align the start
- of the video and audio. Here we disable that defeat when accurate == true
- i.e. when we are making a DCP rather than just previewing one.
- Ewww. This should be gone in 2.0.
+void
+FFmpegDecoder::seek (ContentTime time, bool accurate)
+{
+ VideoDecoder::seek (time, accurate);
+ AudioDecoder::seek (time, accurate);
+
+ /* If we are doing an accurate seek, our initial shot will be 2s (2 being
+ a number plucked from the air) earlier than we want to end up. The loop below
+ will hopefully then step through to where we want to be.
*/
- if (!accurate) {
- _just_sought = true;
+
+ ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
+ ContentTime initial_seek = time - pre_roll;
+ if (initial_seek < ContentTime (0)) {
+ initial_seek = ContentTime (0);
}
-
- _video_position = frame;
-
- if (frame == 0 || !accurate) {
- /* We're already there, or we're as close as we need to be */
+
+ /* Do the initial seek; seek_and_flush() converts to the stream's timebase */
+
+ seek_and_flush (initial_seek);
+
+ if (!accurate) {
+ /* That'll do */
return;
}
- while (1) {
- int r = av_read_frame (_format_context, &_packet);
- if (r < 0) {
- return;
- }
-
- if (_packet.stream_index != _video_stream) {
- av_free_packet (&_packet);
- continue;
- }
-
- int finished = 0;
- r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
- if (r >= 0 && finished) {
- _video_position = rint (
- (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset) * _ffmpeg_content->video_frame_rate()
- );
+ int const N = minimal_run (boost::bind (&FFmpegDecoder::seek_overrun_finished, this, time, _1, _2));
- if (_video_position >= (frame - 1)) {
- av_free_packet (&_packet);
- break;
- }
- }
-
- av_free_packet (&_packet);
+ seek_and_flush (initial_seek);
+ if (N > 0) {
+ minimal_run (boost::bind (&FFmpegDecoder::seek_final_finished, this, N - 1, _3));
}
}
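/* In summary, an accurate seek is two passes: seek ~2s early, run forward
   counting packets until a decoded PTS reaches the target (N packets), then
   seek to the same early point again and replay N - 1 packets, leaving the
   decoder positioned just before the requested time.
*/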
int frame_finished;
int const decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
+
if (decode_result < 0) {
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
- film->log()->log (String::compose ("avcodec_decode_audio4 failed (%1)", decode_result));
+ _log->log (String::compose ("avcodec_decode_audio4 failed (%1)", decode_result));
return;
}
if (frame_finished) {
-
- if (_audio_position == 0) {
- /* Where we are in the source, in seconds */
- double const pts = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame) + _pts_offset;
-
- if (pts > 0) {
- /* Emit some silence */
- shared_ptr<AudioBuffers> silence (
- new AudioBuffers (
- _ffmpeg_content->audio_channels(),
- pts * _ffmpeg_content->content_audio_frame_rate()
- )
- );
-
- silence->make_silent ();
- audio (silence, _audio_position);
- }
- }
+ ContentTime const ct = ContentTime::from_seconds (
+ av_frame_get_best_effort_timestamp (_frame) *
+ av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base))
+ + _pts_offset;
int const data_size = av_samples_get_buffer_size (
0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
);
-
- audio (deinterleave_audio (_frame->data, data_size), _audio_position);
+
+ audio (deinterleave_audio (_frame->data, data_size), ct);
}
copy_packet.data += decode_result;
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+ while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
++i;
}
if (i == _filter_graphs.end ()) {
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
- graph.reset (new FilterGraph (_ffmpeg_content, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+ graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
_filter_graphs.push_back (graph);
-
- film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ _log->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
} else {
graph = *i;
}
shared_ptr<Image> image = i->first;
if (i->second != AV_NOPTS_VALUE) {
-
- double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset;
-
- if (_just_sought) {
- /* We just did a seek, so disable any attempts to correct for where we
- are / should be.
- */
- _video_position = rint (pts * _ffmpeg_content->video_frame_rate ());
- _just_sought = false;
- }
-
- double const next = _video_position / _ffmpeg_content->video_frame_rate();
- double const one_frame = 1 / _ffmpeg_content->video_frame_rate ();
- double delta = pts - next;
-
- while (delta > one_frame) {
- /* This PTS is more than one frame forward in time of where we think we should be; emit
- a black frame.
- */
-
- /* XXX: I think this should be a copy of the last frame... */
- boost::shared_ptr<Image> black (
- new Image (
- static_cast<AVPixelFormat> (_frame->format),
- libdcp::Size (video_codec_context()->width, video_codec_context()->height),
- true
- )
- );
-
- black->make_black ();
- video (shared_ptr<ImageProxy> (new RawImageProxy (image)), false, _video_position);
- delta -= one_frame;
- }
-
- if (delta > -one_frame) {
- /* This PTS is within a frame of being right; emit this (otherwise it will be dropped) */
- video (shared_ptr<ImageProxy> (new RawImageProxy (image)), false, _video_position);
- }
-
+ double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
- video (image, rint (pts * _ffmpeg_content->video_frame_rate ()));
++ video (shared_ptr<ImageProxy> (new RawImageProxy (image)), rint (pts * _ffmpeg_content->video_frame_rate ()));
} else {
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
- film->log()->log ("Dropping frame without PTS");
+ _log->log ("Dropping frame without PTS");
}
}
}
}
-bool
-FFmpegDecoder::done () const
-{
- bool const vd = !_decode_video || (_video_position >= _ffmpeg_content->video_length());
- bool const ad = !_decode_audio || !_ffmpeg_content->audio_stream() || (_audio_position >= _ffmpeg_content->audio_length());
- return vd && ad;
-}
-
void
FFmpegDecoder::decode_subtitle_packet ()
{
indicate that the previous subtitle should stop.
*/
if (sub.num_rects <= 0) {
- subtitle (shared_ptr<Image> (), dcpomatic::Rect<double> (), 0, 0);
+ image_subtitle (ContentTime (), ContentTime (), shared_ptr<Image> (), dcpomatic::Rect<double> ());
return;
} else if (sub.num_rects > 1) {
throw DecodeError (_("multi-part subtitles not yet supported"));
}
- /* Subtitle PTS in seconds (within the source, not taking into account any of the
+ /* Subtitle PTS (within the source, not taking into account any of the
source that we may have chopped off for the DCP)
*/
- double const packet_time = (static_cast<double> (sub.pts ) / AV_TIME_BASE) + _pts_offset;
-
+ ContentTime packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE) + _pts_offset;
+
/* hence start time for this sub */
- Time const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
- Time const to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;
+ ContentTime const from = packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3);
+ ContentTime const to = packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3);
AVSubtitleRect const * rect = sub.rects[0];
if (rect->type != SUBTITLE_BITMAP) {
- throw DecodeError (_("non-bitmap subtitles not yet supported"));
+ /* XXX */
+ // throw DecodeError (_("non-bitmap subtitles not yet supported"));
+ return;
}
/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
G, third B, fourth A.
*/
- shared_ptr<Image> image (new Image (PIX_FMT_RGBA, libdcp::Size (rect->w, rect->h), true));
+ shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->pict.data[0];
out_p += image->stride()[0] / sizeof (uint32_t);
}
- libdcp::Size const vs = _ffmpeg_content->video_size ();
+ dcp::Size const vs = _ffmpeg_content->video_size ();
- subtitle (
+ image_subtitle (
+ from,
+ to,
image,
dcpomatic::Rect<double> (
static_cast<double> (rect->x) / vs.width,
static_cast<double> (rect->y) / vs.height,
static_cast<double> (rect->w) / vs.width,
static_cast<double> (rect->h) / vs.height
- ),
- from,
- to
+ )
);
-
avsubtitle_free (&sub);
}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
*/
#include <iostream>
+ #include <openssl/md5.h>
extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
#include "image.h"
#include "exceptions.h"
#include "scaler.h"
+#include "timer.h"
+#include "rect.h"
#include "i18n.h"
using std::min;
using std::cout;
using std::cerr;
+using std::list;
+ using std::stringstream;
using boost::shared_ptr;
-using libdcp::Size;
+using dcp::Size;
int
Image::line_factor (int n) const
/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size' */
shared_ptr<Image>
-Image::crop_scale_window (Crop crop, libdcp::Size inter_size, libdcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
+Image::crop_scale_window (Crop crop, dcp::Size inter_size, dcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
{
assert (scaler);
/* Empirical testing suggests that sws_scale() will crash if
out->make_black ();
/* Size of the image after any crop */
- libdcp::Size const cropped_size = crop.apply (size ());
+ dcp::Size const cropped_size = crop.apply (size ());
/* Scale context for a scale from cropped_size to inter_size */
struct SwsContext* scale_context = sws_getContext (
- cropped_size.width, cropped_size.height, pixel_format(),
- inter_size.width, inter_size.height, out_format,
- scaler->ffmpeg_id (), 0, 0, 0
+ cropped_size.width, cropped_size.height, pixel_format(),
+ inter_size.width, inter_size.height, out_format,
+ scaler->ffmpeg_id (), 0, 0, 0
);
if (!scale_context) {
}
shared_ptr<Image>
-Image::scale (libdcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
+Image::scale (dcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
{
assert (scaler);
/* Empirical testing suggests that sws_scale() will crash if
shared_ptr<Image>
Image::crop (Crop crop, bool aligned) const
{
- libdcp::Size cropped_size = crop.apply (size ());
+ dcp::Size cropped_size = crop.apply (size ());
shared_ptr<Image> out (new Image (pixel_format(), cropped_size, aligned));
for (int c = 0; c < components(); ++c) {
}
}
+void
+Image::make_transparent ()
+{
+ if (_pixel_format != PIX_FMT_RGBA) {
+ throw PixelFormatError ("make_transparent()", _pixel_format);
+ }
+
+ memset (data()[0], 0, lines(0) * stride()[0]);
+}
+
void
Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
{
- /* Only implemented for RGBA onto RGB24 so far */
- assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGBA);
+ int this_bpp = 0;
+ int other_bpp = 0;
+
+ if (_pixel_format == PIX_FMT_BGRA && other->pixel_format() == PIX_FMT_RGBA) {
+ this_bpp = 4;
+ other_bpp = 4;
+ } else if (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGBA) {
+ this_bpp = 3;
+ other_bpp = 4;
+ } else {
+ assert (false);
+ }
int start_tx = position.x;
int start_ox = 0;
}
for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
- uint8_t* tp = data()[0] + ty * stride()[0] + position.x * 3;
+ uint8_t* tp = data()[0] + ty * stride()[0] + position.x * this_bpp;
uint8_t* op = other->data()[0] + oy * other->stride()[0];
for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
float const alpha = float (op[3]) / 255;
tp[0] = (tp[0] * (1 - alpha)) + op[0] * alpha;
tp[1] = (tp[1] * (1 - alpha)) + op[1] * alpha;
tp[2] = (tp[2] * (1 - alpha)) + op[2] * alpha;
- tp += 3;
- op += 4;
+ tp += this_bpp;
+ op += other_bpp;
}
}
}
* @param p Pixel format.
* @param s Size in pixels.
*/
-Image::Image (AVPixelFormat p, libdcp::Size s, bool aligned)
- : libdcp::Image (s)
+Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
+ : dcp::Image (s)
, _pixel_format (p)
, _aligned (aligned)
{
}
Image::Image (Image const & other)
- : libdcp::Image (other)
+ : dcp::Image (other)
, _pixel_format (other._pixel_format)
, _aligned (other._aligned)
{
}
Image::Image (AVFrame* frame)
- : libdcp::Image (libdcp::Size (frame->width, frame->height))
+ : dcp::Image (dcp::Size (frame->width, frame->height))
, _pixel_format (static_cast<AVPixelFormat> (frame->format))
, _aligned (true)
{
}
Image::Image (shared_ptr<const Image> other, bool aligned)
- : libdcp::Image (other)
+ : dcp::Image (other)
, _pixel_format (other->_pixel_format)
, _aligned (aligned)
{
void
Image::swap (Image & other)
{
- libdcp::Image::swap (other);
+ dcp::Image::swap (other);
std::swap (_pixel_format, other._pixel_format);
return _stride;
}
-libdcp::Size
+dcp::Size
Image::size () const
{
return _size;
return _aligned;
}
-
+PositionImage
+merge (list<PositionImage> images)
+{
+ if (images.empty ()) {
+ return PositionImage ();
+ }
+
+ dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
+ for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
+ all.extend (dcpomatic::Rect<int> (i->position, i->image->size().width, i->image->size().height));
+ }
+
+ shared_ptr<Image> merged (new Image (images.front().image->pixel_format (), dcp::Size (all.width, all.height), true));
+ merged->make_transparent ();
+ for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
+ merged->alpha_blend (i->image, i->position);
+ }
+
+ return PositionImage (merged, all.position ());
+}
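/* A usage sketch (a and b are assumed RGBA overlay images; PositionImage's
   (image, position) constructor is used as above):

	std::list<PositionImage> images;
	images.push_back (PositionImage (a, Position<int> (0, 0)));
	images.push_back (PositionImage (b, Position<int> (100, 50)));
	PositionImage merged = merge (images);

   merge() makes a transparent canvas covering both rectangles, alpha-blends
   each image into it, and returns the canvas with its origin.
*/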
++
+ string
+ Image::digest () const
+ {
+ MD5_CTX md5_context;
+ MD5_Init (&md5_context);
+
+ for (int i = 0; i < components(); ++i) {
+ MD5_Update (&md5_context, data()[i], line_size()[i]);
+ }
+
+ unsigned char digest[MD5_DIGEST_LENGTH];
+ MD5_Final (digest, &md5_context);
+
+ stringstream s;
+ for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
+ s << std::hex << std::setfill('0') << std::setw(2) << ((int) digest[i]);
+ }
+
+ return s.str ();
+ }
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
}
-#include <libdcp/image.h>
+#include <dcp/image.h>
#include "util.h"
#include "position.h"
+#include "position_image.h"
class Scaler;
-class Image : public libdcp::Image
+class Image : public dcp::Image
{
public:
- Image (AVPixelFormat, libdcp::Size, bool);
+ Image (AVPixelFormat, dcp::Size, bool);
Image (AVFrame *);
Image (Image const &);
Image (boost::shared_ptr<const Image>, bool);
uint8_t ** data () const;
int * line_size () const;
int * stride () const;
- libdcp::Size size () const;
+ dcp::Size size () const;
bool aligned () const;
int components () const;
int line_factor (int) const;
int lines (int) const;
- boost::shared_ptr<Image> scale (libdcp::Size, Scaler const *, AVPixelFormat, bool aligned) const;
+ boost::shared_ptr<Image> scale (dcp::Size, Scaler const *, AVPixelFormat, bool aligned) const;
boost::shared_ptr<Image> crop (Crop c, bool aligned) const;
- boost::shared_ptr<Image> crop_scale_window (Crop c, libdcp::Size, libdcp::Size, Scaler const *, AVPixelFormat, bool aligned) const;
+ boost::shared_ptr<Image> crop_scale_window (Crop c, dcp::Size, dcp::Size, Scaler const *, AVPixelFormat, bool aligned) const;
void make_black ();
+ void make_transparent ();
void alpha_blend (boost::shared_ptr<const Image> image, Position<int> pos);
void copy (boost::shared_ptr<const Image> image, Position<int> pos);
return _pixel_format;
}
+ std::string digest () const;
+
private:
friend class pixel_formats_test;
bool _aligned;
};
+extern PositionImage merge (std::list<PositionImage> images);
+
#endif
#include "image_content.h"
#include "image_decoder.h"
#include "image.h"
+ #include "image_proxy.h"
#include "film.h"
#include "exceptions.h"
using std::cout;
using boost::shared_ptr;
-using libdcp::Size;
+using dcp::Size;
-ImageDecoder::ImageDecoder (shared_ptr<const Film> f, shared_ptr<const ImageContent> c)
- : Decoder (f)
- , VideoDecoder (f, c)
+ImageDecoder::ImageDecoder (shared_ptr<const ImageContent> c)
+ : VideoDecoder (c)
, _image_content (c)
{
}
-void
+bool
ImageDecoder::pass ()
{
- if (_video_position >= _image_content->video_length ()) {
- return;
+ if (_video_position >= _image_content->video_length().frames (_image_content->video_frame_rate ())) {
+ return true;
}
-- if (_image && _image_content->still ()) {
- video (_image, _video_position);
- ++_video_position;
- return false;
- video (_image, true, _video_position);
- return;
++ if (!_image_content->still() || !_image) {
++ /* Either we need an image or we are using moving images, so load one */
++ _image.reset (new MagickImageProxy (_image_content->path (_image_content->still() ? 0 : _video_position)));
}
-
- Magick::Image* magick_image = 0;
-
- boost::filesystem::path const path = _image_content->path (_image_content->still() ? 0 : _video_position);
-
- try {
- magick_image = new Magick::Image (path.string ());
- } catch (...) {
- throw OpenFileError (path);
- }
-
- dcp::Size size (magick_image->columns(), magick_image->rows());
-
- _image.reset (new Image (PIX_FMT_RGB24, size, true));
-
- using namespace MagickCore;
-
- uint8_t* p = _image->data()[0];
- for (int y = 0; y < size.height; ++y) {
- uint8_t* q = p;
- for (int x = 0; x < size.width; ++x) {
- Magick::Color c = magick_image->pixelColor (x, y);
- *q++ = c.redQuantum() * 255 / QuantumRange;
- *q++ = c.greenQuantum() * 255 / QuantumRange;
- *q++ = c.blueQuantum() * 255 / QuantumRange;
- }
- p += _image->stride()[0];
- }
-
- delete magick_image;
--
- _image.reset (new MagickImageProxy (_image_content->path (_image_content->still() ? 0 : _video_position)));
- video (_image, false, _video_position);
++
+ video (_image, _video_position);
+ ++_video_position;
-
+ return false;
}
void
-ImageDecoder::seek (VideoContent::Frame frame, bool)
-{
- _video_position = frame;
-}
-
-bool
-ImageDecoder::done () const
+ImageDecoder::seek (ContentTime time, bool accurate)
{
- return _video_position >= _image_content->video_length ();
+ VideoDecoder::seek (time, accurate);
+ _video_position = time.frames (_image_content->video_frame_rate ());
}
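/* e.g. a seek to ContentTime 2.5s in 24 fps image content simply sets
   _video_position to frame 60 (2.5 x 24); there is no stream to wind
   through, so the accurate flag makes no difference here.
*/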
class ImageDecoder : public VideoDecoder
{
public:
- ImageDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const ImageContent>);
+ ImageDecoder (boost::shared_ptr<const ImageContent> c);
boost::shared_ptr<const ImageContent> content () {
return _image_content;
}
- /* Decoder */
-
- void pass ();
- void seek (VideoContent::Frame, bool);
- bool done () const;
+ void seek (ContentTime, bool);
private:
+ bool pass ();
+
boost::shared_ptr<const ImageContent> _image_content;
- boost::shared_ptr<Image> _image;
+ boost::shared_ptr<ImageProxy> _image;
+ VideoFrame _video_position;
};
--- /dev/null
-#include <libdcp/util.h>
-#include <libdcp/raw_convert.h>
+ /*
+ Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+ #include <Magick++.h>
- libdcp::Size size (
++#include <dcp/util.h>
++#include <dcp/raw_convert.h>
+ #include "image_proxy.h"
+ #include "image.h"
+ #include "exceptions.h"
+ #include "cross.h"
+
+ #include "i18n.h"
+
+ using std::cout;
+ using std::string;
+ using std::stringstream;
+ using boost::shared_ptr;
+
+ RawImageProxy::RawImageProxy (shared_ptr<Image> image)
+ : _image (image)
+ {
+
+ }
+
+ RawImageProxy::RawImageProxy (shared_ptr<cxml::Node> xml, shared_ptr<Socket> socket)
+ {
- node->add_child("Width")->add_child_text (libdcp::raw_convert<string> (_image->size().width));
- node->add_child("Height")->add_child_text (libdcp::raw_convert<string> (_image->size().height));
++ dcp::Size size (
+ xml->number_child<int> ("Width"), xml->number_child<int> ("Height")
+ );
+
+ _image.reset (new Image (PIX_FMT_RGB24, size, true));
+ _image->read_from_socket (socket);
+ }
+
+ shared_ptr<Image>
+ RawImageProxy::image () const
+ {
+ return _image;
+ }
+
+ void
+ RawImageProxy::add_metadata (xmlpp::Node* node) const
+ {
+ node->add_child("Type")->add_child_text (N_("Raw"));
- libdcp::Size size (magick_image->columns(), magick_image->rows());
++ node->add_child("Width")->add_child_text (dcp::raw_convert<string> (_image->size().width));
++ node->add_child("Height")->add_child_text (dcp::raw_convert<string> (_image->size().height));
+ }
+
+ void
+ RawImageProxy::send_binary (shared_ptr<Socket> socket) const
+ {
+ _image->write_to_socket (socket);
+ }
+
+ MagickImageProxy::MagickImageProxy (boost::filesystem::path path)
+ {
+ /* Read the file into a Blob */
+
+ boost::uintmax_t const size = boost::filesystem::file_size (path);
+ FILE* f = fopen_boost (path, "rb");
+ if (!f) {
+ throw OpenFileError (path);
+ }
+
+ uint8_t* data = new uint8_t[size];
+ if (fread (data, 1, size, f) != size) {
+ delete[] data;
+ throw ReadFileError (path);
+ }
+
+ fclose (f);
+ _blob.update (data, size);
+ delete[] data;
+ }
+
+ MagickImageProxy::MagickImageProxy (shared_ptr<cxml::Node>, shared_ptr<Socket> socket)
+ {
+ uint32_t const size = socket->read_uint32 ();
+ uint8_t* data = new uint8_t[size];
+ socket->read (data, size);
+ _blob.update (data, size);
+ delete[] data;
+ }
+
+ shared_ptr<Image>
+ MagickImageProxy::image () const
+ {
+ if (_image) {
+ return _image;
+ }
+
+ Magick::Image* magick_image = 0;
+ try {
+ magick_image = new Magick::Image (_blob);
+ } catch (...) {
+ throw DecodeError (_("Could not decode image file"));
+ }
+
++ dcp::Size size (magick_image->columns(), magick_image->rows());
+
+ _image.reset (new Image (PIX_FMT_RGB24, size, true));
+
+ using namespace MagickCore;
+
+ uint8_t* p = _image->data()[0];
+ for (int y = 0; y < size.height; ++y) {
+ uint8_t* q = p;
+ for (int x = 0; x < size.width; ++x) {
+ Magick::Color c = magick_image->pixelColor (x, y);
+ *q++ = c.redQuantum() * 255 / QuantumRange;
+ *q++ = c.greenQuantum() * 255 / QuantumRange;
+ *q++ = c.blueQuantum() * 255 / QuantumRange;
+ }
+ p += _image->stride()[0];
+ }
+
+ delete magick_image;
+
+ return _image;
+ }
+
+ void
+ MagickImageProxy::add_metadata (xmlpp::Node* node) const
+ {
+ node->add_child("Type")->add_child_text (N_("Magick"));
+ }
+
+ void
+ MagickImageProxy::send_binary (shared_ptr<Socket> socket) const
+ {
+ socket->write (_blob.length ());
+ socket->write ((uint8_t *) _blob.data (), _blob.length ());
+ }
+
+ shared_ptr<ImageProxy>
+ image_proxy_factory (shared_ptr<cxml::Node> xml, shared_ptr<Socket> socket)
+ {
+ if (xml->string_child("Type") == N_("Raw")) {
+ return shared_ptr<ImageProxy> (new RawImageProxy (xml, socket));
+ } else if (xml->string_child("Type") == N_("Magick")) {
+ return shared_ptr<MagickImageProxy> (new MagickImageProxy (xml, socket));
+ }
+
+ throw NetworkError (_("Unexpected image type received by server"));
+ }
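/* A round-trip sketch (the "Image" element name and the variables here are
   illustrative): the sender serialises the proxy as XML plus a binary
   payload, and the receiver rebuilds it with image_proxy_factory.

	// Sender
	xmlpp::Element* el = root->add_child ("Image");
	proxy->add_metadata (el);
	proxy->send_binary (socket);

	// Receiver
	boost::shared_ptr<ImageProxy> received = image_proxy_factory (xml->node_child ("Image"), socket);
	boost::shared_ptr<Image> image = received->image ();
*/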
*/
#include <stdint.h>
+#include <algorithm>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
+#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
+#include "subrip_decoder.h"
+#include "subrip_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
+ #include "image_proxy.h"
#include "ratio.h"
-#include "resampler.h"
#include "log.h"
#include "scaler.h"
- #include "dcp_video.h"
+#include "render_subtitles.h"
+#include "config.h"
+#include "content_video.h"
+ #include "player_video_frame.h"
using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
+using std::make_pair;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
+using boost::optional;
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
: _film (f)
, _playlist (p)
- , _video (true)
- , _audio (true)
, _have_valid_pieces (false)
- , _video_position (0)
- , _audio_position (0)
- , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
- , _last_emit_was_black (false)
+ , _approximate_size (false)
+ , _burn_subtitles (false)
{
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
}
void
-Player::disable_video ()
-{
- _video = false;
-}
-
-void
-Player::disable_audio ()
+Player::setup_pieces ()
{
- _audio = false;
-}
+ list<shared_ptr<Piece> > old_pieces = _pieces;
+ _pieces.clear ();
-bool
-Player::pass ()
-{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
+ ContentList content = _playlist->content ();
- Time earliest_t = TIME_MAX;
- shared_ptr<Piece> earliest;
- enum {
- VIDEO,
- AUDIO
- } type = VIDEO;
+ for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
- if ((*i)->decoder->done ()) {
+ if (!(*i)->paths_valid ()) {
continue;
}
-
- shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> ((*i)->decoder);
- shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
-
- if (_video && vd) {
- if ((*i)->video_position < earliest_t) {
- earliest_t = (*i)->video_position;
- earliest = *i;
- type = VIDEO;
+
+ shared_ptr<Decoder> decoder;
+ optional<FrameRateChange> frc;
+
+ /* Work out a FrameRateChange for the video content that best overlaps this content, in case we need it below */
+ DCPTime best_overlap_t;
+ shared_ptr<VideoContent> best_overlap;
+ for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
+ shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
+ if (!vc) {
+ continue;
}
- }
-
- if (_audio && ad && ad->has_audio ()) {
- if ((*i)->audio_position < earliest_t) {
- earliest_t = (*i)->audio_position;
- earliest = *i;
- type = AUDIO;
+
+ DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
+ if (overlap > best_overlap_t) {
+ best_overlap = vc;
+ best_overlap_t = overlap;
}
}
- }
- if (!earliest) {
- flush ();
- return true;
- }
-
- switch (type) {
- case VIDEO:
- if (earliest_t > _video_position) {
- emit_black ();
+ optional<FrameRateChange> best_overlap_frc;
+ if (best_overlap) {
+ best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
} else {
- if (earliest->repeating ()) {
- earliest->repeat (this);
- } else {
- earliest->decoder->pass ();
- }
+ /* No video overlap; e.g. if the DCP is just audio */
+ best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
}
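/* For example: audio-only content spanning 0-60s alongside video A (0-20s at
   25 fps) and video B (15-60s at 24 fps) overlaps B for longer, so B's 24 fps
   is used to build the FrameRateChange applied to the audio.
*/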
- break;
- case AUDIO:
- if (earliest_t > _audio_position) {
- emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
- } else {
- earliest->decoder->pass ();
-
- if (earliest->decoder->done()) {
- shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
- assert (ac);
- shared_ptr<Resampler> re = resampler (ac, false);
- if (re) {
- shared_ptr<const AudioBuffers> b = re->flush ();
- if (b->frames ()) {
- process_audio (earliest, b, ac->audio_length ());
- }
- }
- }
+ /* FFmpeg */
+ shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
+ if (fc) {
+ decoder.reset (new FFmpegDecoder (fc, _film->log()));
+ frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
}
- break;
- }
- if (_audio) {
- boost::optional<Time> audio_done_up_to;
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
- if ((*i)->decoder->done ()) {
- continue;
+ /* ImageContent */
+ shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
+ if (ic) {
+ /* See if we can re-use an old ImageDecoder */
+ for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
+ shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
+ if (imd && imd->content() == ic) {
+ decoder = imd;
+ }
}
- shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
- if (ad && ad->has_audio ()) {
- audio_done_up_to = min (audio_done_up_to.get_value_or (TIME_MAX), (*i)->audio_position);
+ if (!decoder) {
+ decoder.reset (new ImageDecoder (ic));
}
+
+ frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
}
- if (audio_done_up_to) {
- TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to.get ());
- Audio (tb.audio, tb.time);
- _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+ /* SndfileContent */
+ shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
+ if (sc) {
+ decoder.reset (new SndfileDecoder (sc));
+ frc = best_overlap_frc;
}
+
+ /* SubRipContent */
+ shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
+ if (rc) {
+ decoder.reset (new SubRipDecoder (rc));
+ frc = best_overlap_frc;
+ }
+
+ _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
}
-
- return false;
+
+ _have_valid_pieces = true;
}
-/** @param extra Amount of extra time to add to the content frame's time (for repeat) */
void
-Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const ImageProxy> image, Eyes eyes, Part part, bool same, VideoContent::Frame frame, Time extra)
+Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
- /* Keep a note of what came in so that we can repeat it if required */
- _last_incoming_video.weak_piece = weak_piece;
- _last_incoming_video.image = image;
- _last_incoming_video.eyes = eyes;
- _last_incoming_video.part = part;
- _last_incoming_video.same = same;
- _last_incoming_video.frame = frame;
- _last_incoming_video.extra = extra;
-
- shared_ptr<Piece> piece = weak_piece.lock ();
- if (!piece) {
- return;
- }
-
- shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
- assert (content);
-
- FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
- if (frc.skip && (frame % 2) == 1) {
- return;
- }
-
- Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
- if (content->trimmed (relative_time)) {
+ shared_ptr<Content> c = w.lock ();
+ if (!c) {
return;
}
- Time const time = content->position() + relative_time + extra - content->trim_start ();
- libdcp::Size const image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
-
- shared_ptr<PlayerVideoFrame> pi (
- new PlayerVideoFrame (
- image,
- content->crop(),
- image_size,
- _video_container_size,
- _film->scaler(),
- eyes,
- part,
- content->colour_conversion()
- )
- );
-
- if (_film->with_subtitles ()) {
- for (list<Subtitle>::const_iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
- if (i->covers (time)) {
- /* This may be true for more than one of _subtitles, but the last (latest-starting)
- one is the one we want to use, so that's ok.
- */
- Position<int> const container_offset (
- (_video_container_size.width - image_size.width) / 2,
- (_video_container_size.height - image_size.width) / 2
- );
-
- pi->set_subtitle (i->out_image(), i->out_position() + container_offset);
- }
- }
- }
-
- /* Clear out old subtitles */
- for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ) {
- list<Subtitle>::iterator j = i;
- ++j;
+ if (
+ property == ContentProperty::POSITION ||
+ property == ContentProperty::LENGTH ||
+ property == ContentProperty::TRIM_START ||
+ property == ContentProperty::TRIM_END ||
+ property == ContentProperty::PATH ||
+ property == VideoContentProperty::VIDEO_FRAME_TYPE
+ ) {
- if (i->ends_before (time)) {
- _subtitles.erase (i);
- }
-
- i = j;
- }
-
-#ifdef DCPOMATIC_DEBUG
- _last_video = piece->content;
-#endif
-
- Video (pi, same, time);
-
- _last_emit_was_black = false;
- _video_position = piece->video_position = (time + TIME_HZ / _film->video_frame_rate());
+ _have_valid_pieces = false;
+ Changed (frequent);
- if (frc.repeat > 1 && !piece->repeating ()) {
- piece->set_repeat (_last_incoming_video, frc.repeat - 1);
+ } else if (
+ property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
+ property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
+ property == SubtitleContentProperty::SUBTITLE_SCALE ||
+ property == VideoContentProperty::VIDEO_CROP ||
+ property == VideoContentProperty::VIDEO_SCALE ||
+ property == VideoContentProperty::VIDEO_FRAME_RATE
+ ) {
+
+ Changed (frequent);
}
}
void
-Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
+Player::playlist_changed ()
{
- shared_ptr<Piece> piece = weak_piece.lock ();
- if (!piece) {
- return;
- }
+ _have_valid_pieces = false;
+ Changed (false);
+}
- shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
- assert (content);
+void
+Player::set_video_container_size (dcp::Size s)
+{
+ _video_container_size = s;
- /* Gain */
- if (content->audio_gain() != 0) {
- shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
- gain->apply_gain (content->audio_gain ());
- audio = gain;
- }
+ _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
+ _black_image->make_black ();
+}
- /* Resample */
- if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
- shared_ptr<Resampler> r = resampler (content, true);
- pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
- audio = ro.first;
- frame = ro.second;
- }
-
- Time const relative_time = _film->audio_frames_to_time (frame);
+void
+Player::film_changed (Film::Property p)
+{
+ /* Here we should notice Film properties that affect our output, and
+ alert listeners that our output would now be different to how it was
+ last time we were run.
+ */
- if (content->trimmed (relative_time)) {
- return;
+ if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
+ Changed (false);
}
+}
- Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time - content->trim_start ();
+list<PositionImage>
+Player::process_content_image_subtitles (shared_ptr<SubtitleContent> content, list<shared_ptr<ContentImageSubtitle> > subs) const
+{
+ list<PositionImage> all;
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
- dcp_mapped->make_silent ();
-
- AudioMapping map = content->audio_mapping ();
- for (int i = 0; i < map.content_channels(); ++i) {
- for (int j = 0; j < _film->audio_channels(); ++j) {
- if (map.get (i, static_cast<libdcp::Channel> (j)) > 0) {
- dcp_mapped->accumulate_channel (
- audio.get(),
- i,
- static_cast<libdcp::Channel> (j),
- map.get (i, static_cast<libdcp::Channel> (j))
- );
- }
+ for (list<shared_ptr<ContentImageSubtitle> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ if (!(*i)->image) {
+ continue;
}
+
+ dcpomatic::Rect<double> in_rect = (*i)->rectangle;
+ dcp::Size scaled_size;
+
+ in_rect.x += content->subtitle_x_offset ();
+ in_rect.y += content->subtitle_y_offset ();
+
+ /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
+ scaled_size.width = in_rect.width * _video_container_size.width * content->subtitle_scale ();
+ scaled_size.height = in_rect.height * _video_container_size.height * content->subtitle_scale ();
+
+ /* Then we need a corrective translation, consisting of two parts:
+ *
+ * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
+ * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
+ *
+ * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
+ * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
+ * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
+ *
+ * Combining these two translations gives these expressions.
+ */
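+ /* A worked example with assumed illustrative numbers: if in_rect.x = 0.1,
+  * in_rect.width = 0.5 and subtitle_scale = 0.8 in a 1998-pixel-wide container,
+  * the x position is rint (1998 * (0.1 + 0.5 * (1 - 0.8) / 2)) = rint (299.7) = 300.
+  */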
+
+ all.push_back (
+ PositionImage (
+ (*i)->image->scale (
+ scaled_size,
+ Scaler::from_id ("bicubic"),
+ (*i)->image->pixel_format (),
+ true
+ ),
+ Position<int> (
+ rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - content->subtitle_scale ()) / 2))),
+ rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - content->subtitle_scale ()) / 2)))
+ )
+ )
+ );
}
- audio = dcp_mapped;
+ return all;
+}
- /* We must cut off anything that comes before the start of all time */
- if (time < 0) {
- int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
- if (frames >= audio->frames ()) {
- return;
+list<PositionImage>
+Player::process_content_text_subtitles (list<shared_ptr<ContentTextSubtitle> > sub) const
+{
+ list<PositionImage> all;
+ for (list<shared_ptr<ContentTextSubtitle> >::const_iterator i = sub.begin(); i != sub.end(); ++i) {
+ if (!(*i)->subs.empty ()) {
+ all.push_back (render_subtitles ((*i)->subs, _video_container_size));
}
-
- shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
- trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
-
- audio = trimmed;
- time = 0;
}
- _audio_merger.push (audio, time);
- piece->audio_position += _film->audio_frames_to_time (audio->frames ());
+ return all;
}
void
-Player::flush ()
+Player::set_approximate_size ()
{
- TimedAudioBuffers<Time> tb = _audio_merger.flush ();
- if (_audio && tb.audio) {
- Audio (tb.audio, tb.time);
- _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
- }
+ _approximate_size = true;
+}
- shared_ptr<DCPVideo>
- Player::black_dcp_video (DCPTime time) const
- while (_video && _video_position < _audio_position) {
- emit_black ();
- }
++shared_ptr<PlayerVideoFrame>
++Player::black_player_video_frame () const
+{
- return shared_ptr<DCPVideo> (
- new DCPVideo (
- _black_image,
- EYES_BOTH,
++ return shared_ptr<PlayerVideoFrame> (
++ new PlayerVideoFrame (
++ shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
+ Crop (),
+ _video_container_size,
+ _video_container_size,
+ Scaler::from_id ("bicubic"),
- Config::instance()->colour_conversions().front().conversion,
- time
++ EYES_BOTH,
++ PART_WHOLE,
++ Config::instance()->colour_conversions().front().conversion
+ )
+ );
+}
- shared_ptr<DCPVideo>
- Player::content_to_dcp (
- while (_audio && _audio_position < _video_position) {
- emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
++shared_ptr<PlayerVideoFrame>
++Player::content_to_player_video_frame (
+ shared_ptr<VideoContent> content,
+ ContentVideo content_video,
+ list<shared_ptr<Piece> > subs,
+ DCPTime time,
+ dcp::Size image_size) const
+{
- shared_ptr<DCPVideo> dcp_video (
- new DCPVideo (
++ shared_ptr<PlayerVideoFrame> pvf (
++ new PlayerVideoFrame (
+ content_video.image,
- content_video.eyes,
+ content->crop (),
+ image_size,
+ _video_container_size,
+ _film->scaler(),
- content->colour_conversion (),
- time
++ content_video.eyes,
++ content_video.part,
++ content->colour_conversion ()
+ )
+ );
+
+ /* Add subtitles */
+
+ list<PositionImage> sub_images;
+
+ for (list<shared_ptr<Piece> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*i)->decoder);
+ shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*i)->content);
+ ContentTime const from = dcp_to_content_subtitle (*i, time);
+ ContentTime const to = from + ContentTime::from_frames (1, content->video_frame_rate ());
+
+ list<shared_ptr<ContentImageSubtitle> > image_subtitles = subtitle_decoder->get_image_subtitles (from, to);
+ if (!image_subtitles.empty ()) {
+ list<PositionImage> im = process_content_image_subtitles (
+ subtitle_content,
+ image_subtitles
+ );
+
+ copy (im.begin(), im.end(), back_inserter (sub_images));
+ }
+
+ if (_burn_subtitles) {
+ list<shared_ptr<ContentTextSubtitle> > text_subtitles = subtitle_decoder->get_text_subtitles (from, to);
+ if (!text_subtitles.empty ()) {
+ list<PositionImage> im = process_content_text_subtitles (text_subtitles);
+ copy (im.begin(), im.end(), back_inserter (sub_images));
+ }
+ }
}
- dcp_video->set_subtitle (merge (sub_images));
+ if (!sub_images.empty ()) {
- return dcp_video;
++ pvf->set_subtitle (merge (sub_images));
+ }
+
++ return pvf;
}
- /** @return All DCPVideo at the given time (there may be two frames for 3D) */
- list<shared_ptr<DCPVideo> >
-/** Seek so that the next pass() will yield (approximately) the requested frame.
- * Pass accurate = true to try harder to get close to the request.
- * @return true on error
- */
-void
-Player::seek (Time t, bool accurate)
++/** @return All PlayerVideoFrames at the given time (there may be two frames for 3D) */
++list<shared_ptr<PlayerVideoFrame> >
+Player::get_video (DCPTime time, bool accurate)
{
if (!_have_valid_pieces) {
setup_pieces ();
}
+
+ list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
+ time,
+ time + DCPTime::from_frames (1, _film->video_frame_rate ())
+ );
- list<shared_ptr<DCPVideo> > dcp_video;
- if (_pieces.empty ()) {
- return;
++ list<shared_ptr<PlayerVideoFrame> > pvf;
+
+ if (ov.empty ()) {
+ /* No video content at this time */
- dcp_video.push_back (black_dcp_video (time));
- return dcp_video;
++ pvf.push_back (black_player_video_frame ());
++ return pvf;
}
- /* Create a DCPVideo from the content's video at this time */
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
- shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
- if (!vc) {
- continue;
- }
-
- /* s is the offset of t from the start position of this content */
- Time s = t - vc->position ();
- s = max (static_cast<Time> (0), s);
- s = min (vc->length_after_trim(), s);
++ /* Create a PlayerVideoFrame from the content's video at this time */
- /* Hence set the piece positions to the `global' time */
- (*i)->video_position = (*i)->audio_position = vc->position() + s;
-
- /* And seek the decoder */
- dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (
- vc->time_to_content_video_frames (s + vc->trim_start ()), accurate
- );
+ shared_ptr<Piece> piece = ov.back ();
+ shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
+ assert (decoder);
+ shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
+ assert (content);
- (*i)->reset_repeat ();
+ list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
+ if (content_video.empty ()) {
- dcp_video.push_back (black_dcp_video (time));
- return dcp_video;
++ pvf.push_back (black_player_video_frame ());
++ return pvf;
}
- _video_position = _audio_position = t;
+ dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
+ if (_approximate_size) {
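+ /* Masking with ~3 rounds each dimension down to a multiple of 4;
+    e.g. a width of 1013 becomes 1012. */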
+ image_size.width &= ~3;
+ image_size.height &= ~3;
+ }
- /* XXX: don't seek audio because we don't need to... */
+ for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
+ list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (
+ time,
+ time + DCPTime::from_frames (1, _film->video_frame_rate ())
+ );
+
- dcp_video.push_back (content_to_dcp (content, *i, subs, time, image_size));
++ pvf.push_back (content_to_player_video_frame (content, *i, subs, time, image_size));
+ }
+
- return dcp_video;
++ return pvf;
}
-void
-Player::setup_pieces ()
+shared_ptr<AudioBuffers>
+Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
- list<shared_ptr<Piece> > old_pieces = _pieces;
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
- _pieces.clear ();
+ AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());
- ContentList content = _playlist->content ();
- sort (content.begin(), content.end(), ContentSorter ());
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
+ audio->make_silent ();
+
+ list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
+ if (ov.empty ()) {
+ return audio;
+ }
- for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
+ for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {
- if (!(*i)->paths_valid ()) {
+ shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
+ assert (content);
+ shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
+ assert (decoder);
+
+ if (content->audio_frame_rate() == 0) {
+ /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
+ * audio stream).
+ */
continue;
}
- shared_ptr<Piece> piece (new Piece (*i));
-
- /* XXX: into content? */
-
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
- if (fc) {
- shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
-
- fd->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, _5, 0));
- fd->Audio.connect (bind (&Player::process_audio, this, weak_ptr<Piece> (piece), _1, _2));
- fd->Subtitle.connect (bind (&Player::process_subtitle, this, weak_ptr<Piece> (piece), _1, _2, _3, _4));
-
- fd->seek (fc->time_to_content_video_frames (fc->trim_start ()), true);
- piece->decoder = fd;
+ /* The time that we should request from the content */
+ DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
+ DCPTime offset;
+ if (request < DCPTime ()) {
+ /* We went off the start of the content, so we will need to offset
+ the audio we get back.
+ */
+ offset = -request;
+ request = DCPTime ();
}
-
- shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
- if (ic) {
- bool reusing = false;
-
- /* See if we can re-use an old ImageDecoder */
- for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
- shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
- if (imd && imd->content() == ic) {
- piece = *j;
- reusing = true;
- }
- }
- if (!reusing) {
- shared_ptr<ImageDecoder> id (new ImageDecoder (_film, ic));
- id->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, _5, 0));
- piece->decoder = id;
- }
- }
+ AudioFrame const content_frame = dcp_to_content_audio (*i, request);
- shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
- if (sc) {
- shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
- sd->Audio.connect (bind (&Player::process_audio, this, weak_ptr<Piece> (piece), _1, _2));
+ /* Audio from this piece's decoder (which might be more or less than what we asked for) */
+ shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, length_frames, accurate);
- piece->decoder = sd;
+ /* Gain */
+ if (content->audio_gain() != 0) {
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
+ gain->apply_gain (content->audio_gain ());
+ all->audio = gain;
}
- _pieces.push_back (piece);
- }
-
- _have_valid_pieces = true;
-}
-
-void
-Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
-{
- shared_ptr<Content> c = w.lock ();
- if (!c) {
- return;
- }
-
- if (
- property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
- property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
- property == VideoContentProperty::VIDEO_FRAME_TYPE
- ) {
-
- _have_valid_pieces = false;
- Changed (frequent);
-
- } else if (
- property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
- property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
- property == SubtitleContentProperty::SUBTITLE_SCALE
- ) {
-
- for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
- i->update (_film, _video_container_size);
+ /* Remap channels */
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
+ dcp_mapped->make_silent ();
+ AudioMapping map = content->audio_mapping ();
+ for (int i = 0; i < map.content_channels(); ++i) {
+ for (int j = 0; j < _film->audio_channels(); ++j) {
+ if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
+ dcp_mapped->accumulate_channel (
+ all->audio.get(),
+ i,
+ j,
+ map.get (i, static_cast<dcp::Channel> (j))
+ );
+ }
+ }
}
- Changed (frequent);
+ all->audio = dcp_mapped;
- } else if (
- property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
- property == VideoContentProperty::VIDEO_FRAME_RATE
- ) {
-
- Changed (frequent);
-
- } else if (property == ContentProperty::PATH) {
-
- _have_valid_pieces = false;
- Changed (frequent);
+ audio->accumulate_frames (
+ all->audio.get(),
+ content_frame - all->frame,
+ offset.frames (_film->audio_frame_rate()),
+ min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
+ );
}
-}
-
-void
-Player::playlist_changed ()
-{
- _have_valid_pieces = false;
- Changed (false);
-}
-void
-Player::set_video_container_size (libdcp::Size s)
-{
- _video_container_size = s;
-
- shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
- im->make_black ();
-
- _black_frame.reset (
- new PlayerVideoFrame (
- shared_ptr<ImageProxy> (new RawImageProxy (im)),
- Crop(),
- _video_container_size,
- _video_container_size,
- Scaler::from_id ("bicubic"),
- EYES_BOTH,
- PART_WHOLE,
- ColourConversion ()
- )
- );
+ return audio;
}
-shared_ptr<Resampler>
-Player::resampler (shared_ptr<AudioContent> c, bool create)
+VideoFrame
+Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
- map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
- if (i != _resamplers.end ()) {
- return i->second;
- }
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
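+
+ /* e.g. (assumed numbers) for content positioned at 10s with no trim in a
+    24fps DCP, t = 12s gives s = 2s, so with frc.factor() == 1 we return 48. */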
- if (!create) {
- return shared_ptr<Resampler> ();
- }
-
- _film->log()->log (
- String::compose (
- "Creating new resampler for %1 to %2 with %3 channels", c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()
- )
- );
-
- shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
- _resamplers[c] = r;
- return r;
+ /* Convert this to the content frame */
+ return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}
-void
-Player::emit_black ()
+AudioFrame
+Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
-#ifdef DCPOMATIC_DEBUG
- _last_video.reset ();
-#endif
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
- Video (_black_frame, _last_emit_was_black, _video_position);
- _video_position += _film->video_frames_to_time (1);
- _last_emit_was_black = true;
+ /* Convert this to the content frame */
+ return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}
-void
-Player::emit_silence (OutputAudioFrame most)
+ContentTime
+Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
- if (most == 0) {
- return;
- }
-
- OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
- shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
- silence->make_silent ();
- Audio (silence, _audio_position);
- _audio_position += _film->audio_frames_to_time (N);
-}
-
-void
-Player::film_changed (Film::Property p)
-{
- /* Here we should notice Film properties that affect our output, and
- alert listeners that our output now would be different to how it was
- last time we were run.
- */
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
- if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
- Changed (false);
- }
+ return ContentTime (s, piece->frc);
}
void
-Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
+PlayerStatistics::dump (shared_ptr<Log> log) const
{
- if (!image) {
- /* A null image means that we should stop any current subtitles at `from' */
- for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
- i->set_stop (from);
- }
- } else {
- _subtitles.push_back (Subtitle (_film, _video_container_size, weak_piece, image, rect, from, to));
- }
+ log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
+ log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()));
}
-/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
- * @return false if this could not be done.
- */
-bool
-Player::repeat_last_video ()
+PlayerStatistics const &
+Player::statistics () const
{
- if (!_last_incoming_video.image || !_have_valid_pieces) {
- return false;
- }
-
- process_video (
- _last_incoming_video.weak_piece,
- _last_incoming_video.image,
- _last_incoming_video.eyes,
- _last_incoming_video.part,
- _last_incoming_video.same,
- _last_incoming_video.frame,
- _last_incoming_video.extra
- );
-
- return true;
+ return _statistics;
}
#include "content.h"
#include "film.h"
#include "rect.h"
-#include "audio_merger.h"
#include "audio_content.h"
+#include "dcpomatic_time.h"
+#include "content_subtitle.h"
+#include "position_image.h"
#include "piece.h"
-#include "subtitle.h"
+#include "content_video.h"
class Job;
class Film;
class AudioContent;
class Piece;
class Image;
- class DCPVideo;
+class Decoder;
-
+ class Resampler;
+ class PlayerVideoFrame;
+ class ImageProxy;
+
+class PlayerStatistics
+{
+public:
+ struct Video {
+ Video ()
+ : black (0)
+ , repeat (0)
+ , good (0)
+ , skip (0)
+ {}
+
+ int black;
+ int repeat;
+ int good;
+ int skip;
+ } video;
+
+ struct Audio {
+ Audio ()
+ : silence (0)
+ , good (0)
+ , skip (0)
+ {}
+
+ DCPTime silence;
+ int64_t good;
+ int64_t skip;
+ } audio;
+
+ void dump (boost::shared_ptr<Log>) const;
+};
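+
+/* After a run the statistics can be dumped to the log, as the Transcoder does:
+ *
+ *   player->statistics().dump (film->log ());
+ */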
+
- /** @class PlayerImage
- * @brief A wrapper for an Image which contains some pending operations; these may
- * not be necessary if the receiver of the PlayerImage throws it away.
- */
- class PlayerImage
- {
- public:
- PlayerImage (boost::shared_ptr<const Image>, Crop, dcp::Size, dcp::Size, Scaler const *);
-
- void set_subtitle (boost::shared_ptr<const Image>, Position<int>);
-
- boost::shared_ptr<Image> image ();
-
- private:
- boost::shared_ptr<const Image> _in;
- Crop _crop;
- dcp::Size _inter_size;
- dcp::Size _out_size;
- Scaler const * _scaler;
- boost::shared_ptr<const Image> _subtitle_image;
- Position<int> _subtitle_position;
- };
-
/** @class Player
- * @brief A class which can `play' a Playlist; emitting its audio and video.
+ * @brief A class which can `play' a Playlist.
*/
class Player : public boost::enable_shared_from_this<Player>, public boost::noncopyable
{
public:
Player (boost::shared_ptr<const Film>, boost::shared_ptr<const Playlist>);
- std::list<boost::shared_ptr<DCPVideo> > get_video (DCPTime time, bool accurate);
- void disable_video ();
- void disable_audio ();
-
- bool pass ();
- void seek (Time, bool);
++ std::list<boost::shared_ptr<PlayerVideoFrame> > get_video (DCPTime time, bool accurate);
+ boost::shared_ptr<AudioBuffers> get_audio (DCPTime time, DCPTime length, bool accurate);
- Time video_position () const {
- return _video_position;
+ void set_video_container_size (dcp::Size);
+ void set_approximate_size ();
+ void set_burn_subtitles (bool burn) {
+ _burn_subtitles = burn;
}
- void set_video_container_size (libdcp::Size);
-
- bool repeat_last_video ();
-
- /** Emitted when a video frame is ready.
- * First parameter is the video image.
- * Second parameter is true if the frame is the same as the last one that was emitted.
- * Third parameter is the time.
- */
- boost::signals2::signal<void (boost::shared_ptr<PlayerVideoFrame>, bool, Time)> Video;
+ PlayerStatistics const & statistics () const;
- /** Emitted when some audio data is ready */
- boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, Time)> Audio;
-
/** Emitted when something has changed such that if we went back and emitted
* the last frame again it would look different. This is not emitted after
* a seek.
private:
friend class PlayerWrapper;
friend class Piece;
+ friend class player_overlaps_test;
- void process_video (boost::weak_ptr<Piece>, boost::shared_ptr<const ImageProxy>, Eyes, Part, bool, VideoContent::Frame, Time);
- void process_audio (boost::weak_ptr<Piece>, boost::shared_ptr<const AudioBuffers>, AudioContent::Frame);
- void process_subtitle (boost::weak_ptr<Piece>, boost::shared_ptr<Image>, dcpomatic::Rect<double>, Time, Time);
void setup_pieces ();
void playlist_changed ();
void content_changed (boost::weak_ptr<Content>, int, bool);
- void do_seek (Time, bool);
void flush ();
- void emit_black ();
- void emit_silence (OutputAudioFrame);
- boost::shared_ptr<Resampler> resampler (boost::shared_ptr<AudioContent>, bool);
void film_changed (Film::Property);
- void update_subtitle ();
-
+ std::list<PositionImage> process_content_image_subtitles (
+ boost::shared_ptr<SubtitleContent>, std::list<boost::shared_ptr<ContentImageSubtitle> >
+ ) const;
+ std::list<PositionImage> process_content_text_subtitles (std::list<boost::shared_ptr<ContentTextSubtitle> >) const;
+ void update_subtitle_from_text ();
+ VideoFrame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ AudioFrame dcp_to_content_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ ContentTime dcp_to_content_subtitle (boost::shared_ptr<const Piece> piece, DCPTime t) const;
- boost::shared_ptr<DCPVideo> black_dcp_video (DCPTime) const;
- boost::shared_ptr<DCPVideo> content_to_dcp (
++ boost::shared_ptr<PlayerVideoFrame> black_player_video_frame () const;
++ boost::shared_ptr<PlayerVideoFrame> content_to_player_video_frame (
+ boost::shared_ptr<VideoContent> content,
+ ContentVideo content_video,
+ std::list<boost::shared_ptr<Piece> > subs,
+ DCPTime time,
+ dcp::Size image_size
+ ) const;
+
+ /** @return Pieces of content type C that overlap a specified time range in the DCP */
+ template<class C>
+ std::list<boost::shared_ptr<Piece> >
+ overlaps (DCPTime from, DCPTime to)
+ {
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
+
+ std::list<boost::shared_ptr<Piece> > overlaps;
+ for (typename std::list<boost::shared_ptr<Piece> >::const_iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+ if (!boost::dynamic_pointer_cast<C> ((*i)->content)) {
+ continue;
+ }
+
+ if ((*i)->content->position() <= to && (*i)->content->end() >= from) {
+ overlaps.push_back (*i);
+ }
+ }
+
+ return overlaps;
+ }
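+ /* e.g. get_video() uses this to find video content covering one DCP frame:
+  *
+  *   overlaps<VideoContent> (time, time + DCPTime::from_frames (1, _film->video_frame_rate ()));
+  */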
+
boost::shared_ptr<const Film> _film;
boost::shared_ptr<const Playlist> _playlist;
-
- bool _video;
- bool _audio;
/** Our pieces are ready to go; if this is false the pieces must be (re-)created before they are used */
bool _have_valid_pieces;
std::list<boost::shared_ptr<Piece> > _pieces;
- /** The time after the last video that we emitted */
- Time _video_position;
- /** The time after the last audio that we emitted */
- Time _audio_position;
-
- AudioMerger<Time, AudioContent::Frame> _audio_merger;
-
- libdcp::Size _video_container_size;
- boost::shared_ptr<PlayerVideoFrame> _black_frame;
- std::map<boost::shared_ptr<AudioContent>, boost::shared_ptr<Resampler> > _resamplers;
-
- std::list<Subtitle> _subtitles;
-
-#ifdef DCPOMATIC_DEBUG
- boost::shared_ptr<Content> _last_video;
-#endif
+ dcp::Size _video_container_size;
+ boost::shared_ptr<Image> _black_image;
- bool _last_emit_was_black;
+ bool _approximate_size;
+ bool _burn_subtitles;
- IncomingVideo _last_incoming_video;
+ PlayerStatistics _statistics;
boost::signals2::scoped_connection _playlist_changed_connection;
boost::signals2::scoped_connection _playlist_content_changed_connection;
--- /dev/null
-#include <libdcp/raw_convert.h>
+ /*
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
-using libdcp::raw_convert;
++#include <dcp/raw_convert.h>
+ #include "player_video_frame.h"
+ #include "image.h"
+ #include "image_proxy.h"
+ #include "scaler.h"
+
+ using std::string;
+ using std::cout;
+ using boost::shared_ptr;
- libdcp::Size inter_size,
- libdcp::Size out_size,
++using dcp::raw_convert;
+
+ PlayerVideoFrame::PlayerVideoFrame (
+ shared_ptr<const ImageProxy> in,
+ Crop crop,
- _inter_size = libdcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
- _out_size = libdcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
++ dcp::Size inter_size,
++ dcp::Size out_size,
+ Scaler const * scaler,
+ Eyes eyes,
+ Part part,
+ ColourConversion colour_conversion
+ )
+ : _in (in)
+ , _crop (crop)
+ , _inter_size (inter_size)
+ , _out_size (out_size)
+ , _scaler (scaler)
+ , _eyes (eyes)
+ , _part (part)
+ , _colour_conversion (colour_conversion)
+ {
+
+ }
+
+ PlayerVideoFrame::PlayerVideoFrame (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket)
+ {
+ _crop = Crop (node);
+
- _subtitle_position = Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY"));
++ _inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
++ _out_size = dcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
+ _scaler = Scaler::from_id (node->string_child ("Scaler"));
+ _eyes = (Eyes) node->number_child<int> ("Eyes");
+ _part = (Part) node->number_child<int> ("Part");
+ _colour_conversion = ColourConversion (node);
+
+ _in = image_proxy_factory (node->node_child ("In"), socket);
+
+ if (node->optional_number_child<int> ("SubtitleX")) {
+
- shared_ptr<Image> image (
- new Image (PIX_FMT_RGBA, libdcp::Size (node->number_child<int> ("SubtitleWidth"), node->number_child<int> ("SubtitleHeight")), true)
++ _subtitle.position = Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY"));
+
- image->read_from_socket (socket);
- _subtitle_image = image;
++ _subtitle.image.reset (
++ new Image (PIX_FMT_RGBA, dcp::Size (node->number_child<int> ("SubtitleWidth"), node->number_child<int> ("SubtitleHeight")), true)
+ );
+
-PlayerVideoFrame::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
++ _subtitle.image->read_from_socket (socket);
+ }
+ }
+
+ void
- _subtitle_image = image;
- _subtitle_position = pos;
++PlayerVideoFrame::set_subtitle (PositionImage image)
+ {
- if (_subtitle_image) {
- out->alpha_blend (_subtitle_image, _subtitle_position);
++ _subtitle = image;
+ }
+
+ shared_ptr<Image>
+ PlayerVideoFrame::image () const
+ {
+ shared_ptr<Image> im = _in->image ();
+
+ Crop total_crop = _crop;
+ switch (_part) {
+ case PART_LEFT_HALF:
+ total_crop.right += im->size().width / 2;
+ break;
+ case PART_RIGHT_HALF:
+ total_crop.left += im->size().width / 2;
+ break;
+ case PART_TOP_HALF:
+ total_crop.bottom += im->size().height / 2;
+ break;
+ case PART_BOTTOM_HALF:
+ total_crop.top += im->size().height / 2;
+ break;
+ default:
+ break;
+ }
+
+ shared_ptr<Image> out = im->crop_scale_window (total_crop, _inter_size, _out_size, _scaler, PIX_FMT_RGB24, false);
+
+ Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);
+
- if (_subtitle_image) {
- node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_subtitle_image->size().width));
- node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_subtitle_image->size().height));
- node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_subtitle_position.x));
- node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_subtitle_position.y));
++ if (_subtitle.image) {
++ out->alpha_blend (_subtitle.image, _subtitle.position);
+ }
+
+ return out;
+ }
+
+ void
+ PlayerVideoFrame::add_metadata (xmlpp::Node* node) const
+ {
+ _crop.as_xml (node);
+ _in->add_metadata (node->add_child ("In"));
+ node->add_child("InterWidth")->add_child_text (raw_convert<string> (_inter_size.width));
+ node->add_child("InterHeight")->add_child_text (raw_convert<string> (_inter_size.height));
+ node->add_child("OutWidth")->add_child_text (raw_convert<string> (_out_size.width));
+ node->add_child("OutHeight")->add_child_text (raw_convert<string> (_out_size.height));
+ node->add_child("Scaler")->add_child_text (_scaler->id ());
+ node->add_child("Eyes")->add_child_text (raw_convert<string> (_eyes));
+ node->add_child("Part")->add_child_text (raw_convert<string> (_part));
+ _colour_conversion.as_xml (node);
- if (_subtitle_image) {
- _subtitle_image->write_to_socket (socket);
++ if (_subtitle.image) {
++ node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_subtitle.image->size().width));
++ node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_subtitle.image->size().height));
++ node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_subtitle.position.x));
++ node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_subtitle.position.y));
+ }
+ }
+
+ void
+ PlayerVideoFrame::send_binary (shared_ptr<Socket> socket) const
+ {
+ _in->send_binary (socket);
++ if (_subtitle.image) {
++ _subtitle.image->write_to_socket (socket);
+ }
+ }
--- /dev/null
- PlayerVideoFrame (boost::shared_ptr<const ImageProxy>, Crop, libdcp::Size, libdcp::Size, Scaler const *, Eyes, Part, ColourConversion);
+ /*
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+ #include <boost/shared_ptr.hpp>
+ #include "types.h"
+ #include "position.h"
+ #include "colour_conversion.h"
++#include "position_image.h"
+
+ class Image;
+ class ImageProxy;
+ class Scaler;
+ class Socket;
+
+ /** Everything needed to describe a video frame coming out of the player, but with the
+ * bits still in their raw form. We may want to combine the bits on a remote machine,
+ * or maybe not even bother to combine them at all.
+ */
+ class PlayerVideoFrame
+ {
+ public:
- void set_subtitle (boost::shared_ptr<const Image>, Position<int>);
++ PlayerVideoFrame (boost::shared_ptr<const ImageProxy>, Crop, dcp::Size, dcp::Size, Scaler const *, Eyes, Part, ColourConversion);
+ PlayerVideoFrame (boost::shared_ptr<cxml::Node>, boost::shared_ptr<Socket>);
+
- libdcp::Size _inter_size;
- libdcp::Size _out_size;
++ void set_subtitle (PositionImage);
+
+ boost::shared_ptr<Image> image () const;
+
+ void add_metadata (xmlpp::Node* node) const;
+ void send_binary (boost::shared_ptr<Socket> socket) const;
+
+ Eyes eyes () const {
+ return _eyes;
+ }
+
+ ColourConversion colour_conversion () const {
+ return _colour_conversion;
+ }
+
+ private:
+ boost::shared_ptr<const ImageProxy> _in;
+ Crop _crop;
- boost::shared_ptr<const Image> _subtitle_image;
- Position<int> _subtitle_position;
++ dcp::Size _inter_size;
++ dcp::Size _out_size;
+ Scaler const * _scaler;
+ Eyes _eyes;
+ Part _part;
+ ColourConversion _colour_conversion;
++ PositionImage _subtitle;
+ };
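+
+ /* A frame serialised with add_metadata() and send_binary() can be rebuilt on
+  * an encoding server via the (node, socket) constructor; server.cc does
+  *
+  *   shared_ptr<PlayerVideoFrame> pvf (new PlayerVideoFrame (xml, socket));
+  */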
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include <boost/algorithm/string.hpp>
#include <boost/scoped_array.hpp>
#include <libcxml/cxml.h>
-#include <libdcp/raw_convert.h>
+#include <dcp/raw_convert.h>
#include "server.h"
#include "util.h"
#include "scaler.h"
#include "dcp_video_frame.h"
#include "config.h"
#include "cross.h"
+ #include "player_video_frame.h"
#include "i18n.h"
using boost::bind;
using boost::scoped_array;
using boost::optional;
-using libdcp::Size;
-using libdcp::raw_convert;
+using dcp::Size;
+using dcp::raw_convert;
Server::Server (shared_ptr<Log> log, bool verbose)
: _log (log)
uint32_t length = socket->read_uint32 ();
scoped_array<char> buffer (new char[length]);
socket->read (reinterpret_cast<uint8_t*> (buffer.get()), length);
-
+
stringstream s (buffer.get());
shared_ptr<cxml::Document> xml (new cxml::Document ("EncodingRequest"));
xml->read_stream (s);
return -1;
}
- dcp::Size size (
- xml->number_child<int> ("Width"), xml->number_child<int> ("Height")
- );
-
- shared_ptr<Image> image (new Image (PIX_FMT_RGB24, size, true));
+ shared_ptr<PlayerVideoFrame> pvf (new PlayerVideoFrame (xml, socket));
- image->read_from_socket (socket);
- DCPVideoFrame dcp_video_frame (image, xml, _log);
+ DCPVideoFrame dcp_video_frame (pvf, xml, _log);
gettimeofday (&after_read, 0);
try {
encoded->send (socket);
} catch (std::exception& e) {
- _log->log (String::compose (
- "Send failed; frame %1, data size %2, pixel format %3, image size %4x%5, %6 components",
- dcp_video_frame.frame(), encoded->size(), image->pixel_format(), image->size().width, image->size().height, image->components()
- )
- );
+ _log->log (String::compose ("Send failed; frame %1", dcp_video_frame.index()));
throw;
}
- return dcp_video_frame.frame ();
+ return dcp_video_frame.index ();
}
void
#include "job.h"
using std::string;
+using std::cout;
+using std::list;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
-static void
-video_proxy (weak_ptr<Encoder> encoder, shared_ptr<PlayerVideoFrame> pvf, bool same)
-{
- shared_ptr<Encoder> e = encoder.lock ();
- if (e) {
- e->process_video (pvf, same);
- }
-}
-
-static void
-audio_proxy (weak_ptr<Encoder> encoder, shared_ptr<const AudioBuffers> audio)
-{
- shared_ptr<Encoder> e = encoder.lock ();
- if (e) {
- e->process_audio (audio);
- }
-}
-
/** Construct a transcoder; it creates a Player and an Encoder to do the work.
 * @param f Film that we are transcoding.
 * @param j Job that this transcoding is being done in.
*/
Transcoder::Transcoder (shared_ptr<const Film> f, shared_ptr<Job> j)
- : _player (f->make_player ())
+ : _film (f)
+ , _player (f->make_player ())
, _encoder (new Encoder (f, j))
, _finishing (false)
{
- _player->Video.connect (bind (video_proxy, _encoder, _1, _2));
- _player->Audio.connect (bind (audio_proxy, _encoder, _1));
+
}
void
Transcoder::go ()
{
_encoder->process_begin ();
- while (!_player->pass ()) {}
+
+ DCPTime const frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+ for (DCPTime t; t < _film->length(); t += frame) {
- list<shared_ptr<DCPVideo> > v = _player->get_video (t, true);
- for (list<shared_ptr<DCPVideo> >::const_iterator i = v.begin(); i != v.end(); ++i) {
++ list<shared_ptr<PlayerVideoFrame> > v = _player->get_video (t, true);
++ for (list<shared_ptr<PlayerVideoFrame> >::const_iterator i = v.begin(); i != v.end(); ++i) {
+ _encoder->process_video (*i);
+ }
+ _encoder->process_audio (_player->get_audio (t, frame, true));
+ }
_finishing = true;
_encoder->process_end ();
+
+ _player->statistics().dump (_film->log ());
}
float
#include <vector>
#include <stdint.h>
#include <boost/shared_ptr.hpp>
-#include <libdcp/util.h>
+#include <dcp/util.h>
+#include "dcpomatic_time.h"
+#include "position.h"
class Content;
class VideoContent;
class FFmpegContent;
class AudioBuffers;
+ namespace cxml {
+ class Node;
+ }
+
+ namespace xmlpp {
+ class Node;
+ }
+
/** The version number of the protocol used to communicate
* with servers. Intended to be bumped when incompatibilities
* are introduced.
*/
- #define SERVER_LINK_VERSION 1
+ #define SERVER_LINK_VERSION 2
-typedef int64_t Time;
-#define TIME_MAX INT64_MAX
-#define TIME_HZ ((Time) 96000)
-typedef int64_t OutputAudioFrame;
-typedef int OutputVideoFrame;
typedef std::vector<boost::shared_ptr<Content> > ContentList;
typedef std::vector<boost::shared_ptr<VideoContent> > VideoContentList;
typedef std::vector<boost::shared_ptr<AudioContent> > AudioContentList;
typedef std::vector<boost::shared_ptr<SubtitleContent> > SubtitleContentList;
typedef std::vector<boost::shared_ptr<FFmpegContent> > FFmpegContentList;
-template<class T>
+typedef int64_t VideoFrame;
+typedef int64_t AudioFrame;
+
+/* XXX -> DCPAudio */
struct TimedAudioBuffers
{
TimedAudioBuffers ()
: time (0)
{}
- TimedAudioBuffers (boost::shared_ptr<AudioBuffers> a, T t)
+ TimedAudioBuffers (boost::shared_ptr<AudioBuffers> a, DCPTime t)
: audio (a)
, time (t)
{}
boost::shared_ptr<AudioBuffers> audio;
- T time;
+ DCPTime time;
};
enum VideoFrameType
EYES_COUNT
};
+ enum Part
+ {
+ PART_LEFT_HALF,
+ PART_RIGHT_HALF,
+ PART_TOP_HALF,
+ PART_BOTTOM_HALF,
+ PART_WHOLE
+ };
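+
+ /* For 3D left-and-right content the decoder pushes each source frame twice,
+    as PART_LEFT_HALF and PART_RIGHT_HALF; PlayerVideoFrame::image() crops out
+    the requested half. */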
+
/** @struct Crop
* @brief A description of the crop of an image or video.
*/
{
Crop () : left (0), right (0), top (0), bottom (0) {}
Crop (int l, int r, int t, int b) : left (l), right (r), top (t), bottom (b) {}
+ Crop (boost::shared_ptr<cxml::Node>);
/** Number of pixels to remove from the left-hand side */
int left;
/** Number of pixels to remove from the bottom */
int bottom;
- libdcp::Size apply (libdcp::Size s, int minimum = 4) const {
+ dcp::Size apply (dcp::Size s, int minimum = 4) const {
s.width -= left + right;
s.height -= top + bottom;
return s;
}
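+ /* e.g. Crop (2, 2, 0, 0) applied to 1920x1080 gives 1916x1080 */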
+
+ void as_xml (xmlpp::Node *) const;
};
extern bool operator== (Crop const & a, Crop const & b);
#include <iomanip>
#include <libcxml/cxml.h>
-#include <libdcp/colour_matrix.h>
-#include <libdcp/raw_convert.h>
+#include <dcp/colour_matrix.h>
+#include <dcp/raw_convert.h>
#include "video_content.h"
#include "video_examiner.h"
#include "compose.hpp"
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
-using libdcp::raw_convert;
+using dcp::raw_convert;
vector<VideoContentScale> VideoContentScale::_scales;
setup_default_colour_conversion ();
}
-VideoContent::VideoContent (shared_ptr<const Film> f, Time s, VideoContent::Frame len)
+VideoContent::VideoContent (shared_ptr<const Film> f, DCPTime s, ContentTime len)
: Content (f, s)
, _video_length (len)
, _video_frame_rate (0)
setup_default_colour_conversion ();
}
-VideoContent::VideoContent (shared_ptr<const Film> f, shared_ptr<const cxml::Node> node, int version)
+VideoContent::VideoContent (shared_ptr<const Film> f, cxml::ConstNodePtr node, int version)
: Content (f, node)
{
- _video_length = node->number_child<VideoContent::Frame> ("VideoLength");
+ _video_length = ContentTime (node->number_child<int64_t> ("VideoLength"));
_video_size.width = node->number_child<int> ("VideoWidth");
_video_size.height = node->number_child<int> ("VideoHeight");
_video_frame_rate = node->number_child<float> ("VideoFrameRate");
VideoContent::as_xml (xmlpp::Node* node) const
{
boost::mutex::scoped_lock lm (_mutex);
- node->add_child("VideoLength")->add_child_text (raw_convert<string> (_video_length));
+ node->add_child("VideoLength")->add_child_text (raw_convert<string> (_video_length.get ()));
node->add_child("VideoWidth")->add_child_text (raw_convert<string> (_video_size.width));
node->add_child("VideoHeight")->add_child_text (raw_convert<string> (_video_size.height));
node->add_child("VideoFrameRate")->add_child_text (raw_convert<string> (_video_frame_rate));
node->add_child("VideoFrameType")->add_child_text (raw_convert<string> (static_cast<int> (_video_frame_type)));
- node->add_child("LeftCrop")->add_child_text (raw_convert<string> (_crop.left));
- node->add_child("RightCrop")->add_child_text (raw_convert<string> (_crop.right));
- node->add_child("TopCrop")->add_child_text (raw_convert<string> (_crop.top));
- node->add_child("BottomCrop")->add_child_text (raw_convert<string> (_crop.bottom));
+ _crop.as_xml (node);
_scale.as_xml (node->add_child("Scale"));
_colour_conversion.as_xml (node->add_child("ColourConversion"));
}
void
VideoContent::setup_default_colour_conversion ()
{
- _colour_conversion = PresetColourConversion (_("sRGB"), 2.4, true, libdcp::colour_matrix::srgb_to_xyz, 2.6).conversion;
+ _colour_conversion = PresetColourConversion (_("sRGB"), 2.4, true, dcp::colour_matrix::srgb_to_xyz, 2.6).conversion;
}
void
VideoContent::take_from_video_examiner (shared_ptr<VideoExaminer> d)
{
/* These examiner calls could call other content methods which take a lock on the mutex */
- libdcp::Size const vs = d->video_size ();
+ dcp::Size const vs = d->video_size ();
float const vfr = d->video_frame_rate ();
{
{
return String::compose (
"video: length %1, size %2x%3, rate %4",
- video_length_after_3d_combine(), video_size().width, video_size().height, video_frame_rate()
+ video_length_after_3d_combine().seconds(),
+ video_size().width,
+ video_size().height,
+ video_frame_rate()
);
}
-libdcp::Size
+dcp::Size
VideoContent::video_size_after_3d_split () const
{
- libdcp::Size const s = video_size ();
+ dcp::Size const s = video_size ();
switch (video_frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
case VIDEO_FRAME_TYPE_3D_RIGHT:
return s;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- return libdcp::Size (s.width / 2, s.height);
+ return dcp::Size (s.width / 2, s.height);
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- return libdcp::Size (s.width, s.height / 2);
+ return dcp::Size (s.width, s.height / 2);
}
assert (false);
}
/** @return Video size after 3D split and crop */
-libdcp::Size
+dcp::Size
VideoContent::video_size_after_crop () const
{
return crop().apply (video_size_after_3d_split ());
}
/** @param t A time offset from the start of this piece of content.
- * @return Corresponding frame index.
+ * @return Corresponding time with respect to the content.
*/
-VideoContent::Frame
-VideoContent::time_to_content_video_frames (Time t) const
+ContentTime
+VideoContent::dcp_time_to_content_time (DCPTime t) const
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
-
- FrameRateConversion frc (video_frame_rate(), film->video_frame_rate());
-
- /* Here we are converting from time (in the DCP) to a frame number in the content.
- Hence we need to use the DCP's frame rate and the double/skip correction, not
- the source's rate.
- */
- return t * film->video_frame_rate() / (frc.factor() * TIME_HZ);
+ return ContentTime (t, FrameRateChange (video_frame_rate(), film->video_frame_rate()));
}
VideoContentScale::VideoContentScale (Ratio const * r)
}
-VideoContentScale::VideoContentScale (shared_ptr<cxml::Node> node)
+VideoContentScale::VideoContentScale (cxml::NodePtr node)
: _ratio (0)
, _scale (true)
{
/** @param display_container Size of the container that we are displaying this content in.
* @param film_container The size of the film's image.
*/
-libdcp::Size
-VideoContentScale::size (shared_ptr<const VideoContent> c, libdcp::Size display_container, libdcp::Size film_container) const
+dcp::Size
+VideoContentScale::size (shared_ptr<const VideoContent> c, dcp::Size display_container, dcp::Size film_container) const
{
if (_ratio) {
return fit_ratio_within (_ratio->ratio (), display_container);
}
- libdcp::Size const ac = c->video_size_after_crop ();
+ dcp::Size const ac = c->video_size_after_crop ();
/* Force scale if the film_container is smaller than the content's image */
if (_scale || film_container.width < ac.width || film_container.height < ac.height) {
/* Scale the image so that it will be in the right place in film_container, even if display_container is a
different size.
*/
- return libdcp::Size (
+ return dcp::Size (
c->video_size().width * float(display_container.width) / film_container.width,
c->video_size().height * float(display_container.height) / film_container.height
);
#include "video_decoder.h"
#include "image.h"
+#include "content_video.h"
#include "i18n.h"
using std::cout;
+using std::list;
using boost::shared_ptr;
+using boost::optional;
-VideoDecoder::VideoDecoder (shared_ptr<const Film> f, shared_ptr<const VideoContent> c)
- : Decoder (f)
+VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
+#ifdef DCPOMATIC_DEBUG
+ : test_gaps (0)
, _video_content (c)
- , _video_position (0)
+#else
+ : _video_content (c)
+#endif
{
}
+list<ContentVideo>
+VideoDecoder::decoded_video (VideoFrame frame)
+{
+ list<ContentVideo> output;
+
+ for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
+ if (i->frame == frame) {
+ output.push_back (*i);
+ }
+ }
+
+ return output;
+}
+
+/** Get all frames which exist in the content at a given frame index.
+ * @param frame Frame index.
+ * @param accurate true to try hard to return frames at the precise time that was requested, otherwise frames nearby may be returned.
+ * @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
+ */
+list<ContentVideo>
+VideoDecoder::get_video (VideoFrame frame, bool accurate)
+{
+ if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
+ /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+ seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+ }
+
+ list<ContentVideo> dec;
+
+ /* Now enough pass() calls should either:
+ * (a) give us what we want, or
+ * (b) hit the end of the decoder.
+ */
+ if (accurate) {
+ /* We are being accurate, so we want the right frame.
+ * This could all be one statement but it's split up for clarity.
+ */
+ while (true) {
+ if (!decoded_video(frame).empty ()) {
+ /* We got what we want */
+ break;
+ }
+
+ if (pass ()) {
+ /* The decoder has nothing more for us */
+ break;
+ }
+
+ if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
+ /* We're never going to get the frame we want. Perhaps the caller is asking
+ * for a video frame before the content's video starts (if its audio
+ * begins before its video, for example).
+ */
+ break;
+ }
+ }
+
+ dec = decoded_video (frame);
+ } else {
+ /* Any frame will do: use the first one that comes out of pass() */
+ while (_decoded_video.empty() && !pass ()) {}
+ if (!_decoded_video.empty ()) {
+ dec.push_back (_decoded_video.front ());
+ }
+ }
+
+ /* Clean up decoded_video */
+ while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
+ _decoded_video.pop_front ();
+ }
+
+ return dec;
+}
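+
+/* A sketch of typical use by the Player (see player.cc):
+ *
+ *   list<ContentVideo> v = decoder->get_video (frame, true);
+ *
+ * yielding zero frames (no video there), one (2D) or two (3D).
+ */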
+
+
+/** Called by subclasses when they have a video frame ready */
void
- VideoDecoder::video (shared_ptr<const Image> image, VideoFrame frame)
-VideoDecoder::video (shared_ptr<const ImageProxy> image, bool same, VideoContent::Frame frame)
++VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
{
+ /* We should not receive the same thing twice */
+ assert (_decoded_video.empty() || frame != _decoded_video.back().frame);
+
+ /* Fill in gaps */
+ /* XXX: 3D */
+
+ while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
+#ifdef DCPOMATIC_DEBUG
+ test_gaps++;
+#endif
+ _decoded_video.push_back (
+ ContentVideo (
+ _decoded_video.back().image,
+ _decoded_video.back().eyes,
++ _decoded_video.back().part,
+ _decoded_video.back().frame + 1
+ )
+ );
+ }
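+ /* e.g. if the last frame pushed was 5 and we are now given 8, copies of
+    frame 5 are pushed as frames 6 and 7 before 8 is added below. */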
+
switch (_video_content->video_frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- _decoded_video.push_back (ContentVideo (image, EYES_BOTH, frame));
- Video (image, EYES_BOTH, PART_WHOLE, same, frame);
++ _decoded_video.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
break;
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- _decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame));
- Video (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, same, frame / 2);
++ _decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- {
- int const half = image->size().width / 2;
- _decoded_video.push_back (ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame));
- _decoded_video.push_back (ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame));
- Video (image, EYES_LEFT, PART_LEFT_HALF, same, frame);
- Video (image, EYES_RIGHT, PART_RIGHT_HALF, same, frame);
++ _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
++ _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
break;
- }
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- {
- int const half = image->size().height / 2;
- _decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame));
- _decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame));
- Video (image, EYES_LEFT, PART_TOP_HALF, same, frame);
- Video (image, EYES_RIGHT, PART_BOTTOM_HALF, same, frame);
++ _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
++ _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
break;
- }
case VIDEO_FRAME_TYPE_3D_LEFT:
- _decoded_video.push_back (ContentVideo (image, EYES_LEFT, frame));
- Video (image, EYES_LEFT, PART_WHOLE, same, frame);
++ _decoded_video.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
break;
case VIDEO_FRAME_TYPE_3D_RIGHT:
- _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, frame));
- Video (image, EYES_RIGHT, PART_WHOLE, same, frame);
++ _decoded_video.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
break;
+ default:
+ assert (false);
}
-
- _video_position = frame + 1;
+}
+
+void
+VideoDecoder::seek (ContentTime, bool)
+{
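+	/* A seek invalidates anything we have already decoded, so throw it away */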
+ _decoded_video.clear ();
}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
*/
+/** @file src/lib/video_decoder.h
+ * @brief VideoDecoder class.
+ */
+
#ifndef DCPOMATIC_VIDEO_DECODER_H
#define DCPOMATIC_VIDEO_DECODER_H
#include "decoder.h"
#include "video_content.h"
#include "util.h"
+#include "content_video.h"
class VideoContent;
- class Image;
+ class ImageProxy;
+/** @class VideoDecoder
+ * @brief Parent for classes which decode video.
+ */
class VideoDecoder : public virtual Decoder
{
public:
- VideoDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const VideoContent>);
-
- /** Seek so that the next pass() will yield (approximately) the requested frame.
- * Pass accurate = true to try harder to get close to the request.
- */
- virtual void seek (VideoContent::Frame frame, bool accurate) = 0;
-
- /** Emitted when a video frame is ready.
- * First parameter is the video image.
- * Second parameter is the eye(s) which should see this image.
- * Third parameter is the part of this image that should be used.
- * Fourth parameter is true if the image is the same as the last one that was emitted for this Eyes value.
- * Fifth parameter is the frame within our source.
- */
- boost::signals2::signal<void (boost::shared_ptr<const ImageProxy>, Eyes, Part, bool, VideoContent::Frame)> Video;
-
+ VideoDecoder (boost::shared_ptr<const VideoContent> c);
+
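+	/** Get the video frame(s) to be displayed at a given frame index.
+	 *  @param frame Frame index within the content.
+	 *  @param accurate true to try harder to return the exact frame that was asked for.
+	 */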
+ std::list<ContentVideo> get_video (VideoFrame frame, bool accurate);
+
+ boost::shared_ptr<const VideoContent> video_content () const {
+ return _video_content;
+ }
+
+#ifdef DCPOMATIC_DEBUG
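+	/** Number of gap-filling frames that video() has had to invent (debug builds only) */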
+ int test_gaps;
+#endif
+
protected:
- void video (boost::shared_ptr<const ImageProxy>, bool, VideoContent::Frame);
+ void seek (ContentTime time, bool accurate);
- void video (boost::shared_ptr<const Image>, VideoFrame frame);
++ void video (boost::shared_ptr<const ImageProxy>, VideoFrame frame);
+ std::list<ContentVideo> decoded_video (VideoFrame frame);
+
boost::shared_ptr<const VideoContent> _video_content;
- /** This is in frames without taking 3D into account (e.g. if we are doing 3D alternate,
- * this would equal 2 on the left-eye second frame (not 1)).
- */
- VideoContent::Frame _video_position;
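+	/** Video frames that have been decoded and are awaiting collection by get_video() */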
+ std::list<ContentVideo> _decoded_video;
};
#endif
config.cc
content.cc
content_factory.cc
+ content_subtitle.cc
cross.cc
dci_metadata.cc
dcp_content_type.cc
- dcp_video.cc
dcp_video_frame.cc
- decoder.cc
+ dcpomatic_time.cc
dolby_cp750.cc
encoder.cc
examine_content_job.cc
ffmpeg_examiner.cc
film.cc
filter.cc
+ frame_rate_change.cc
internet.cc
image.cc
image_content.cc
image_decoder.cc
image_examiner.cc
+ image_proxy.cc
job.cc
job_manager.cc
kdm.cc
json_server.cc
log.cc
- piece.cc
player.cc
+ player_video_frame.cc
playlist.cc
ratio.cc
+ render_subtitles.cc
resampler.cc
scp_dcp_job.cc
scaler.cc
sndfile_content.cc
sndfile_decoder.cc
sound_processor.cc
- subtitle.cc
+ subrip.cc
+ subrip_content.cc
+ subrip_decoder.cc
subtitle_content.cc
subtitle_decoder.cc
timer.cc
AVCODEC AVUTIL AVFORMAT AVFILTER SWSCALE SWRESAMPLE
BOOST_FILESYSTEM BOOST_THREAD BOOST_DATETIME BOOST_SIGNALS2
SNDFILE OPENJPEG POSTPROC TIFF MAGICK SSH DCP CXML GLIB LZMA XML++
- CURL ZIP QUICKMAIL
+ CURL ZIP QUICKMAIL PANGOMM CAIROMM
"""
if bld.env.TARGET_OSX:
#include "lib/log.h"
#include "lib/video_decoder.h"
#include "lib/player.h"
- #include "lib/dcp_video.h"
+ #include "lib/player_video_frame.h"
using std::cout;
using std::cerr;
static shared_ptr<Film> film;
static ServerDescription* server;
static shared_ptr<FileLog> log_ (new FileLog ("servomatictest.log"));
-static int frame = 0;
+static int frame_count = 0;
void
- process_video (shared_ptr<DCPVideo> frame)
+ process_video (shared_ptr<PlayerVideoFrame> pvf)
{
- shared_ptr<DCPVideoFrame> local (
- new DCPVideoFrame (
- frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
- )
- );
-
- shared_ptr<DCPVideoFrame> remote (
- new DCPVideoFrame (
- frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
- )
- );
- shared_ptr<DCPVideoFrame> local (new DCPVideoFrame (pvf, frame, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
- shared_ptr<DCPVideoFrame> remote (new DCPVideoFrame (pvf, frame, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
++ shared_ptr<DCPVideoFrame> local (new DCPVideoFrame (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
++ shared_ptr<DCPVideoFrame> remote (new DCPVideoFrame (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_));
- cout << "Frame " << frame << ": ";
+ cout << "Frame " << frame_count << ": ";
cout.flush ();
- ++frame;
+ ++frame_count;
shared_ptr<EncodedData> local_encoded = local->encode_locally ();
shared_ptr<EncodedData> remote_encoded;
film->read_metadata ();
shared_ptr<Player> player = film->make_player ();
- player->disable_audio ();
- player->Video.connect (boost::bind (process_video, _1));
- bool done = false;
- while (!done) {
- done = player->pass ();
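+	/* Ask the player for each frame of the film in turn; process_video() encodes it both locally and remotely */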
+ DCPTime const frame = DCPTime::from_frames (1, film->video_frame_rate ());
+ for (DCPTime t; t < film->length(); t += frame) {
+ process_video (player->get_video(t, true).front ());
}
} catch (std::exception& e) {
cerr << "Error: " << e.what() << "\n";
#include "lib/examine_content_job.h"
#include "lib/filter.h"
#include "lib/player.h"
+ #include "lib/player_video_frame.h"
#include "lib/video_content.h"
#include "lib/video_decoder.h"
- #include "lib/dcp_video.h"
+#include "lib/timer.h"
#include "film_viewer.h"
#include "wx_util.h"
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
using boost::weak_ptr;
-using libdcp::Size;
+using dcp::Size;
FilmViewer::FilmViewer (shared_ptr<Film> f, wxWindow* p)
: wxPanel (p)
, _frame_number (new wxStaticText (this, wxID_ANY, wxT("")))
, _timecode (new wxStaticText (this, wxID_ANY, wxT("")))
, _play_button (new wxToggleButton (this, wxID_ANY, _("Play")))
- , _got_frame (false)
{
#ifndef __WXOSX__
_panel->SetDoubleBuffered (true);
_frame.reset ();
_slider->SetValue (0);
- set_position_text (0);
+ set_position_text ();
if (!_film) {
return;
return;
}
- _player->disable_audio ();
- _player->Video.connect (boost::bind (&FilmViewer::process_video, this, _1, _3));
+ _player->set_approximate_size ();
_player->Changed.connect (boost::bind (&FilmViewer::player_changed, this, _1));
calculate_sizes ();
- fetch_next_frame ();
+ get (_position, true);
}
void
-FilmViewer::fetch_current_frame_again ()
+FilmViewer::get (DCPTime p, bool accurate)
{
if (!_player) {
return;
}
- list<shared_ptr<DCPVideo> > dcp_video = _player->get_video (p, accurate);
- if (!dcp_video.empty ()) {
- _frame = dcp_video.front()->image (PIX_FMT_BGRA, true);
- /* We could do this with a seek and a fetch_next_frame, but this is
- a shortcut to make it quicker.
- */
-
- _got_frame = false;
- if (!_player->repeat_last_video ()) {
- fetch_next_frame ();
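++	/* Fetch the frame(s) at this time from the player; show the first, or clear the display if there are none */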
++ list<shared_ptr<PlayerVideoFrame> > pvf = _player->get_video (p, accurate);
++ if (!pvf.empty ()) {
++ _frame = pvf.front()->image ();
+ _frame = _frame->scale (_frame->size(), Scaler::from_id ("fastbilinear"), PIX_FMT_RGB24, false);
+ } else {
+ _frame.reset ();
}
+
+ _position = p;
+ set_position_text ();
_panel->Refresh ();
_panel->Update ();
}
void
FilmViewer::timer ()
{
- if (!_player) {
- return;
- }
-
- fetch_next_frame ();
+ get (_position + DCPTime::from_frames (1, _film->video_frame_rate ()), true);
- Time const len = _film->length ();
+ DCPTime const len = _film->length ();
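+	/* Update the slider, which has 4096 steps across the length of the film */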
- if (len) {
- int const new_slider_position = 4096 * _player->video_position() / len;
+ if (len.get ()) {
+ int const new_slider_position = 4096 * _position.get() / len.get();
if (new_slider_position != _slider->GetValue()) {
_slider->SetValue (new_slider_position);
}
void
FilmViewer::slider_moved ()
{
- if (_film && _player) {
- Time t = _slider->GetValue() * _film->length() / 4096;
- /* Ensure that we hit the end of the film at the end of the slider */
- if (t >= _film->length ()) {
- t = _film->length() - _film->video_frames_to_time (1);
- }
- _player->seek (t, false);
- fetch_next_frame ();
+ if (!_film) {
+ return;
+ }
+
+ DCPTime t (_slider->GetValue() * _film->length().get() / 4096);
+ /* Ensure that we hit the end of the film at the end of the slider */
+ if (t >= _film->length ()) {
+ t = _film->length() - DCPTime::from_frames (1, _film->video_frame_rate ());
}
+ get (t, false);
}
void
_panel_size.width = ev.GetSize().GetWidth();
_panel_size.height = ev.GetSize().GetHeight();
calculate_sizes ();
- fetch_current_frame_again ();
+ get (_position, true);
}
void
_out_size.width = max (64, _out_size.width);
_out_size.height = max (64, _out_size.height);
+	/* The player will round its image down to the nearest multiple of
+	   4 pixels to speed up its scaling, so do the same here to avoid
+	   black borders around things.  This is a bit of a hack.
+	*/
+ _out_size.width &= ~3;
+ _out_size.height &= ~3;
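+	/* (& ~3 clears the low two bits, rounding down to a multiple of 4; e.g. 1023 becomes 1020) */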
+
_player->set_video_container_size (_out_size);
}
}
void
-FilmViewer::process_video (shared_ptr<PlayerVideoFrame> pvf, Time t)
-{
- if (pvf->eyes() == EYES_RIGHT) {
- return;
- }
-
- _frame = pvf->image ();
- _got_frame = true;
-
- set_position_text (t);
-}
-
-void
-FilmViewer::set_position_text (Time t)
+FilmViewer::set_position_text ()
{
if (!_film) {
_frame_number->SetLabel ("0");
double const fps = _film->video_frame_rate ();
/* Count frame number from 1 ... not sure if this is the best idea */
- _frame_number->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps / TIME_HZ)) + 1));
+ _frame_number->SetLabel (wxString::Format (wxT("%d"), int (rint (_position.seconds() * fps)) + 1));
- double w = static_cast<double>(t) / TIME_HZ;
+ double w = _position.seconds ();
int const h = (w / 3600);
w -= h * 3600;
int const m = (w / 60);
_timecode->SetLabel (wxString::Format (wxT("%02d:%02d:%02d.%02d"), h, m, s, f));
}
-/** Ask the player to emit its next frame, then update our display */
-void
-FilmViewer::fetch_next_frame ()
-{
- /* Clear our frame in case we don't get a new one */
- _frame.reset ();
-
- if (!_player) {
- return;
- }
-
- _got_frame = false;
-
- try {
- while (!_got_frame && !_player->pass ()) {}
- } catch (DecodeError& e) {
- _play_button->SetValue (false);
- check_play_state ();
- error_dialog (this, wxString::Format (_("Could not decode video for view (%s)"), std_to_wx(e.what()).data()));
- } catch (OpenFileError& e) {
- /* There was a problem opening a content file; we'll let this slide as it
- probably means a missing content file, which we're already taking care of.
- */
- }
-
- _panel->Refresh ();
- _panel->Update ();
-}
-
void
FilmViewer::active_jobs_changed (bool a)
{
void
FilmViewer::back_clicked ()
{
- if (!_player) {
- return;
+ DCPTime p = _position - DCPTime::from_frames (1, _film->video_frame_rate ());
+ if (p < DCPTime ()) {
+ p = DCPTime ();
}
- /* Player::video_position is the time after the last frame that we received.
- We want to see the one before it, so we need to go back 2.
- */
-
- Time p = _player->video_position() - _film->video_frames_to_time (2);
- if (p < 0) {
- p = 0;
- }
-
- _player->seek (p, true);
- fetch_next_frame ();
+ get (p, true);
}
void
FilmViewer::forward_clicked ()
{
- if (!_player) {
- return;
- }
-
- fetch_next_frame ();
+ get (_position + DCPTime::from_frames (1, _film->video_frame_rate ()), true);
}
void
}
calculate_sizes ();
- fetch_current_frame_again ();
+ get (_position, true);
}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
class FFmpegPlayer;
class Image;
class RGBPlusAlphaImage;
- class PlayerImage;
+ class PlayerVideoFrame;
/** @class FilmViewer
* @brief A wx widget to view a preview of a Film.
- *
- * The film takes the following path through the viewer:
- *
- * 1. fetch_next_frame() asks our _player to decode some data. If it does, process_video()
- * will be called.
- *
- * 2. process_video() takes the image from the player (_frame).
- *
- * 3. fetch_next_frame() calls _panel->Refresh() and _panel->Update() which results in
- * paint_panel() being called; this creates frame_bitmap from _frame and blits it to the display.
- *
- * fetch_current_frame_again() asks the player to re-emit its current frame on the next pass(), and then
- * starts from step #1.
*/
class FilmViewer : public wxPanel
{
void slider_moved ();
void play_clicked ();
void timer ();
- void process_video (boost::shared_ptr<PlayerVideoFrame>, Time);
void calculate_sizes ();
void check_play_state ();
- void fetch_current_frame_again ();
- void fetch_next_frame ();
void active_jobs_changed (bool);
void back_clicked ();
void forward_clicked ();
void player_changed (bool);
- void set_position_text (Time);
+ void set_position_text ();
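+	/** Fetch the frame at a given time from our player and display it; the bool is the accurate flag passed to the player */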
+ void get (DCPTime, bool);
boost::shared_ptr<Film> _film;
boost::shared_ptr<Player> _player;
wxTimer _timer;
boost::shared_ptr<const Image> _frame;
- bool _got_frame;
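+	/** Time of the frame that is currently displayed */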
+ DCPTime _position;
/** Size of our output (including padding if we have any) */
- libdcp::Size _out_size;
+ dcp::Size _out_size;
/** Size of the panel that we have available */
- libdcp::Size _panel_size;
+ dcp::Size _panel_size;
};
*/
+/** @file test/client_server_test.cc
+ * @brief Test the server class.
+ *
+ * Create a test image and then encode it using the standard mechanism
+ * and also using a Server object running on localhost. Compare the resulting
+ * encoded data to check that they are the same.
+ */
+
#include <boost/test/unit_test.hpp>
#include <boost/thread.hpp>
#include "lib/server.h"
#include "lib/image.h"
#include "lib/cross.h"
#include "lib/dcp_video_frame.h"
+ #include "lib/scaler.h"
+ #include "lib/player_video_frame.h"
+ #include "lib/image_proxy.h"
using std::list;
using boost::shared_ptr;
BOOST_CHECK (remotely_encoded);
BOOST_CHECK_EQUAL (locally_encoded->size(), remotely_encoded->size());
- BOOST_CHECK (memcmp (locally_encoded->data(), remotely_encoded->data(), locally_encoded->size()) == 0);
+ BOOST_CHECK_EQUAL (memcmp (locally_encoded->data(), remotely_encoded->data(), locally_encoded->size()), 0);
}
BOOST_AUTO_TEST_CASE (client_server_test)
{
- shared_ptr<Image> image (new Image (PIX_FMT_RGB24, libdcp::Size (1998, 1080), true));
+ shared_ptr<Image> image (new Image (PIX_FMT_RGB24, dcp::Size (1998, 1080), true));
uint8_t* p = image->data()[0];
for (int y = 0; y < 1080; ++y) {
p += image->stride()[0];
}
- shared_ptr<Image> sub_image (new Image (PIX_FMT_RGBA, libdcp::Size (100, 200), true));
+ shared_ptr<Image> sub_image (new Image (PIX_FMT_RGBA, dcp::Size (100, 200), true));
p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
uint8_t* q = p;
p += sub_image->stride()[0];
}
- /* XXX */
- // shared_ptr<Subtitle> subtitle (new Subtitle (Position<int> (50, 60), sub_image));
+ shared_ptr<PlayerVideoFrame> pvf (
+ new PlayerVideoFrame (
+ shared_ptr<ImageProxy> (new RawImageProxy (image)),
+ Crop (),
- libdcp::Size (1998, 1080),
- libdcp::Size (1998, 1080),
++ dcp::Size (1998, 1080),
++ dcp::Size (1998, 1080),
+ Scaler::from_id ("bicubic"),
+ EYES_BOTH,
+ PART_WHOLE,
+ ColourConversion ()
+ )
+ );
+
- pvf->set_subtitle (sub_image, Position<int> (50, 60));
++ pvf->set_subtitle (PositionImage (sub_image, Position<int> (50, 60)));
shared_ptr<FileLog> log (new FileLog ("build/test/client_server_test.log"));
shared_ptr<DCPVideoFrame> frame (
new DCPVideoFrame (
- image,
+ pvf,
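+		/* followed by the frame index within the DCP, the frame rate, the J2K bandwidth and the resolution, as in the other DCPVideoFrame calls */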
0,
- EYES_BOTH,
- ColourConversion (),
24,
200000000,
RESOLUTION_2K,